Index: head/sys/dev/aac/aac_cam.c =================================================================== --- head/sys/dev/aac/aac_cam.c (revision 129878) +++ head/sys/dev/aac/aac_cam.c (revision 129879) @@ -1,619 +1,620 @@ /* * Copyright (c) 2002 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * CAM front-end for communicating with non-DASD devices */ #include "opt_aac.h" #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct aac_cam { device_t dev; struct aac_sim *inf; struct cam_sim *sim; struct cam_path *path; }; static int aac_cam_probe(device_t dev); static int aac_cam_attach(device_t dev); static int aac_cam_detach(device_t dev); static void aac_cam_action(struct cam_sim *, union ccb *); static void aac_cam_poll(struct cam_sim *); static void aac_cam_complete(struct aac_command *); static u_int32_t aac_cam_reset_bus(struct cam_sim *, union ccb *); static u_int32_t aac_cam_abort_ccb(struct cam_sim *, union ccb *); static u_int32_t aac_cam_term_io(struct cam_sim *, union ccb *); static int aac_cam_get_tran_settings(struct aac_softc *, struct ccb_trans_settings *, u_int32_t); static devclass_t aac_pass_devclass; static device_method_t aac_pass_methods[] = { DEVMETHOD(device_probe, aac_cam_probe), DEVMETHOD(device_attach, aac_cam_attach), DEVMETHOD(device_detach, aac_cam_detach), { 0, 0 } }; static driver_t aac_pass_driver = { "aacp", aac_pass_methods, sizeof(struct aac_cam) }; DRIVER_MODULE(aacp, aac, aac_pass_driver, aac_pass_devclass, 0, 0); MODULE_DEPEND(aacp, cam, 1, 1, 1); MALLOC_DEFINE(M_AACCAM, "aaccam", "AAC CAM info"); static int aac_cam_probe(device_t dev) { debug_called(2); return (0); } static int aac_cam_detach(device_t dev) { struct aac_cam *camsc; debug_called(2); camsc = (struct aac_cam *)device_get_softc(dev); mtx_lock(&Giant); xpt_async(AC_LOST_DEVICE, camsc->path, NULL); xpt_free_path(camsc->path); xpt_bus_deregister(cam_sim_path(camsc->sim)); cam_sim_free(camsc->sim, /*free_devq*/TRUE); mtx_unlock(&Giant); return (0); } /* * Register the driver as a CAM SIM */ static int aac_cam_attach(device_t dev) { struct cam_devq *devq; 
struct cam_sim *sim; struct cam_path *path; struct aac_cam *camsc; struct aac_sim *inf; debug_called(1); camsc = (struct aac_cam *)device_get_softc(dev); inf = (struct aac_sim *)device_get_ivars(dev); camsc->inf = inf; devq = cam_simq_alloc(inf->TargetsPerBus); if (devq == NULL) return (EIO); sim = cam_sim_alloc(aac_cam_action, aac_cam_poll, "aacp", camsc, device_get_unit(dev), 1, 1, devq); if (sim == NULL) { cam_simq_free(devq); return (EIO); } /* Since every bus has it's own sim, every bus 'appears' as bus 0 */ if (xpt_bus_register(sim, 0) != CAM_SUCCESS) { cam_sim_free(sim, TRUE); return (EIO); } if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, TRUE); return (EIO); } camsc->sim = sim; camsc->path = path; return (0); } static void aac_cam_action(struct cam_sim *sim, union ccb *ccb) { struct aac_cam *camsc; struct aac_softc *sc; struct aac_srb32 *srb; struct aac_fib *fib; struct aac_command *cm; debug_called(2); camsc = (struct aac_cam *)cam_sim_softc(sim); sc = camsc->inf->aac_sc; /* Synchronous ops, and ops that don't require communication with the * controller */ switch(ccb->ccb_h.func_code) { case XPT_SCSI_IO: case XPT_RESET_DEV: /* These are handled down below */ break; case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= (2 * 1024)) { /* 2GB */ ccg->heads = 255; ccg->secs_per_track = 63; } else if (size_mb >= (1 * 1024)) { /* 1GB */ ccg->heads = 128; ccg->secs_per_track = 32; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = 
PI_WIDE_16; cpi->target_sprt = 0; /* Resetting via the passthrough causes problems. */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = camsc->inf->TargetsPerBus; cpi->max_lun = 8; /* Per the controller spec */ cpi->initiator_id = camsc->inf->InitiatorBusId; cpi->bus_id = camsc->inf->BusNumber; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_GET_TRAN_SETTINGS: { u_int32_t handle; handle = AAC_BTL_TO_HANDLE(camsc->inf->BusNumber, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); ccb->ccb_h.status = aac_cam_get_tran_settings(sc, &ccb->cts, handle); xpt_done(ccb); return; } case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); return; case XPT_RESET_BUS: if (!(sc->flags & AAC_FLAGS_CAM_NORESET)) { ccb->ccb_h.status = aac_cam_reset_bus(sim, ccb); } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); return; case XPT_ABORT: ccb->ccb_h.status = aac_cam_abort_ccb(sim, ccb); xpt_done(ccb); return; case XPT_TERM_IO: ccb->ccb_h.status = aac_cam_term_io(sim, ccb); xpt_done(ccb); return; default: device_printf(sc->aac_dev, "Unsupported command 0x%x\n", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } /* Async ops that require communcation with the controller */ AAC_LOCK_ACQUIRE(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { AAC_LOCK_RELEASE(&sc->aac_io_lock); xpt_freeze_simq(sim, 1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } fib = cm->cm_fib; srb = (struct aac_srb32 *)&fib->data[0]; cm->cm_datalen = 0; switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: srb->flags = AAC_SRB_FLAGS_DATA_IN; cm->cm_flags |= AAC_CMD_DATAIN; break; case CAM_DIR_OUT: srb->flags = AAC_SRB_FLAGS_DATA_OUT; cm->cm_flags |= AAC_CMD_DATAOUT; break; case 
CAM_DIR_NONE: srb->flags = AAC_SRB_FLAGS_NO_DATA_XFER; break; default: srb->flags = AAC_SRB_FLAGS_UNSPECIFIED_DIRECTION; cm->cm_flags |= AAC_CMD_DATAIN | AAC_CMD_DATAOUT; break; } switch(ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio = &ccb->csio; srb->function = AAC_SRB_FUNC_EXECUTE_SCSI; /* * Copy the CDB into the SRB. It's only 6-16 bytes, * so a copy is not too expensive. */ srb->cdb_len = csio->cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, (u_int8_t *)&srb->cdb[0], srb->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, (u_int8_t *)&srb->cdb[0], srb->cdb_len); /* Map the s/g list. XXX 32bit addresses only! */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { srb->data_len = csio->dxfer_len; if (ccb->ccb_h.flags & CAM_DATA_PHYS) { /* * XXX This isn't 64-bit clean. * However, this condition is not * normally used in CAM. */ srb->sg_map32.SgCount = 1; srb->sg_map32.SgEntry[0].SgAddress = (uint32_t)(uintptr_t)csio->data_ptr; srb->sg_map32.SgEntry[0].SgByteCount = csio->dxfer_len; } else { /* * Arrange things so that the S/G * map will get set up automagically */ cm->cm_data = (void *)csio->data_ptr; cm->cm_datalen = csio->dxfer_len; cm->cm_sgtable = &srb->sg_map32; } } else { /* XXX Need to handle multiple s/g elements */ panic("aac_cam: multiple s/g elements"); } } else { srb->sg_map32.SgCount = 0; srb->sg_map32.SgEntry[0].SgByteCount = 0; srb->data_len = 0; } break; } case XPT_RESET_DEV: if (!(sc->flags & AAC_FLAGS_CAM_NORESET)) { srb->function = AAC_SRB_FUNC_RESET_DEVICE; break; } else { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } default: break; } srb->bus = camsc->inf->BusNumber; /* Bus number relative to the card */ srb->target = ccb->ccb_h.target_id; srb->lun = ccb->ccb_h.target_lun; srb->timeout = ccb->ccb_h.timeout; /* XXX */ srb->retry_limit = 0; cm->cm_complete = aac_cam_complete; cm->cm_private = ccb; cm->cm_timestamp = time_second; 
cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM; fib->Header.Command = ScsiPortCommand; fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb32); aac_enqueue_ready(cm); aac_startio(cm->cm_sc); AAC_LOCK_RELEASE(&sc->aac_io_lock); return; } static void aac_cam_poll(struct cam_sim *sim) { /* * Pinging the interrupt routine isn't very safe, nor is it * really necessary. Do nothing. */ } static void aac_cam_complete(struct aac_command *cm) { union ccb *ccb; struct aac_srb_response *srbr; struct aac_softc *sc; debug_called(2); sc = cm->cm_sc; ccb = cm->cm_private; srbr = (struct aac_srb_response *)&cm->cm_fib->data[0]; if (srbr->fib_status != 0) { device_printf(sc->aac_dev, "Passthru FIB failed!\n"); ccb->ccb_h.status = CAM_REQ_ABORTED; } else { /* * The SRB error codes just happen to match the CAM error * codes. How convienient! */ ccb->ccb_h.status = srbr->srb_status; /* Take care of SCSI_IO ops. */ if (ccb->ccb_h.func_code == XPT_SCSI_IO) { u_int8_t command, device; ccb->csio.scsi_status = srbr->scsi_status; /* Take care of autosense */ if (srbr->sense_len) { int sense_len, scsi_sense_len; scsi_sense_len = sizeof(struct scsi_sense_data); bzero(&ccb->csio.sense_data, scsi_sense_len); sense_len = (srbr->sense_len > scsi_sense_len) ? scsi_sense_len : srbr->sense_len; bcopy(&srbr->sense[0], &ccb->csio.sense_data, srbr->sense_len); ccb->csio.sense_len = sense_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; scsi_sense_print(&ccb->csio); } /* If this is an inquiry command, fake things out */ if (ccb->ccb_h.flags & CAM_CDB_POINTER) command = ccb->csio.cdb_io.cdb_ptr[0]; else command = ccb->csio.cdb_io.cdb_bytes[0]; if ((command == INQUIRY) && (ccb->ccb_h.status == CAM_REQ_CMP)) { device = ccb->csio.data_ptr[0] & 0x1f; /* * We want DASD and PROC devices to only be * visible through the pass device. 
*/ if ((device == T_DIRECT) || (device == T_PROCESSOR) || (sc->flags & AAC_FLAGS_CAM_PASSONLY)) ccb->csio.data_ptr[0] = ((device & 0xe0) | T_NODEVICE); } } } aac_release_command(cm); AAC_LOCK_RELEASE(&sc->aac_io_lock); mtx_lock(&Giant); xpt_done(ccb); mtx_unlock(&Giant); AAC_LOCK_ACQUIRE(&sc->aac_io_lock); return; } static u_int32_t aac_cam_reset_bus(struct cam_sim *sim, union ccb *ccb) { struct aac_fib *fib; struct aac_softc *sc; struct aac_cam *camsc; struct aac_vmioctl *vmi; struct aac_resetbus *rbc; int e; camsc = (struct aac_cam *)cam_sim_softc(sim); sc = camsc->inf->aac_sc; if (sc == NULL) { printf("Null sc?\n"); return (CAM_REQ_ABORTED); } aac_alloc_sync_fib(sc, &fib, 0); vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = ResetBus; rbc = (struct aac_resetbus *)&vmi->IoctlBuf[0]; rbc->BusNumber = camsc->inf->BusNumber; e = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmioctl)); if (e) { device_printf(sc->aac_dev,"Error %d sending ResetBus command\n", e); aac_release_sync_fib(sc); return (CAM_REQ_ABORTED); } aac_release_sync_fib(sc); return (CAM_REQ_CMP); } static u_int32_t aac_cam_abort_ccb(struct cam_sim *sim, union ccb *ccb) { return (CAM_UA_ABORT); } static u_int32_t aac_cam_term_io(struct cam_sim *sim, union ccb *ccb) { return (CAM_UA_TERMIO); } static int aac_cam_get_tran_settings(struct aac_softc *sc, struct ccb_trans_settings *cts, u_int32_t handle) { struct aac_fib *fib; struct aac_vmioctl *vmi; struct aac_vmi_devinfo_resp *vmi_resp; int error; aac_alloc_sync_fib(sc, &fib, 0); vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = handle; vmi->IoctlCmd = GetDeviceProbeInfo; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmioctl)); if (error) { 
device_printf(sc->aac_dev, "Error %d sending GetDeviceProbeInfo" " command\n", error); aac_release_sync_fib(sc); return (CAM_REQ_INVALID); } vmi_resp = (struct aac_vmi_devinfo_resp *)&fib->data[0]; if (vmi_resp->Status != ST_OK) { /* * The only reason why this command will return an error is * if the requested device doesn't exist. */ debug(1, "GetDeviceProbeInfo returned %d\n", vmi_resp->Status); aac_release_sync_fib(sc); return (CAM_DEV_NOT_THERE); } cts->bus_width = ((vmi_resp->Inquiry7 & 0x60) >> 5); cts->valid = CCB_TRANS_BUS_WIDTH_VALID; if (vmi_resp->ScsiRate) { cts->sync_period = scsi_calc_syncparam((10000 / vmi_resp->ScsiRate)); cts->sync_offset = vmi_resp->ScsiOffset; cts->valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID; } cts->flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; aac_release_sync_fib(sc); return (CAM_REQ_CMP); } Index: head/sys/dev/aac/aac_disk.c =================================================================== --- head/sys/dev/aac/aac_disk.c (revision 129878) +++ head/sys/dev/aac/aac_disk.c (revision 129879) @@ -1,396 +1,397 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_aac.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include /* * Interface to parent. */ static int aac_disk_probe(device_t dev); static int aac_disk_attach(device_t dev); static int aac_disk_detach(device_t dev); /* * Interface to the device switch. 
*/ static disk_open_t aac_disk_open; static disk_close_t aac_disk_close; static disk_strategy_t aac_disk_strategy; static dumper_t aac_disk_dump; static devclass_t aac_disk_devclass; static device_method_t aac_disk_methods[] = { DEVMETHOD(device_probe, aac_disk_probe), DEVMETHOD(device_attach, aac_disk_attach), DEVMETHOD(device_detach, aac_disk_detach), { 0, 0 } }; static driver_t aac_disk_driver = { "aacd", aac_disk_methods, sizeof(struct aac_disk) }; #define AAC_MAXIO 65536 DRIVER_MODULE(aacd, aac, aac_disk_driver, aac_disk_devclass, 0, 0); /* sysctl tunables */ static unsigned int aac_iosize_max = AAC_MAXIO; /* due to limits of the card */ TUNABLE_INT("hw.aac.iosize_max", &aac_iosize_max); SYSCTL_DECL(_hw_aac); SYSCTL_UINT(_hw_aac, OID_AUTO, iosize_max, CTLFLAG_RDTUN, &aac_iosize_max, 0, "Max I/O size per transfer to an array"); /* * Handle open from generic layer. * * This is called by the diskslice code on first open in order to get the * basic device geometry paramters. */ static int aac_disk_open(struct disk *dp) { struct aac_disk *sc; debug_called(4); sc = (struct aac_disk *)dp->d_drv1; if (sc == NULL) { printf("aac_disk_open: No Softc\n"); return (ENXIO); } /* check that the controller is up and running */ if (sc->ad_controller->aac_state & AAC_STATE_SUSPEND) { printf("Controller Suspended controller state = 0x%x\n", sc->ad_controller->aac_state); return(ENXIO); } sc->ad_flags |= AAC_DISK_OPEN; return (0); } /* * Handle last close of the disk device. */ static int aac_disk_close(struct disk *dp) { struct aac_disk *sc; debug_called(4); sc = (struct aac_disk *)dp->d_drv1; if (sc == NULL) return (ENXIO); sc->ad_flags &= ~AAC_DISK_OPEN; return (0); } /* * Handle an I/O request. */ static void aac_disk_strategy(struct bio *bp) { struct aac_disk *sc; debug_called(4); sc = (struct aac_disk *)bp->bio_disk->d_drv1; /* bogus disk? */ if (sc == NULL) { bp->bio_flags |= BIO_ERROR; bp->bio_error = EINVAL; biodone(bp); return; } /* do-nothing operation? 
*/ if (bp->bio_bcount == 0) { bp->bio_resid = bp->bio_bcount; biodone(bp); return; } /* perform accounting */ /* pass the bio to the controller - it can work out who we are */ AAC_LOCK_ACQUIRE(&sc->ad_controller->aac_io_lock); aac_submit_bio(bp); AAC_LOCK_RELEASE(&sc->ad_controller->aac_io_lock); return; } /* * Map the S/G elements for doing a dump. * * XXX This does not handle >4GB of RAM. Fixing it is possible except on * adapters that cannot do 64bit s/g lists. */ static void aac_dump_map_sg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct aac_fib *fib; struct aac_blockwrite *bw; struct aac_sg_table *sg; int i; fib = (struct aac_fib *)arg; bw = (struct aac_blockwrite *)&fib->data[0]; sg = &bw->SgMap; if (sg != NULL) { sg->SgCount = nsegs; for (i = 0; i < nsegs; i++) { if (segs[i].ds_addr >= BUS_SPACE_MAXADDR_32BIT) return; sg->SgEntry[i].SgAddress = segs[i].ds_addr; sg->SgEntry[i].SgByteCount = segs[i].ds_len; } fib->Header.Size = nsegs * sizeof(struct aac_sg_entry); } } /* * Dump memory out to an array * * Send out one command at a time with up to AAC_MAXIO of data. */ static int aac_disk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct aac_disk *ad; struct aac_softc *sc; struct aac_fib *fib; struct aac_blockwrite *bw; size_t len; int size; static bus_dmamap_t dump_datamap; static int first = 0; struct disk *dp; dp = arg; ad = dp->d_drv1; if (ad == NULL) return (EINVAL); sc= ad->ad_controller; if (!first) { first = 1; if (bus_dmamap_create(sc->aac_buffer_dmat, 0, &dump_datamap)) { printf("bus_dmamap_create failed\n"); return (ENOMEM); } } aac_alloc_sync_fib(sc, &fib, AAC_SYNC_LOCK_FORCE); bw = (struct aac_blockwrite *)&fib->data[0]; while (length > 0) { len = (length > AAC_MAXIO) ? 
AAC_MAXIO : length; bw->Command = VM_CtBlockWrite; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->BlockNumber = offset / AAC_BLOCK_SIZE; bw->ByteCount = len; bw->Stable = CUNSTABLE; /* * There really isn't any way to recover from errors or * resource shortages here. Oh well. Because of that, don't * bother trying to send the command from the callback; there * is too much required context. */ if (bus_dmamap_load(sc->aac_buffer_dmat, dump_datamap, virtual, len, aac_dump_map_sg, fib, 0) != 0) return (EIO); bus_dmamap_sync(sc->aac_buffer_dmat, dump_datamap, BUS_DMASYNC_PREWRITE); /* fib->Header.Size is set in aac_dump_map_sg */ size = fib->Header.Size + sizeof(struct aac_blockwrite); if (aac_sync_fib(sc, ContainerCommand, 0, fib, size)) { printf("Error dumping block 0x%jx\n", (uintmax_t)physical); return (EIO); } length -= len; offset += len; (vm_offset_t)virtual += len; } return (0); } /* * Handle completion of an I/O request. */ void aac_biodone(struct bio *bp) { struct aac_disk *sc; debug_called(4); sc = (struct aac_disk *)bp->bio_disk->d_drv1; if (bp->bio_flags & BIO_ERROR) disk_err(bp, "hard error", -1, 1); biodone(bp); } /* * Stub only. */ static int aac_disk_probe(device_t dev) { debug_called(2); return (0); } /* * Attach a unit to the controller. */ static int aac_disk_attach(device_t dev) { struct aac_disk *sc; debug_called(1); sc = (struct aac_disk *)device_get_softc(dev); /* initialise our softc */ sc->ad_controller = (struct aac_softc *)device_get_softc(device_get_parent(dev)); sc->ad_container = device_get_ivars(dev); sc->ad_dev = dev; /* * require that extended translation be enabled - other drivers read the * disk! 
*/ sc->ad_size = sc->ad_container->co_mntobj.Capacity; if (sc->ad_size >= (2 * 1024 * 1024)) { /* 2GB */ sc->ad_heads = 255; sc->ad_sectors = 63; } else if (sc->ad_size >= (1 * 1024 * 1024)) { /* 1GB */ sc->ad_heads = 128; sc->ad_sectors = 32; } else { sc->ad_heads = 64; sc->ad_sectors = 32; } sc->ad_cylinders = (sc->ad_size / (sc->ad_heads * sc->ad_sectors)); device_printf(dev, "%uMB (%u sectors)\n", sc->ad_size / ((1024 * 1024) / AAC_BLOCK_SIZE), sc->ad_size); /* attach a generic disk device to ourselves */ sc->unit = device_get_unit(dev); sc->ad_disk = disk_alloc(); sc->ad_disk->d_drv1 = sc; sc->ad_disk->d_name = "aacd"; sc->ad_disk->d_maxsize = aac_iosize_max; sc->ad_disk->d_open = aac_disk_open; sc->ad_disk->d_close = aac_disk_close; sc->ad_disk->d_strategy = aac_disk_strategy; sc->ad_disk->d_dump = aac_disk_dump; sc->ad_disk->d_sectorsize = AAC_BLOCK_SIZE; sc->ad_disk->d_mediasize = (off_t)sc->ad_size * AAC_BLOCK_SIZE; sc->ad_disk->d_fwsectors = sc->ad_sectors; sc->ad_disk->d_fwheads = sc->ad_heads; sc->ad_disk->d_unit = sc->unit; disk_create(sc->ad_disk, DISK_VERSION); return (0); } /* * Disconnect ourselves from the system. */ static int aac_disk_detach(device_t dev) { struct aac_disk *sc; debug_called(2); sc = (struct aac_disk *)device_get_softc(dev); if (sc->ad_flags & AAC_DISK_OPEN) return(EBUSY); disk_destroy(sc->ad_disk); return(0); } Index: head/sys/dev/aac/aac_linux.c =================================================================== --- head/sys/dev/aac/aac_linux.c (revision 129878) +++ head/sys/dev/aac/aac_linux.c (revision 129879) @@ -1,85 +1,86 @@ /*- * Copyright (c) 2002 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Linux ioctl handler for the aac device driver */ #include #include #include #include +#include #include #include #include #include #include /* There are multiple ioctl number ranges that need to be handled */ #define AAC_LINUX_IOCTL_MIN 0x0000 #define AAC_LINUX_IOCTL_MAX 0x21ff static linux_ioctl_function_t aac_linux_ioctl; static struct linux_ioctl_handler aac_linux_handler = {aac_linux_ioctl, AAC_LINUX_IOCTL_MIN, AAC_LINUX_IOCTL_MAX}; SYSINIT (aac_linux_register, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &aac_linux_handler); SYSUNINIT(aac_linux_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &aac_linux_handler); static int aac_linux_modevent(module_t mod, int type, void *data) { /* Do we care about any specific load/unload actions? 
*/ return (0); } DEV_MODULE(aac_linux, aac_linux_modevent, NULL); MODULE_DEPEND(aac_linux, linux, 1, 1, 1); static int aac_linux_ioctl(struct thread *td, struct linux_ioctl_args *args) { struct file *fp; u_long cmd; int error; if ((error = fget(td, args->fd, &fp)) != 0) return (error); cmd = args->cmd; /* * Pass the ioctl off to our standard handler. */ error = (fo_ioctl(fp, cmd, (caddr_t)args->arg, td->td_ucred, td)); fdrop(fp, td); return (error); } Index: head/sys/dev/aac/aac_pci.c =================================================================== --- head/sys/dev/aac/aac_pci.c (revision 129878) +++ head/sys/dev/aac/aac_pci.c (revision 129879) @@ -1,312 +1,313 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * PCI bus interface and resource allocation. */ #include "opt_aac.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include static int aac_pci_probe(device_t dev); static int aac_pci_attach(device_t dev); static device_method_t aac_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aac_pci_probe), DEVMETHOD(device_attach, aac_pci_attach), DEVMETHOD(device_detach, aac_detach), DEVMETHOD(device_suspend, aac_suspend), DEVMETHOD(device_resume, aac_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t aac_pci_driver = { "aac", aac_methods, sizeof(struct aac_softc) }; static devclass_t aac_devclass; DRIVER_MODULE(aac, pci, aac_pci_driver, aac_devclass, 0, 0); struct aac_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; int quirks; char *desc; } aac_identifiers[] = { {0x1028, 0x0001, 0x1028, 0x0001, AAC_HWIF_I960RX, 0, "Dell PERC 2/Si"}, {0x1028, 0x0002, 0x1028, 0x0002, AAC_HWIF_I960RX, 0, "Dell PERC 3/Di"}, {0x1028, 0x0003, 0x1028, 0x0003, AAC_HWIF_I960RX, 0, "Dell PERC 3/Si"}, {0x1028, 0x0004, 0x1028, 0x00d0, AAC_HWIF_I960RX, 0, "Dell PERC 3/Si"}, {0x1028, 0x0002, 0x1028, 0x00d1, AAC_HWIF_I960RX, 0, "Dell PERC 3/Di"}, {0x1028, 0x0002, 0x1028, 0x00d9, AAC_HWIF_I960RX, 0, "Dell PERC 
3/Di"}, {0x1028, 0x000a, 0x1028, 0x0106, AAC_HWIF_I960RX, 0, "Dell PERC 3/Di"}, {0x1028, 0x000a, 0x1028, 0x011b, AAC_HWIF_I960RX, 0, "Dell PERC 3/Di"}, {0x1028, 0x000a, 0x1028, 0x0121, AAC_HWIF_I960RX, 0, "Dell PERC 3/Di"}, {0x1011, 0x0046, 0x9005, 0x0364, AAC_HWIF_STRONGARM, 0, "Adaptec AAC-364"}, {0x1011, 0x0046, 0x9005, 0x0365, AAC_HWIF_STRONGARM, AAC_FLAGS_BROKEN_MEMMAP, "Adaptec SCSI RAID 5400S"}, {0x1011, 0x0046, 0x9005, 0x1364, AAC_HWIF_STRONGARM, AAC_FLAGS_PERC2QC, "Dell PERC 2/QC"}, {0x1011, 0x0046, 0x103c, 0x10c2, AAC_HWIF_STRONGARM, 0, "HP NetRaid-4M"}, {0x9005, 0x0285, 0x9005, 0x0285, AAC_HWIF_I960RX, AAC_FLAGS_NO4GB | AAC_FLAGS_256FIBS, "Adaptec SCSI RAID 2200S"}, {0x9005, 0x0285, 0x1028, 0x0287, AAC_HWIF_I960RX, AAC_FLAGS_NO4GB | AAC_FLAGS_256FIBS, "Dell PERC 320/DC"}, {0x9005, 0x0285, 0x9005, 0x0286, AAC_HWIF_I960RX, AAC_FLAGS_NO4GB | AAC_FLAGS_256FIBS, "Adaptec SCSI RAID 2120S"}, {0x9005, 0x0285, 0x9005, 0x0290, AAC_HWIF_I960RX, AAC_FLAGS_NO4GB, "Adaptec SCSI RAID 2410SA"}, {0x9005, 0x0285, 0x1028, 0x0291, AAC_HWIF_I960RX, AAC_FLAGS_NO4GB, "Dell CERC SATA RAID 2"}, {0x9005, 0x0285, 0x9005, 0x0292, AAC_HWIF_I960RX, AAC_FLAGS_NO4GB, "Adaptec SCSI RAID 2810SA"}, {0, 0, 0, 0, 0, 0, 0} }; /* * Determine whether this is one of our supported adapters. */ static int aac_pci_probe(device_t dev) { struct aac_ident *m; debug_called(1); for (m = aac_identifiers; m->vendor != 0; m++) { if ((m->vendor == pci_get_vendor(dev)) && (m->device == pci_get_device(dev)) && ((m->subvendor == 0) || (m->subvendor == pci_get_subvendor(dev))) && ((m->subdevice == 0) || ((m->subdevice == pci_get_subdevice(dev))))) { device_set_desc(dev, m->desc); return(-10); /* allow room to be overridden */ } } return(ENXIO); } /* * Allocate resources for our device, set up the bus interface. */ static int aac_pci_attach(device_t dev) { struct aac_softc *sc; int i, error; u_int32_t command; debug_called(1); /* * Initialise softc. 
*/ sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->aac_dev = dev; /* assume failure is 'not configured' */ error = ENXIO; /* * Verify that the adapter is correctly set up in PCI space. */ command = pci_read_config(sc->aac_dev, PCIR_COMMAND, 2); command |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, command, 2); command = pci_read_config(sc->aac_dev, PCIR_COMMAND, 2); if (!(command & PCIM_CMD_BUSMASTEREN)) { device_printf(sc->aac_dev, "can't enable bus-master feature\n"); goto out; } if ((command & PCIM_CMD_MEMEN) == 0) { device_printf(sc->aac_dev, "memory window not available\n"); goto out; } /* * Allocate the PCI register window. */ sc->aac_regs_rid = PCIR_BAR(0); if ((sc->aac_regs_resource = bus_alloc_resource_any(sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid, RF_ACTIVE)) == NULL) { device_printf(sc->aac_dev, "couldn't allocate register window\n"); goto out; } sc->aac_btag = rman_get_bustag(sc->aac_regs_resource); sc->aac_bhandle = rman_get_bushandle(sc->aac_regs_resource); /* * Allocate and connect our interrupt. */ sc->aac_irq_rid = 0; if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, &sc->aac_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(sc->aac_dev, "can't allocate interrupt\n"); goto out; } if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_FAST|INTR_TYPE_BIO, aac_intr, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up FAST interrupt\n"); if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_MPSAFE|INTR_ENTROPY|INTR_TYPE_BIO, aac_intr, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up MPSAFE interrupt\n"); goto out; } } /* assume failure is 'out of memory' */ error = ENOMEM; /* * Allocate the parent bus DMA tag appropriate for our PCI interface. * * Note that some of these controllers are 64-bit capable. 
*/ if (bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ AAC_MAXSGENTRIES, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_parent_dmat)) { device_printf(sc->aac_dev, "can't allocate parent DMA tag\n"); goto out; } /* * Detect the hardware interface version, set up the bus interface * indirection. */ sc->aac_hwif = AAC_HWIF_UNKNOWN; for (i = 0; aac_identifiers[i].vendor != 0; i++) { if ((aac_identifiers[i].vendor == pci_get_vendor(dev)) && (aac_identifiers[i].device == pci_get_device(dev)) && (aac_identifiers[i].subvendor == pci_get_subvendor(dev)) && (aac_identifiers[i].subdevice == pci_get_subdevice(dev))) { sc->aac_hwif = aac_identifiers[i].hwif; switch(sc->aac_hwif) { case AAC_HWIF_I960RX: debug(2, "set hardware up for i960Rx"); sc->aac_if = aac_rx_interface; break; case AAC_HWIF_STRONGARM: debug(2, "set hardware up for StrongARM"); sc->aac_if = aac_sa_interface; break; case AAC_HWIF_FALCON: debug(2, "set hardware up for Falcon/PPC"); sc->aac_if = aac_fa_interface; break; } /* Set up quirks */ sc->flags = aac_identifiers[i].quirks; break; } } if (sc->aac_hwif == AAC_HWIF_UNKNOWN) { device_printf(sc->aac_dev, "unknown hardware type\n"); error = ENXIO; goto out; } /* * Do bus-independent initialisation. */ error = aac_attach(sc); out: if (error) aac_free(sc); return(error); } Index: head/sys/dev/acpica/acpi.c =================================================================== --- head/sys/dev/acpica/acpi.c (revision 129878) +++ head/sys/dev/acpica/acpi.c (revision 129879) @@ -1,2789 +1,2790 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi.h" #include #include #include MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; #if __FreeBSD_version >= 500000 struct mtx acpi_mutex; #endif struct acpi_quirks { char *OemId; uint32_t OemRevision; char *value; }; #define ACPI_OEM_REV_ANY 0 static struct acpi_quirks acpi_quirks_table[] = { #ifdef notyet /* Bad PCI routing table. Used on some SuperMicro boards. */ { "PTLTD ", 0x06040000, "pci_link" }, #endif { NULL, 0, NULL } }; static int acpi_modevent(struct module *mod, int event, void *junk); static void acpi_identify(driver_t *driver, device_t parent); static int acpi_probe(device_t dev); static int acpi_attach(device_t dev); static void acpi_quirks_set(void); static device_t acpi_add_child(device_t bus, int order, const char *name, int unit); static int acpi_print_child(device_t bus, device_t child); static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value); static int acpi_set_resource(device_t dev, device_t child, int type, int rid, u_long start, u_long count); static int acpi_get_resource(device_t dev, device_t child, int type, int rid, u_long *startp, u_long *countp); static struct resource *acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static int 
acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids); static void acpi_probe_children(device_t bus); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_shutdown_pre_sync(void *arg, int howto); static void acpi_shutdown_final(void *arg, int howto); static void acpi_shutdown_poweroff(void *arg); static void acpi_enable_fixed_events(struct acpi_softc *sc); static int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw); static ACPI_STATUS acpi_wake_limit(ACPI_HANDLE h, UINT32 level, void *context, void **status); static int acpi_wake_limit_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static int acpi_child_location_str_method(device_t acdev, device_t child, char *buf, size_t buflen); static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child, char *buf, size_t buflen); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, acpi_identify), DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), 
DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, acpi_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method), DEVMETHOD(bus_child_location_str, acpi_child_location_str_method), DEVMETHOD(bus_driver_added, bus_generic_driver_added), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), {0, 0} }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; static devclass_t acpi_devclass; DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0); MODULE_VERSION(acpi, 1); static const char* sleep_state_names[] = { "S0", "S1", "S2", "S3", "S4", "S5", "NONE"}; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RW, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow override of whether methods execute in parallel or not. * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS" * errors for AML that really can't handle parallel method execution. * It is off by default since this breaks recursive methods and * some IBMs use such code. */ static int acpi_serialize_methods; TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods); /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus heirarchy hangs * off it. 
*/ static int acpi_modevent(struct module *mod, int event, void *junk) { switch(event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { #ifdef ACPI_DEBUGGER char *debugpoint; #endif static int error, started = 0; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (started) return_VALUE (error); started = 1; #if __FreeBSD_version >= 500000 /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); #endif /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_AllMethodsSerialized = (UINT8)acpi_serialize_methods; /* Start up the ACPI CA subsystem. */ #ifdef ACPI_DEBUGGER debugpoint = getenv("debug.acpi.debugger"); if (debugpoint) { if (!strcmp(debugpoint, "init")) acpi_EnterDebugger(); freeenv(debugpoint); } #endif if (ACPI_FAILURE(error = AcpiInitializeSubsystem())) { printf("ACPI: initialisation failed: %s\n", AcpiFormatException(error)); return_VALUE (error); } #ifdef ACPI_DEBUGGER debugpoint = getenv("debug.acpi.debugger"); if (debugpoint) { if (!strcmp(debugpoint, "tables")) acpi_EnterDebugger(); freeenv(debugpoint); } #endif if (ACPI_FAILURE(error = AcpiLoadTables())) { printf("ACPI: table load failed: %s\n", AcpiFormatException(error)); return_VALUE(error); } /* Set up any quirks we have for this XSDT. */ acpi_quirks_set(); if (acpi_disabled("acpi")) return_VALUE (AE_ERROR); return_VALUE (AE_OK); } /* * Detect ACPI, perform early initialisation */ static void acpi_identify(driver_t *driver, device_t parent) { device_t child; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return_VOID; /* Check that we haven't been disabled with a hint. 
*/ if (resource_disabled("acpi", 0)) return_VOID; /* Make sure we're not being doubly invoked. */ if (device_find_child(parent, "acpi", 0) != NULL) return_VOID; /* Initialize ACPI-CA. */ if (ACPI_FAILURE(acpi_Startup())) return_VOID; snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%#x", ACPI_CA_VERSION); /* Attach the actual ACPI device. */ if ((child = BUS_ADD_CHILD(parent, 0, "acpi", 0)) == NULL) { device_printf(parent, "ACPI: could not attach\n"); return_VOID; } } /* * Fetch some descriptive data from ACPI to put in our attach message */ static int acpi_probe(device_t dev) { ACPI_TABLE_HEADER th; char buf[20]; int error; struct sbuf sb; ACPI_STATUS status; ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { device_printf(dev, "Other PM system enabled.\n"); return_VALUE(ENXIO); } ACPI_LOCK; if (ACPI_FAILURE(status = AcpiGetTableHeader(ACPI_TABLE_XSDT, 1, &th))) { device_printf(dev, "couldn't get XSDT header: %s\n", AcpiFormatException(status)); error = ENXIO; } else { sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_bcat(&sb, th.OemId, 6); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, th.OemTableId, 8); sbuf_trim(&sb); sbuf_finish(&sb); device_set_desc_copy(dev, sbuf_data(&sb)); sbuf_delete(&sb); error = 0; } ACPI_UNLOCK; return_VALUE(error); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; #ifdef ACPI_DEBUGGER char *debugpoint; #endif ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_LOCK; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->acpi_dev = dev; #ifdef ACPI_DEBUGGER debugpoint = getenv("debug.acpi.debugger"); if (debugpoint) { if (!strcmp(debugpoint, "spaces")) acpi_EnterDebugger(); freeenv(debugpoint); } #endif /* Install the default address space handlers. 
*/ error = ENXIO; status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_DEFAULT_HANDLER, NULL, NULL); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not initialise SystemMemory handler: %s\n", AcpiFormatException(status)); goto out; } status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_SYSTEM_IO, ACPI_DEFAULT_HANDLER, NULL, NULL); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not initialise SystemIO handler: %s\n", AcpiFormatException(status)); goto out; } status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); if (ACPI_FAILURE(status)) { device_printf(dev, "could not initialise PciConfig handler: %s\n", AcpiFormatException(status)); goto out; } /* * Bring ACPI fully online. * * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ #ifdef ACPI_DEBUGGER debugpoint = getenv("debug.acpi.debugger"); if (debugpoint) { if (!strcmp(debugpoint, "enable")) acpi_EnterDebugger(); freeenv(debugpoint); } #endif flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. 
*/ acpi_ec_ecdt_probe(dev); if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Setup our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD, 0, 0, acpi_supported_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RD | CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RD | CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); 
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RD | CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_poweroff", CTLFLAG_RD | CTLFLAG_RW, &sc->acpi_disable_on_poweroff, 0, "ACPI subsystem disable on poweroff"); /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; sc->acpi_disable_on_poweroff = 0; if (bootverbose) sc->acpi_verbose = 1; if ((env = getenv("hw.acpi.verbose")) && strcmp(env, "0")) { sc->acpi_verbose = 1; freeenv(env); } /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS->S4Bios_f != 0) sc->acpi_s4bios = 1; /* * Dispatch the default sleep state to devices. The lid switch is set * to NONE by default to avoid surprising users. */ sc->acpi_power_button_sx = ACPI_STATE_S5; sc->acpi_lid_switch_sx = ACPI_S_STATES_MAX + 1; sc->acpi_standby_sx = ACPI_STATE_S1; sc->acpi_suspend_sx = ACPI_STATE_S3; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_S_STATES_MAX + 1; for (state = ACPI_STATE_S1; state < ACPI_STATE_S5; state++) if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. */ #ifdef ACPI_DEBUGGER debugpoint = getenv("debug.acpi.debugger"); if (debugpoint) { if (!strcmp(debugpoint, "probe")) acpi_EnterDebugger(); freeenv(debugpoint); } #endif /* Register our shutdown handlers */ EVENTHANDLER_REGISTER(shutdown_pre_sync, acpi_shutdown_pre_sync, sc, SHUTDOWN_PRI_LAST); EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST); /* * Register our acpi event handlers. * XXX should be configurable eg. via userland policy manager. 
*/ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states. */ sc->acpi_enabled = 1; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = 0; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644, "acpi"); sc->acpi_dev_t->si_drv1 = sc; #ifdef ACPI_DEBUGGER debugpoint = getenv("debug.acpi.debugger"); if (debugpoint) { if (strcmp(debugpoint, "running") == 0) acpi_EnterDebugger(); freeenv(debugpoint); } #endif #ifdef ACPI_USE_THREADS if ((error = acpi_task_thread_init())) goto out; #endif if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); if (!acpi_disabled("bus")) acpi_probe_children(dev); error = 0; out: ACPI_UNLOCK; return_VALUE (error); } static void acpi_quirks_set() { XSDT_DESCRIPTOR *xsdt; struct acpi_quirks *quirk; char *env, *tmp; int len; /* * If the user loaded a custom table or disabled "quirks", leave * the settings alone. */ len = 0; if ((env = getenv("acpi_dsdt_load")) != NULL) { /* XXX No strcasecmp but this is good enough. */ if (*env == 'Y' || *env == 'y') goto out; freeenv(env); } if ((env = getenv("debug.acpi.disabled")) != NULL) { if (strstr("quirks", env) != NULL) goto out; len = strlen(env); } /* * Search through our quirk table and concatenate the disabled * values with whatever we find. */ xsdt = AcpiGbl_XSDT; for (quirk = acpi_quirks_table; quirk->OemId; quirk++) { if (!strncmp(xsdt->OemId, quirk->OemId, strlen(quirk->OemId)) && (xsdt->OemRevision == quirk->OemRevision || quirk->OemRevision == ACPI_OEM_REV_ANY)) { len += strlen(quirk->value) + 2; if ((tmp = malloc(len, M_TEMP, M_NOWAIT)) == NULL) goto out; sprintf(tmp, "%s %s", env ? 
env : "", quirk->value); setenv("debug.acpi.disabled", tmp); free(tmp, M_TEMP); break; } } out: if (env) freeenv(env); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%ld"); retval += bus_print_child_footer(bus, child); return (retval); } /* Location hint for devctl(8) */ static int acpi_child_location_str_method(device_t cbdev, device_t child, char *buf, size_t buflen) { struct acpi_device *dinfo = device_get_ivars(child); if (dinfo->ad_handle) snprintf(buf, buflen, "path=%s", acpi_name(dinfo->ad_handle)); else snprintf(buf, buflen, "magic=unknown"); return (0); } /* PnP information for devctl(8) */ static int acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf, size_t buflen) { ACPI_BUFFER adbuf = {ACPI_ALLOCATE_BUFFER, NULL}; ACPI_DEVICE_INFO *adinfo; struct acpi_device *dinfo = device_get_ivars(child); char *end; int error; error = AcpiGetObjectInfo(dinfo->ad_handle, &adbuf); adinfo = (ACPI_DEVICE_INFO *) adbuf.Pointer; if (error) snprintf(buf, buflen, "Unknown"); else snprintf(buf, buflen, "_HID=%s _UID=%lu", (adinfo->Valid & ACPI_VALID_HID) ? 
adinfo->HardwareId.Value : "UNKNOWN", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.Value, &end, 10) : 0); if (adinfo) AcpiOsFree(adinfo); return (0); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { printf("device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_MAGIC: *(int *)result = ad->ad_magic; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { printf("device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_MAGIC: ad->ad_magic = (int)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static int acpi_set_resource(device_t dev, device_t child, int type, int rid, u_long start, u_long count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; resource_list_add(rl, type, rid, start, start + count -1, count); return(0); } static int acpi_get_resource(device_t dev, device_t child, int type, int rid, u_long *startp, u_long *countp) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) 
return(ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; return (resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; return (resource_list_release(rl, bus, child, type, rid, r)); } /* Allocate an IO port or memory resource, given its GAS. */ struct resource * acpi_bus_alloc_gas(device_t dev, int *rid, ACPI_GENERIC_ADDRESS *gas) { int type; if (gas == NULL || !ACPI_VALID_ADDRESS(gas->Address) || gas->RegisterBitWidth < 8) return (NULL); switch (gas->AddressSpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: type = SYS_RES_IOPORT; break; default: return (NULL); } bus_set_resource(dev, type, *rid, gas->Address, gas->RegisterBitWidth / 8); return (bus_alloc_resource_any(dev, type, rid, RF_ACTIVE)); } /* * Handle ISA-like devices probing for a PnP ID to match. */ #define PNP_EISAID(s) \ ((((s[0] - '@') & 0x1f) << 2) \ | (((s[1] - '@') & 0x18) >> 3) \ | (((s[1] - '@') & 0x07) << 13) \ | (((s[2] - '@') & 0x1f) << 8) \ | (PNP_HEXTONUM(s[4]) << 16) \ | (PNP_HEXTONUM(s[3]) << 20) \ | (PNP_HEXTONUM(s[6]) << 24) \ | (PNP_HEXTONUM(s[5]) << 28)) static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_BUFFER buf; ACPI_HANDLE h; ACPI_STATUS error; u_int32_t pnpid; ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; ACPI_LOCK; /* Fetch and validate the HID. 
*/ if ((h = acpi_get_handle(dev)) == NULL) goto out; error = AcpiGetObjectInfo(h, &buf); if (ACPI_FAILURE(error)) goto out; devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; if ((devinfo->Valid & ACPI_VALID_HID) != 0) pnpid = PNP_EISAID(devinfo->HardwareId.Value); out: if (buf.Pointer != NULL) AcpiOsFree(buf.Pointer); ACPI_UNLOCK; return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_BUFFER buf; ACPI_HANDLE h; ACPI_STATUS error; uint32_t *pnpid; int valid, i; ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; valid = 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; ACPI_LOCK; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL) goto out; error = AcpiGetObjectInfo(h, &buf); if (ACPI_FAILURE(error)) goto out; devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; if ((devinfo->Valid & ACPI_VALID_CID) == 0) goto out; if (devinfo->CompatibilityId.Count < count) count = devinfo->CompatibilityId.Count; for (i = 0; i < count; i++) { if (strncmp(devinfo->CompatibilityId.Id[i].Value, "PNP", 3) != 0) continue; *pnpid++ = PNP_EISAID(devinfo->CompatibilityId.Id[i].Value); valid++; } out: if (buf.Pointer != NULL) AcpiOsFree(buf.Pointer); ACPI_UNLOCK; return_VALUE (valid); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. 
*/ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: return_VALUE (result); } /* * Scan relevant portions of the ACPI namespace and attach child devices. * * Note that we only expect to find devices in the \_PR_, \_TZ_, \_SI_ and * \_SB_ scopes, and \_PR_ and \_TZ_ become obsolete in the ACPI 2.0 spec. */ static void acpi_probe_children(device_t bus) { ACPI_HANDLE parent; ACPI_STATUS status; static char *scopes[] = {"\\_PR_", "\\_TZ_", "\\_SI", "\\_SB_", NULL}; int i; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* * Scan the namespace and insert placeholders for all the devices that * we find. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); for (i = 0; scopes[i] != NULL; i++) { status = AcpiGetHandle(ACPI_ROOT_OBJECT, scopes[i], &parent); if (ACPI_SUCCESS(status)) { AcpiWalkNamespace(ACPI_TYPE_ANY, parent, 100, acpi_probe_child, bus, NULL); } } /* * Scan all of the child devices we have created and let them probe/attach. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "first bus_generic_attach\n")); bus_generic_attach(bus); /* * Some of these children may have attached others as part of their attach * process (eg. the root PCI bus driver), so rescan. 
*/ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "second bus_generic_attach\n")); bus_generic_attach(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_OBJECT_TYPE type; device_t child, bus = (device_t)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { switch(type) { case ACPI_TYPE_DEVICE: case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: if (acpi_disabled("children")) break; /* * Create a placeholder device for this node. Sort the placeholder * so that the probe/attach passes will run breadth-first. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", acpi_name(handle))); child = BUS_ADD_CHILD(bus, level * 10, NULL, -1); if (child == NULL) break; acpi_set_handle(child, handle); /* Check if the device can generate wake events. */ if (ACPI_SUCCESS(AcpiEvaluateObject(handle, "_PRW", NULL, NULL))) device_set_flags(child, ACPI_FLAG_WAKE_CAPABLE); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources. 
*/ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); /* If we're debugging, probe/attach now rather than later */ ACPI_DEBUG_EXEC(device_probe_and_attach(child)); break; } } return_ACPI_STATUS (AE_OK); } static void acpi_shutdown_pre_sync(void *arg, int howto) { struct acpi_softc *sc = arg; ACPI_ASSERTLOCK; /* Disable all wake GPEs not appropriate for this state. */ acpi_wake_limit_walk(ACPI_STATE_S5); /* * Disable all ACPI events before soft off, otherwise the system * will be turned on again on some laptops. * * XXX this should probably be restricted to masking some events just * before powering down, since we may still need ACPI during the * shutdown process. */ if (sc->acpi_disable_on_poweroff) acpi_Disable(sc); } static void acpi_shutdown_final(void *arg, int howto) { ACPI_STATUS status; ACPI_ASSERTLOCK; /* * If powering off, run the actual shutdown code on each processor. * It will only perform the shutdown on the BSP. Some chipsets do * not power off the system correctly if called from an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { printf("AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } printf("Powering system off using ACPI\n"); smp_rendezvous(NULL, acpi_shutdown_poweroff, NULL, NULL); } else { printf("Shutting down ACPI\n"); AcpiTerminate(); } } /* * Since this function may be called with locks held or in an unknown * context, it cannot allocate memory, acquire locks, sleep, etc. */ static void acpi_shutdown_poweroff(void *arg) { ACPI_STATUS status; ACPI_ASSERTLOCK; /* Only attempt to power off if this is the BSP (cpuid 0). 
*/ if (PCPU_GET(cpuid) != 0) return; ACPI_DISABLE_IRQS(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { printf("ACPI power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); printf("ACPI power-off failed - timeout\n"); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; ACPI_ASSERTLOCK; /* Enable and clear fixed events and install handlers. */ if (AcpiGbl_FADT != NULL && AcpiGbl_FADT->PwrButton == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if (AcpiGbl_FADT != NULL && AcpiGbl_FADT->SleepButton == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; ACPI_BUFFER buf; ACPI_STATUS error; int ret; ACPI_ASSERTLOCK; ret = FALSE; if ((h = acpi_get_handle(dev)) == NULL) return (FALSE); buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; error = AcpiGetObjectInfo(h, &buf); if (ACPI_FAILURE(error)) return (FALSE); devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; /* If no _STA method, must be present */ if ((devinfo->Valid & ACPI_VALID_STA) == 0) ret = TRUE; /* Return true for 'present' and 'functioning' */ if ((devinfo->CurrentStatus & 0x9) == 0x9) ret = TRUE; AcpiOsFree(buf.Pointer); return (ret); } /* * Returns true if the battery is actually present and inserted. 
*/ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; ACPI_BUFFER buf; ACPI_STATUS error; int ret; ACPI_ASSERTLOCK; ret = FALSE; if ((h = acpi_get_handle(dev)) == NULL) return (FALSE); buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; error = AcpiGetObjectInfo(h, &buf); if (ACPI_FAILURE(error)) return (FALSE); devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; /* If no _STA method, must be present */ if ((devinfo->Valid & ACPI_VALID_STA) == 0) ret = TRUE; /* Return true for 'present' and 'functioning' */ if ((devinfo->CurrentStatus & 0x19) == 0x19) ret = TRUE; AcpiOsFree(buf.Pointer); return (ret); } /* * Match a HID string against a device */ BOOLEAN acpi_MatchHid(device_t dev, char *hid) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; ACPI_BUFFER buf; ACPI_STATUS error; int ret, i; ACPI_ASSERTLOCK; ret = FALSE; if (hid == NULL) return (FALSE); if ((h = acpi_get_handle(dev)) == NULL) return (FALSE); buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; error = AcpiGetObjectInfo(h, &buf); if (ACPI_FAILURE(error)) return (FALSE); devinfo = (ACPI_DEVICE_INFO *)buf.Pointer; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.Value) == 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) { for (i = 0; i < devinfo->CompatibilityId.Count; i++) { if (strcmp(hid, devinfo->CompatibilityId.Id[i].Value) == 0) { ret = TRUE; break; } } } AcpiOsFree(buf.Pointer); return (ret); } /* * Return the handle of a named object within our scope, ie. that of (parent) * or one if its parents. 
*/ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; ACPI_ASSERTLOCK; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } /* Find the difference between two PM tick counts. */ uint32_t acpi_TimerDelta(uint32_t end, uint32_t start) { uint32_t delta; if (end >= start) delta = end - start; else if (AcpiGbl_FADT->TmrValExt == 0) delta = ((0x00FFFFFF - start) + end + 1) & 0x00FFFFFF; else delta = ((0xFFFFFFFF - start) + end + 1); return (delta); } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; ACPI_ASSERTLOCK; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer. */ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; ACPI_ASSERTLOCK; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. 
*/ buf.Pointer = ¶m; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of an a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. 
*/ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Id == ACPI_RSTYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Id = ACPI_RSTYPE_END_TAG; rp->Length = 0; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator. * This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Id == ACPI_RSTYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. 
*/ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RESOURCE_LENGTH_NO_DATA + ACPI_RESOURCE_LENGTH) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RESOURCE_LENGTH_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Id = ACPI_RSTYPE_END_TAG; rp->Length = 0; return (AE_OK); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } #define ACPI_MINIMUM_AWAKETIME 5 static void acpi_sleep_enable(void *arg) { ((struct acpi_softc *)arg)->acpi_sleep_disabled = 0; } /* * Set the system sleep state * * Currently we support S1-S5 but S4 is only S4BIOS */ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { ACPI_STATUS status = AE_OK; UINT8 TypeA; UINT8 TypeB; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); ACPI_ASSERTLOCK; /* Avoid reentry if already attempting to suspend. */ if (sc->acpi_sstate != ACPI_STATE_S0) return_ACPI_STATUS (AE_BAD_PARAMETER); /* We recently woke up so don't suspend again for a while. */ if (sc->acpi_sleep_disabled) return_ACPI_STATUS (AE_OK); switch (state) { case ACPI_STATE_S1: case ACPI_STATE_S2: case ACPI_STATE_S3: case ACPI_STATE_S4: status = AcpiGetSleepTypeData((UINT8)state, &TypeA, &TypeB); if (status == AE_NOT_FOUND) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); break; } else if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiGetSleepTypeData failed - %s\n", AcpiFormatException(status)); break; } sc->acpi_sstate = state; sc->acpi_sleep_disabled = 1; /* Disable all wake GPEs not appropriate for this state. 
*/ acpi_wake_limit_walk(state); /* Inform all devices that we are going to sleep. */ if (DEVICE_SUSPEND(root_bus) != 0) { /* * Re-wake the system. * * XXX note that a better two-pass approach with a 'veto' pass * followed by a "real thing" pass would be better, but the * current bus interface does not provide for this. */ DEVICE_RESUME(root_bus); return_ACPI_STATUS (AE_ERROR); } status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); break; } if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); if (state != ACPI_STATE_S1) { acpi_sleep_machdep(sc, state); /* AcpiEnterSleepState() may be incomplete, unlock if locked. */ if (AcpiGbl_MutexInfo[ACPI_MTX_HARDWARE].OwnerId != ACPI_MUTEX_NOT_ACQUIRED) { AcpiUtReleaseMutex(ACPI_MTX_HARDWARE); } /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState((UINT8)state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); break; } } AcpiLeaveSleepState((UINT8)state); DEVICE_RESUME(root_bus); sc->acpi_sstate = ACPI_STATE_S0; acpi_enable_fixed_events(sc); break; case ACPI_STATE_S5: /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. */ shutdown_nice(RB_POWEROFF); break; case ACPI_STATE_S0: default: status = AE_BAD_PARAMETER; break; } /* Disable a second sleep request for a short period */ if (sc->acpi_sleep_disabled) timeout(acpi_sleep_enable, (caddr_t)sc, hz * ACPI_MINIMUM_AWAKETIME); return_ACPI_STATUS (status); } /* Initialize a device's wake GPE. */ int acpi_wake_init(device_t dev, int type) { struct acpi_prw_data prw; /* Check that the device can wake the system. */ if ((device_get_flags(dev) & ACPI_FLAG_WAKE_CAPABLE) == 0) return (ENXIO); /* Evaluate _PRW to find the GPE. 
*/ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); /* Set the requested type for the GPE (runtime, wake, or both). */ if (ACPI_FAILURE(AcpiSetGpeType(prw.gpe_handle, prw.gpe_bit, type))) { device_printf(dev, "set GPE type failed\n"); return (ENXIO); } return (0); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_HANDLE handle; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system. */ flags = device_get_flags(dev); handle = acpi_get_handle(dev); if ((flags & ACPI_FLAG_WAKE_CAPABLE) == 0 || handle == NULL) return (ENXIO); /* Evaluate _PRW to find the GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); if (enable) { status = AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } device_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } device_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } /* Configure a device's GPE appropriately for the new sleep state. */ int acpi_wake_sleep_prep(device_t dev, int sstate) { struct acpi_prw_data prw; ACPI_HANDLE handle; int flags; /* Check that this is an ACPI device and get its GPE. */ flags = device_get_flags(dev); handle = acpi_get_handle(dev); if ((flags & ACPI_FLAG_WAKE_CAPABLE) == 0 || handle == NULL) return (ENXIO); /* Evaluate _PRW to find the GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); /* * TBD: All Power Resources referenced by elements 2 through N * of the _PRW object are put into the ON state. */ /* * If the user requested that this device wake the system and the next * sleep state is valid for this GPE, enable it and the device's wake * capability. 
The sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. Return early, leaving * the appropriate power resources enabled. */ if ((flags & ACPI_FLAG_WAKE_ENABLED) != 0 && sstate <= prw.lowest_wake) { if (bootverbose) device_printf(dev, "wake_prep enabled gpe %#x for state %d\n", prw.gpe_bit, sstate); AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); acpi_SetInteger(handle, "_PSW", 1); return (0); } /* * If the device wake was disabled or this sleep state is too low for * this device, disable its wake capability and GPE. */ AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); acpi_SetInteger(handle, "_PSW", 0); if (bootverbose) device_printf(dev, "wake_prep disabled gpe %#x for state %d\n", prw.gpe_bit, sstate); /* * TBD: All Power Resources referenced by elements 2 through N * of the _PRW object are put into the OFF state. */ return (0); } /* Re-enable GPEs after wake. */ int acpi_wake_run_prep(device_t dev) { struct acpi_prw_data prw; ACPI_HANDLE handle; int flags; /* Check that this is an ACPI device and get its GPE. */ flags = device_get_flags(dev); handle = acpi_get_handle(dev); if ((flags & ACPI_FLAG_WAKE_CAPABLE) == 0 || handle == NULL) return (ENXIO); /* Evaluate _PRW to find the GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); /* * TBD: Be sure all Power Resources referenced by elements 2 through N * of the _PRW object are in the ON state. */ /* Disable wake capability and if the user requested, enable the GPE. */ acpi_SetInteger(handle, "_PSW", 0); if ((flags & ACPI_FLAG_WAKE_ENABLED) != 0) AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); return (0); } static ACPI_STATUS acpi_wake_limit(ACPI_HANDLE h, UINT32 level, void *context, void **status) { struct acpi_prw_data prw; int *sstate; /* It's ok not to have _PRW if the device can't wake the system. 
*/ if (acpi_parse_prw(h, &prw) != 0) return (AE_OK); sstate = (int *)context; if (*sstate > prw.lowest_wake) AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR); return (AE_OK); } /* Walk all system devices, disabling them if necessary for sstate. */ static int acpi_wake_limit_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_ANY, sb_handle, 100, acpi_wake_limit, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) return (error); for (i = 0; i < numdevs; i++) { child = devlist[i]; if (!device_is_attached(child)) continue; if (device_get_flags(child) & ACPI_FLAG_WAKE_CAPABLE) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } acpi_wake_sysctl_walk(child); } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (device_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ static int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. 
*/ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* XXX No power resource handling yet. 
*/ prw->power_res = NULL; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * Enable/Disable ACPI */ ACPI_STATUS acpi_Enable(struct acpi_softc *sc) { ACPI_STATUS status; u_int32_t flags; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; flags = ACPI_NO_ADDRESS_SPACE_INIT | ACPI_NO_HARDWARE_INIT | ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; if (!sc->acpi_enabled) status = AcpiEnableSubsystem(flags); else status = AE_OK; if (status == AE_OK) sc->acpi_enabled = 1; return_ACPI_STATUS (status); } ACPI_STATUS acpi_Disable(struct acpi_softc *sc) { ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; if (sc->acpi_enabled) status = AcpiDisable(); else status = AE_OK; if (status == AE_OK) sc->acpi_enabled = 0; return_ACPI_STATUS (status); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); ACPI_LOCK; if (state >= ACPI_STATE_S0 && state <= ACPI_S_STATES_MAX) acpi_SetSleepState((struct acpi_softc *)arg, state); ACPI_UNLOCK; return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Well, what to do? 
:-) */ ACPI_LOCK; ACPI_UNLOCK; return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ UINT32 acpi_event_power_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This is kinda ugly, and should not be here. */ struct acpi_staticbuf { ACPI_BUFFER buffer; char data[512]; }; char * acpi_name(ACPI_HANDLE handle) { static struct acpi_staticbuf buf; ACPI_ASSERTLOCK; buf.buffer.Length = 512; buf.buffer.Pointer = &buf.data[0]; if (ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf.buffer))) return (buf.buffer.Pointer); return ("(unknown path)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. 
*/ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while ((*cp != 0) && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while ((cp[len] != 0) && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. */ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; /* * Register an ioctl handler. */ int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); return (0); } /* * Deregister an ioctl handler. 
*/ void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if ((hp->cmd == cmd) && (hp->fn == fn)) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } } static int acpiopen(dev_t dev, int flag, int fmt, d_thread_t *td) { return (0); } static int acpiclose(dev_t dev, int flag, int fmt, d_thread_t *td) { return (0); } static int acpiioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, d_thread_t *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, xerror, state; ACPI_LOCK_DECL; ACPI_LOCK; error = state = 0; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ if (acpi_ioctl_hooks_initted) { TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) { xerror = hp->fn(cmd, addr, hp->arg); if (xerror != 0) error = xerror; goto out; } } } /* * Core ioctls are not permitted for non-writable user. * Currently, other ioctls just fetch information. * Not changing system behavior. */ if((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls. 
*/ switch (cmd) { case ACPIIO_ENABLE: if (ACPI_FAILURE(acpi_Enable(sc))) error = ENXIO; break; case ACPIIO_DISABLE: if (ACPI_FAILURE(acpi_Disable(sc))) error = ENXIO; break; case ACPIIO_SETSLPSTATE: if (!sc->acpi_enabled) { error = ENXIO; break; } state = *(int *)addr; if (state >= ACPI_STATE_S0 && state <= ACPI_S_STATES_MAX) { if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = EINVAL; } else { error = EINVAL; } break; default: if (error == 0) error = EINVAL; break; } out: ACPI_UNLOCK; return (error); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[4]; char buf[16]; int error; UINT8 state, TypeA, TypeB; buf[0] = '\0'; for (state = ACPI_STATE_S1; state < ACPI_S_STATES_MAX + 1; state++) { if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) { sprintf(sleep_state, "S%d ", state); strcat(buf, sleep_state); } } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error; u_int new_state, old_state; old_state = *(u_int *)oidp->oid_arg1; if (old_state > ACPI_S_STATES_MAX + 1) { strcpy(sleep_state, "unknown"); } else { bzero(sleep_state, sizeof(sleep_state)); strncpy(sleep_state, sleep_state_names[old_state], sizeof(sleep_state_names[old_state])); } error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = ACPI_STATE_S0; for (; new_state <= ACPI_S_STATES_MAX + 1; new_state++) { if (strncmp(sleep_state, sleep_state_names[new_state], sizeof(sleep_state)) == 0) break; } if (new_state <= ACPI_S_STATES_MAX + 1) { if (new_state != old_state) *(u_int *)oidp->oid_arg1 = new_state; } else { error = EINVAL; } } return (error); } /* Inform devctl(4) when we receive a Notify. 
*/ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. */ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_ERROR", ACPI_LV_ERROR}, {"ACPI_LV_WARN", ACPI_LV_WARN}, {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_ALL_EXCEPTIONS", 
ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char 
*layer, *level;

    /* At boot, start from a clean slate before applying the tunables. */
    if (cold) {
        AcpiDbgLayer = 0;
        AcpiDbgLevel = 0;
    }

    layer = getenv("debug.acpi.layer");
    level = getenv("debug.acpi.level");
    if (layer == NULL && level == NULL)
        return;

    printf("ACPI set debug");
    if (layer != NULL) {
        if (strcmp("NONE", layer) != 0)
            printf(" layer '%s'", layer);
        acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
        freeenv(layer);
    }
    if (level != NULL) {
        if (strcmp("NONE", level) != 0)
            printf(" level '%s'", level);
        acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
        freeenv(level);
    }
    printf("\n");
}

SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
    NULL);

/*
 * Sysctl handler shared by debug.acpi.layer and debug.acpi.level:
 * reports the currently-set bit names as a string, and on write stores
 * the new string in the kernel environment and re-runs the parser.
 * The oid_arg1 string selects which register/table pair to use.
 */
static int
acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
    int	error, *dbg;
    struct	debugtag *tag;
    struct	sbuf sb;

    if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
        return (ENOMEM);
    if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
        tag = &dbg_layer[0];
        dbg = &AcpiDbgLayer;
    } else {
        tag = &dbg_level[0];
        dbg = &AcpiDbgLevel;
    }

    /* Get old values if this is a get request. */
    if (*dbg == 0) {
        sbuf_cpy(&sb, "NONE");
    } else if (req->newptr == NULL) {
        for (; tag->name != NULL; tag++) {
            if ((*dbg & tag->value) == tag->value)
                sbuf_printf(&sb, "%s ", tag->name);
        }
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);

    /* Copy out the old values to the user. */
    error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
    sbuf_delete(&sb);

    /* If the user is setting a string, parse it. */
    if (error == 0 && req->newptr != NULL) {
        *dbg = 0;
        /* Store in the environment so acpi_set_debugging() can parse it. */
        setenv((char *)oidp->oid_arg1, (char *)req->newptr);
        acpi_set_debugging(NULL);
    }

    return (error);
}

SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
    "debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
    "debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
#endif

/*
 * power_pm(9) backend: translate generic power-management commands
 * into ACPI sleep-state transitions.
 */
static int
acpi_pm_func(u_long cmd, void *arg, ...)
{
    int	state, acpi_state;
    int	error;
    struct	acpi_softc *sc;
    va_list	ap;

    error = 0;
    switch (cmd) {
    case POWER_CMD_SUSPEND:
        sc = (struct acpi_softc *)arg;
        if (sc == NULL) {
            error = EINVAL;
            goto out;
        }

        /* The requested sleep flavor is passed as a variadic int. */
        va_start(ap, arg);
        state = va_arg(ap, int);
        va_end(ap);

        switch (state) {
        case POWER_SLEEP_STATE_STANDBY:
            acpi_state = sc->acpi_standby_sx;
            break;
        case POWER_SLEEP_STATE_SUSPEND:
            acpi_state = sc->acpi_suspend_sx;
            break;
        case POWER_SLEEP_STATE_HIBERNATE:
            acpi_state = ACPI_STATE_S4;
            break;
        default:
            error = EINVAL;
            goto out;
        }

        acpi_SetSleepState(sc, acpi_state);
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    return (error);
}

/*
 * Register ACPI as the system's power-management backend.  Only done
 * during boot ('cold') and only when the acpi device is not disabled.
 */
static void
acpi_pm_register(void *arg)
{
    if (!cold || resource_disabled("acpi", 0))
        return;

    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
}

SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0);

Index: head/sys/dev/acpica/acpi_acad.c
===================================================================
--- head/sys/dev/acpica/acpi_acad.c	(revision 129878)
+++ head/sys/dev/acpica/acpi_acad.c	(revision 129879)
@@ -1,277 +1,278 @@
/*-
 * Copyright (c) 2000 Takanori Watanabe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_acpi.h"
/* NOTE(review): the <...> header names below were lost in extraction
 * (and one carries a stray diff '+'); restore them from the repository. */
#include
#include
#include
#include
#include
#include
#include
+#include
#include
#include
#include "acpi.h"
#include
#include

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_AC_ADAPTER
ACPI_MODULE_NAME("AC_ADAPTER")

/* Number of times to retry initialization before giving up. */
#define ACPI_ACAD_RETRY_MAX	6

/* Notify codes handled by acpi_acad_notify_handler(). */
#define ACPI_DEVICE_CHECK_PNP		0x00
#define ACPI_DEVICE_CHECK_EXISTENCE	0x01
#define ACPI_POWERSOURCE_STAT_CHANGE	0x80

struct acpi_acad_softc {
    int	status;		/* last _PSR result; -1 if evaluation failed */
    int	initializing;	/* guards against re-entering init_acline */
};

static void	acpi_acad_get_status(void *);
static void	acpi_acad_notify_handler(ACPI_HANDLE, UINT32, void *);
static int	acpi_acad_probe(device_t);
static int	acpi_acad_attach(device_t);
static int	acpi_acad_ioctl(u_long, caddr_t, void *);
static int	acpi_acad_sysctl(SYSCTL_HANDLER_ARGS);
static void	acpi_acad_init_acline(void *arg);

static device_method_t acpi_acad_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_acad_probe),
    DEVMETHOD(device_attach, acpi_acad_attach),

    {0, 0}
};

static driver_t acpi_acad_driver = {
    "acpi_acad",
    acpi_acad_methods,
    sizeof(struct acpi_acad_softc),
};

static devclass_t acpi_acad_devclass;
DRIVER_MODULE(acpi_acad, acpi, acpi_acad_driver, acpi_acad_devclass, 0, 0);
MODULE_DEPEND(acpi_acad, acpi, 1, 1, 1);

/*
 * Evaluate _PSR and cache the result in sc->status; on a change,
 * update the system power profile and notify userland.
 */
static void
acpi_acad_get_status(void *context)
{
    struct	acpi_acad_softc *sc;
    device_t	dev;
    ACPI_HANDLE	h;
    int	newstatus;

    dev = context;
    sc = device_get_softc(dev);
h = acpi_get_handle(dev);
    if (ACPI_FAILURE(acpi_GetInteger(h, "_PSR", &newstatus))) {
        /* -1 marks the reading as invalid/unknown. */
        sc->status = -1;
        return;
    }

    if (sc->status != newstatus) {
        sc->status = newstatus;

        /* Set system power profile based on AC adapter status */
        power_profile_set_state(sc->status ? POWER_PROFILE_PERFORMANCE :
            POWER_PROFILE_ECONOMY);

        ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
            "%s Line\n", sc->status ? "On" : "Off");

        acpi_UserNotify("ACAD", h, sc->status);
    }
}

/*
 * Handle device/system notifies by queueing a deferred status refresh.
 */
static void
acpi_acad_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
    device_t	dev;

    dev = (device_t)context;
    switch (notify) {
    case ACPI_DEVICE_CHECK_PNP:
    case ACPI_DEVICE_CHECK_EXISTENCE:
    case ACPI_POWERSOURCE_STAT_CHANGE:
        /* Temporarily. It is better to notify policy manager */
        AcpiOsQueueForExecution(OSD_PRIORITY_LO,
            acpi_acad_get_status, context);
        break;
    default:
        device_printf(dev, "unknown notify %#x\n", notify);
        break;
    }
}

/* Match the ACPI0003 (AC adapter) HID. */
static int
acpi_acad_probe(device_t dev)
{
    if (acpi_get_type(dev) == ACPI_TYPE_DEVICE &&
        !acpi_disabled("acad") &&
        acpi_MatchHid(dev, "ACPI0003")) {

        device_set_desc(dev, "AC Adapter");
        return (0);
    }
    return (ENXIO);
}

/*
 * Register the status ioctl and (for unit 0) the hw.acpi.acline sysctl,
 * install notify handlers, and queue the deferred initial status poll.
 */
static int
acpi_acad_attach(device_t dev)
{
    struct	acpi_acad_softc *sc;
    struct	acpi_softc *acpi_sc;
    ACPI_HANDLE	handle;
    int	error;

    sc = device_get_softc(dev);
    if (sc == NULL)
        return (ENXIO);
    handle = acpi_get_handle(dev);

    error = acpi_register_ioctl(ACPIIO_ACAD_GET_STATUS, acpi_acad_ioctl,
        dev);
    if (error != 0)
        return (error);

    if (device_get_unit(dev) == 0) {
        acpi_sc = acpi_device_get_parent_softc(dev);
        SYSCTL_ADD_PROC(&acpi_sc->acpi_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
            OID_AUTO, "acline", CTLTYPE_INT | CTLFLAG_RD,
            &sc->status, 0, acpi_acad_sysctl, "I", "");
    }

    /* Get initial status after whole system is up. */
    sc->status = -1;
    sc->initializing = 0;

    /*
     * Also install a system notify handler even though this is not
     * required by the specification.  The Casio FIVA needs this.
*/ AcpiInstallNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_acad_notify_handler, dev); AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY, acpi_acad_notify_handler, dev); AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_acad_init_acline, dev); return (0); } static int acpi_acad_ioctl(u_long cmd, caddr_t addr, void *arg) { struct acpi_acad_softc *sc; device_t dev; dev = (device_t)arg; sc = device_get_softc(dev); if (sc == NULL) return (ENXIO); /* * No security check required: information retrieval only. If * new functions are added here, a check might be required. */ switch (cmd) { case ACPIIO_ACAD_GET_STATUS: acpi_acad_get_status(dev); *(int *)addr = sc->status; break; default: break; } return (0); } static int acpi_acad_sysctl(SYSCTL_HANDLER_ARGS) { int val, error; if (acpi_acad_get_acline(&val) != 0) return (ENXIO); val = *(u_int *)oidp->oid_arg1; error = sysctl_handle_int(oidp, &val, 0, req); return (error); } static void acpi_acad_init_acline(void *arg) { struct acpi_acad_softc *sc; device_t dev; int retry, status; dev = (device_t)arg; sc = device_get_softc(dev); if (sc->initializing) return; sc->initializing = 1; ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev), "acline initialization start\n"); status = 0; for (retry = 0; retry < ACPI_ACAD_RETRY_MAX; retry++) { acpi_acad_get_status(dev); if (status != sc->status) break; AcpiOsSleep(10, 0); } sc->initializing = 0; ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev), "acline initialization done, tried %d times\n", retry + 1); } /* * Public interfaces. 
*/
/*
 * Refresh and return the AC line state of unit 0 via *status.
 * Returns ENXIO when the device or its softc is missing.
 */
int
acpi_acad_get_acline(int *status)
{
    struct	acpi_acad_softc *sc;
    device_t	dev;

    dev = devclass_get_device(acpi_acad_devclass, 0);
    if (dev == NULL)
        return (ENXIO);
    sc = device_get_softc(dev);
    if (sc == NULL)
        return (ENXIO);

    acpi_acad_get_status(dev);
    *status = sc->status;

    return (0);
}

Index: head/sys/dev/acpica/acpi_button.c
===================================================================
--- head/sys/dev/acpica/acpi_button.c	(revision 129878)
+++ head/sys/dev/acpica/acpi_button.c	(revision 129879)
@@ -1,276 +1,277 @@
/*-
 * Copyright (c) 2000 Mitsaru IWASAKI
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_acpi.h"
/* NOTE(review): the <...> header names below were lost in extraction
 * (and one carries a stray diff '+'); restore them from the repository. */
#include
#include
+#include
#include
#include "acpi.h"
#include

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_BUTTON
ACPI_MODULE_NAME("BUTTON")

struct acpi_button_softc {
    device_t	button_dev;
    ACPI_HANDLE	button_handle;
    boolean_t	button_type;	/* power or sleep button, see below */
#define ACPI_POWER_BUTTON	0
#define ACPI_SLEEP_BUTTON	1
    boolean_t	fixed;		/* true for FADT fixed-feature buttons */
};

#define ACPI_NOTIFY_BUTTON_PRESSED_FOR_SLEEP	0x80
#define ACPI_NOTIFY_BUTTON_PRESSED_FOR_WAKEUP	0x02

static int	acpi_button_probe(device_t dev);
static int	acpi_button_attach(device_t dev);
static int	acpi_button_suspend(device_t dev);
static int	acpi_button_resume(device_t dev);
static void	acpi_button_notify_handler(ACPI_HANDLE h, UINT32 notify,
		    void *context);
static ACPI_STATUS acpi_button_fixed_handler(void *context);
static void	acpi_button_notify_sleep(void *arg);
static void	acpi_button_notify_wakeup(void *arg);

static device_method_t acpi_button_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_button_probe),
    DEVMETHOD(device_attach,	acpi_button_attach),
    DEVMETHOD(device_suspend,	acpi_button_suspend),
    DEVMETHOD(device_shutdown,	acpi_button_suspend),
    DEVMETHOD(device_resume,	acpi_button_resume),
    {0, 0}
};

static driver_t acpi_button_driver = {
    "acpi_button",
    acpi_button_methods,
    sizeof(struct acpi_button_softc),
};

static devclass_t acpi_button_devclass;
DRIVER_MODULE(acpi_button, acpi, acpi_button_driver, acpi_button_devclass,
    0, 0);
MODULE_DEPEND(acpi_button, acpi, 1, 1, 1);

/*
 * Match power/sleep buttons by HID; the ACPI_FPB/ACPI_FSB ids denote
 * the FADT fixed-feature variants (sc->fixed).
 */
static int
acpi_button_probe(device_t dev)
{
    struct	acpi_button_softc *sc;
    int	ret = ENXIO;

    sc = device_get_softc(dev);
    if (acpi_get_type(dev) == ACPI_TYPE_DEVICE &&
        !acpi_disabled("button")) {

        if (acpi_MatchHid(dev, "PNP0C0C")) {
            device_set_desc(dev, "Power Button");
            sc->button_type = ACPI_POWER_BUTTON;
            ret = 0;
        } else if (acpi_MatchHid(dev, "ACPI_FPB")) {
            device_set_desc(dev, "Power Button (fixed)");
            sc->button_type = ACPI_POWER_BUTTON;
            sc->fixed = 1;
            ret = 0;
        } else if
(acpi_MatchHid(dev, "PNP0C0E")) {
            device_set_desc(dev, "Sleep Button");
            sc->button_type = ACPI_SLEEP_BUTTON;
            ret = 0;
        } else if (acpi_MatchHid(dev, "ACPI_FSB")) {
            device_set_desc(dev, "Sleep Button (fixed)");
            sc->button_type = ACPI_SLEEP_BUTTON;
            sc->fixed = 1;
            ret = 0;
        }
    }
    return (ret);
}

/*
 * Install either a fixed-event handler (FADT buttons) or a device
 * notify handler (AML buttons), then enable the wake GPE.
 */
static int
acpi_button_attach(device_t dev)
{
    struct	acpi_button_softc *sc;
    ACPI_STATUS	status;
    int	event;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->button_dev = dev;
    sc->button_handle = acpi_get_handle(dev);
    event = (sc->button_type == ACPI_SLEEP_BUTTON) ?
        ACPI_EVENT_SLEEP_BUTTON : ACPI_EVENT_POWER_BUTTON;

    /*
     * Install the new handler.  We could remove any fixed handlers added
     * from the FADT once we have a duplicate from the AML but some systems
     * only return events on one or the other so we have to keep both.
     */
    if (sc->fixed) {
        AcpiClearEvent(event);
        status = AcpiInstallFixedEventHandler(event,
            acpi_button_fixed_handler, sc);
    } else {
        /*
         * If a system does not get lid events, it may make sense to change
         * the type to ACPI_ALL_NOTIFY.  Some systems generate both a wake
         * and runtime notify in that case though.
         */
        status = AcpiInstallNotifyHandler(sc->button_handle,
            ACPI_DEVICE_NOTIFY, acpi_button_notify_handler, sc);
    }
    if (ACPI_FAILURE(status)) {
        device_printf(sc->button_dev,
            "couldn't install notify handler - %s\n",
            AcpiFormatException(status));
        return_VALUE (ENXIO);
    }

    /* Enable the GPE for wake/runtime.
     */
    acpi_wake_init(dev, ACPI_GPE_TYPE_WAKE_RUN);
    acpi_wake_set_enable(dev, 1);

    return_VALUE (0);
}

/* Prepare the button's wake GPE for the target sleep state. */
static int
acpi_button_suspend(device_t dev)
{
    struct acpi_softc	*acpi_sc;

    acpi_sc = acpi_device_get_parent_softc(dev);
    acpi_wake_sleep_prep(dev, acpi_sc->acpi_sstate);
    return (0);
}

static int
acpi_button_resume(device_t dev)
{
    acpi_wake_run_prep(dev);
    return (0);
}

/*
 * Deferred handler for a button press while the system is running:
 * notify userland and invoke the matching sleep-policy event.
 */
static void
acpi_button_notify_sleep(void *arg)
{
    struct	acpi_button_softc *sc;
    struct	acpi_softc *acpi_sc;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = (struct acpi_button_softc *)arg;
    acpi_sc = acpi_device_get_parent_softc(sc->button_dev);
    if (acpi_sc == NULL)
        return_VOID;

    acpi_UserNotify("Button", sc->button_handle, sc->button_type);

    switch (sc->button_type) {
    case ACPI_POWER_BUTTON:
        ACPI_VPRINT(sc->button_dev, acpi_sc, "power button pressed\n");
        acpi_event_power_button_sleep(acpi_sc);
        break;
    case ACPI_SLEEP_BUTTON:
        ACPI_VPRINT(sc->button_dev, acpi_sc, "sleep button pressed\n");
        acpi_event_sleep_button_sleep(acpi_sc);
        break;
    default:
        break;		/* unknown button type */
    }
}

/*
 * Deferred handler for a button press that woke the system: notify
 * userland and invoke the matching wake-policy event.
 */
static void
acpi_button_notify_wakeup(void *arg)
{
    struct	acpi_button_softc *sc;
    struct	acpi_softc *acpi_sc;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = (struct acpi_button_softc *)arg;
    acpi_sc = acpi_device_get_parent_softc(sc->button_dev);
    if (acpi_sc == NULL)
        return_VOID;

    acpi_UserNotify("Button", sc->button_handle, sc->button_type);

    switch (sc->button_type) {
    case ACPI_POWER_BUTTON:
        ACPI_VPRINT(sc->button_dev, acpi_sc, "wakeup by power button\n");
        acpi_event_power_button_wake(acpi_sc);
        break;
    case ACPI_SLEEP_BUTTON:
        ACPI_VPRINT(sc->button_dev, acpi_sc, "wakeup by sleep button\n");
        acpi_event_sleep_button_wake(acpi_sc);
        break;
    default:
        break;		/* unknown button type */
    }
}

/* Dispatch a device notify to the appropriate deferred handler. */
static void
acpi_button_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct	acpi_button_softc *sc;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify);

    sc = (struct acpi_button_softc *)context;
    switch (notify) {
    case
ACPI_NOTIFY_BUTTON_PRESSED_FOR_SLEEP:
        AcpiOsQueueForExecution(OSD_PRIORITY_LO,
            acpi_button_notify_sleep, sc);
        break;
    case ACPI_NOTIFY_BUTTON_PRESSED_FOR_WAKEUP:
        AcpiOsQueueForExecution(OSD_PRIORITY_LO,
            acpi_button_notify_wakeup, sc);
        break;
    default:
        device_printf(sc->button_dev, "unknown notify %#x\n", notify);
        break;
    }
}

/*
 * FADT fixed-event callback: forward to the common notify handler as a
 * "pressed for sleep" event.
 */
static ACPI_STATUS
acpi_button_fixed_handler(void *context)
{
    struct	acpi_button_softc *sc = (struct acpi_button_softc *)context;

    ACPI_FUNCTION_TRACE_PTR((char *)(uintptr_t)__func__, context);

    if (context == NULL)
        return_ACPI_STATUS (AE_BAD_PARAMETER);

    acpi_button_notify_handler(sc->button_handle,
        ACPI_NOTIFY_BUTTON_PRESSED_FOR_SLEEP, sc);
    return_ACPI_STATUS (AE_OK);
}

Index: head/sys/dev/acpica/acpi_cmbat.c
===================================================================
--- head/sys/dev/acpica/acpi_cmbat.c	(revision 129878)
+++ head/sys/dev/acpica/acpi_cmbat.c	(revision 129879)
@@ -1,662 +1,663 @@
/*-
 * Copyright (c) 2000 Munehiro Matsuda
 * Copyright (c) 2000 Takanori Watanabe
 * Copyright (c) 2000 Mitsuru IWASAKI
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_acpi.h"
/* NOTE(review): the <...> header names below were lost in extraction
 * (and one carries a stray diff '+'); restore them from the repository. */
#include
#include
+#include
#include
#include
#include
#include
#include
#include "acpi.h"
#include
#include

MALLOC_DEFINE(M_ACPICMBAT, "acpicmbat",
    "ACPI control method battery data");

/* Number of times to retry initialization before giving up. */
#define ACPI_CMBAT_RETRY_MAX	6

/* Check the battery once a minute. */
#define CMBAT_POLLRATE		(60 * hz)

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_BATTERY
ACPI_MODULE_NAME("BATTERY")

#define ACPI_BATTERY_BST_CHANGE	0x80
#define ACPI_BATTERY_BIF_CHANGE	0x81

struct acpi_cmbat_softc {
    device_t	dev;

    struct acpi_bif bif;		/* cached _BIF (static info) */
    struct acpi_bst bst;		/* cached _BST (dynamic status) */
    struct timespec bif_lastupdated;
    struct timespec bst_lastupdated;
    int	bif_updating;		/* guards re-entrant _BIF fetch */
    int	bst_updating;		/* guards re-entrant _BST fetch */

    int	present;
    int	cap;
    int	min;
    int	full_charge_time;
    int	initializing;		/* guards re-entrant init_battery */
};

static struct timespec	acpi_cmbat_info_lastupdated;

/* XXX: devclass_get_maxunit() don't give us the current allocated units.
 */
static int	acpi_cmbat_units = 0;

static int	acpi_cmbat_info_expired(struct timespec *);
static void	acpi_cmbat_info_updated(struct timespec *);
static void	acpi_cmbat_get_bst(void *);
static void	acpi_cmbat_get_bif(void *);
static void	acpi_cmbat_notify_handler(ACPI_HANDLE, UINT32, void *);
static int	acpi_cmbat_probe(device_t);
static int	acpi_cmbat_attach(device_t);
static int	acpi_cmbat_resume(device_t);
static int	acpi_cmbat_ioctl(u_long, caddr_t, void *);
static int	acpi_cmbat_is_bst_valid(struct acpi_bst*);
static int	acpi_cmbat_is_bif_valid(struct acpi_bif*);
static int	acpi_cmbat_get_total_battinfo(struct acpi_battinfo *);
static void	acpi_cmbat_init_battery(void *);

static device_method_t acpi_cmbat_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_cmbat_probe),
    DEVMETHOD(device_attach, acpi_cmbat_attach),
    DEVMETHOD(device_resume, acpi_cmbat_resume),

    {0, 0}
};

static driver_t acpi_cmbat_driver = {
    "acpi_cmbat",
    acpi_cmbat_methods,
    sizeof(struct acpi_cmbat_softc),
};

static devclass_t acpi_cmbat_devclass;
DRIVER_MODULE(acpi_cmbat, acpi, acpi_cmbat_driver, acpi_cmbat_devclass, 0, 0);
MODULE_DEPEND(acpi_cmbat, acpi, 1, 1, 1);

/*
 * Has the cached info aged past the expiration interval (or never been
 * set)?  A NULL timestamp always counts as expired.
 */
static int
acpi_cmbat_info_expired(struct timespec *lastupdated)
{
    struct timespec	curtime;

    if (lastupdated == NULL)
        return (1);
    if (!timespecisset(lastupdated))
        return (1);

    getnanotime(&curtime);
    timespecsub(&curtime, lastupdated);
    return (curtime.tv_sec < 0 ||
        curtime.tv_sec > acpi_battery_get_info_expire());
}

/* Stamp the given timestamp with the current time. */
static void
acpi_cmbat_info_updated(struct timespec *lastupdated)
{
    if (lastupdated != NULL)
        getnanotime(lastupdated);
}

/*
 * Evaluate _BST and cache its four fields in sc->bst, unless the cache
 * is still fresh or another fetch is already in progress.
 */
static void
acpi_cmbat_get_bst(void *context)
{
    device_t	dev;
    struct	acpi_cmbat_softc *sc;
    ACPI_STATUS	as;
    ACPI_OBJECT	*res;
    ACPI_HANDLE	h;
    ACPI_BUFFER	bst_buffer;

    dev = context;
    sc = device_get_softc(dev);
    h = acpi_get_handle(dev);

    if (!acpi_cmbat_info_expired(&sc->bst_lastupdated))
        return;
    if (sc->bst_updating)
        return;
    sc->bst_updating = 1;

    bst_buffer.Pointer = NULL;
    bst_buffer.Length =
ACPI_ALLOCATE_BUFFER;
    as = AcpiEvaluateObject(h, "_BST", NULL, &bst_buffer);
    if (ACPI_FAILURE(as)) {
        ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
            "error fetching current battery status -- %s\n",
            AcpiFormatException(as));
        goto end;
    }

    /* _BST must be a package of (at least) 4 elements. */
    res = (ACPI_OBJECT *)bst_buffer.Pointer;
    if (!ACPI_PKG_VALID(res, 4)) {
        ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
            "battery status corrupted\n");
        goto end;
    }

    if (acpi_PkgInt32(res, 0, &sc->bst.state) != 0)
        goto end;
    if (acpi_PkgInt32(res, 1, &sc->bst.rate) != 0)
        goto end;
    if (acpi_PkgInt32(res, 2, &sc->bst.cap) != 0)
        goto end;
    if (acpi_PkgInt32(res, 3, &sc->bst.volt) != 0)
        goto end;
    /* Only mark the cache fresh after all fields were decoded. */
    acpi_cmbat_info_updated(&sc->bst_lastupdated);

end:
    if (bst_buffer.Pointer != NULL)
        AcpiOsFree(bst_buffer.Pointer);
    sc->bst_updating = 0;
}

/*
 * Evaluate _BIF and cache its 13 fields in sc->bif, unless the cache
 * is still fresh or another fetch is already in progress.
 */
static void
acpi_cmbat_get_bif(void *context)
{
    device_t	dev;
    struct	acpi_cmbat_softc *sc;
    ACPI_STATUS	as;
    ACPI_OBJECT	*res;
    ACPI_HANDLE	h;
    ACPI_BUFFER	bif_buffer;

    dev = context;
    sc = device_get_softc(dev);
    h = acpi_get_handle(dev);

    if (!acpi_cmbat_info_expired(&sc->bif_lastupdated))
        return;
    if (sc->bif_updating)
        return;
    sc->bif_updating = 1;

    bif_buffer.Pointer = NULL;
    bif_buffer.Length = ACPI_ALLOCATE_BUFFER;
    as = AcpiEvaluateObject(h, "_BIF", NULL, &bif_buffer);
    if (ACPI_FAILURE(as)) {
        ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
            "error fetching current battery info -- %s\n",
            AcpiFormatException(as));
        goto end;
    }

    /* _BIF must be a package of (at least) 13 elements. */
    res = (ACPI_OBJECT *)bif_buffer.Pointer;
    if (!ACPI_PKG_VALID(res, 13)) {
        ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
            "battery info corrupted\n");
        goto end;
    }

    if (acpi_PkgInt32(res, 0, &sc->bif.units) != 0)
        goto end;
    if (acpi_PkgInt32(res, 1, &sc->bif.dcap) != 0)
        goto end;
    if (acpi_PkgInt32(res, 2, &sc->bif.lfcap) != 0)
        goto end;
    if (acpi_PkgInt32(res, 3, &sc->bif.btech) != 0)
        goto end;
    if (acpi_PkgInt32(res, 4, &sc->bif.dvol) != 0)
        goto end;
    if (acpi_PkgInt32(res, 5, &sc->bif.wcap) != 0)
        goto end;
    if (acpi_PkgInt32(res, 6, &sc->bif.lcap) != 0)
        goto end;
    if (acpi_PkgInt32(res, 7,
&sc->bif.gra1) != 0)
        goto end;
    if (acpi_PkgInt32(res, 8, &sc->bif.gra2) != 0)
        goto end;
    if (acpi_PkgStr(res, 9, sc->bif.model, ACPI_CMBAT_MAXSTRLEN) != 0)
        goto end;
    if (acpi_PkgStr(res, 10, sc->bif.serial, ACPI_CMBAT_MAXSTRLEN) != 0)
        goto end;
    if (acpi_PkgStr(res, 11, sc->bif.type, ACPI_CMBAT_MAXSTRLEN) != 0)
        goto end;
    if (acpi_PkgStr(res, 12, sc->bif.oeminfo, ACPI_CMBAT_MAXSTRLEN) != 0)
        goto end;
    /* Only mark the cache fresh after all fields were decoded. */
    acpi_cmbat_info_updated(&sc->bif_lastupdated);

end:
    if (bif_buffer.Pointer != NULL)
        AcpiOsFree(bif_buffer.Pointer);
    sc->bif_updating = 0;
}

/*
 * Invalidate the relevant cached data on battery-change notifies and,
 * for info changes, queue an asynchronous _BIF refetch.
 */
static void
acpi_cmbat_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
    device_t	dev;
    struct	acpi_cmbat_softc *sc;

    dev = (device_t)context;
    if ((sc = device_get_softc(dev)) == NULL)
        return;

    acpi_UserNotify("CMBAT", h, notify);

    switch (notify) {
    case ACPI_NOTIFY_DEVICE_CHECK:
    case ACPI_BATTERY_BST_CHANGE:
        timespecclear(&sc->bst_lastupdated);
        break;
    case ACPI_NOTIFY_BUS_CHECK:
    case ACPI_BATTERY_BIF_CHANGE:
        timespecclear(&sc->bif_lastupdated);
        AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cmbat_get_bif, dev);
        break;
    default:
        break;
    }
}

/* Match the PNP0C0A (control method battery) HID. */
static int
acpi_cmbat_probe(device_t dev)
{
    if (acpi_get_type(dev) == ACPI_TYPE_DEVICE &&
        !acpi_disabled("cmbat") &&
        acpi_MatchHid(dev, "PNP0C0A")) {
        device_set_desc(dev, "Control Method Battery");
        return (0);
    }
    return (ENXIO);
}

/*
 * Install notify handlers, register the battery ioctls once (first
 * unit only), register with the battery framework, and queue the
 * deferred battery initialization.
 */
static int
acpi_cmbat_attach(device_t dev)
{
    int	error;
    ACPI_HANDLE	handle;
    struct acpi_cmbat_softc *sc;

    if ((sc = device_get_softc(dev)) == NULL)
        return (ENXIO);
    handle = acpi_get_handle(dev);

    /*
     * Install a system notify handler in addition to the device notify.
     * Toshiba notebook uses this alternate notify for its battery.
     */
    AcpiInstallNotifyHandler(handle, ACPI_SYSTEM_NOTIFY,
        acpi_cmbat_notify_handler, dev);
    AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY,
        acpi_cmbat_notify_handler, dev);

    sc->bif_updating = sc->bst_updating = 0;
    sc->dev = dev;
    timespecclear(&sc->bif_lastupdated);
    timespecclear(&sc->bst_lastupdated);

    /* Register the shared ioctls only for the first battery unit. */
    if (acpi_cmbat_units == 0) {
        error = acpi_register_ioctl(ACPIIO_CMBAT_GET_BIF,
            acpi_cmbat_ioctl, NULL);
        if (error != 0)
            return (error);
        error = acpi_register_ioctl(ACPIIO_CMBAT_GET_BST,
            acpi_cmbat_ioctl, NULL);
        if (error != 0)
            return (error);
    }

    error = acpi_battery_register(ACPI_BATT_TYPE_CMBAT, acpi_cmbat_units);
    if (error != 0)
        return (error);

    acpi_cmbat_units++;
    timespecclear(&acpi_cmbat_info_lastupdated);
    sc->initializing = 0;

    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cmbat_init_battery, dev);

    return (0);
}

/* Re-run the deferred battery initialization after resume. */
static int
acpi_cmbat_resume(device_t dev)
{
    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cmbat_init_battery, dev);
    return (0);
}

/*
 * Ioctl backend: copy the (refreshed) cached _BIF or _BST data for the
 * unit selected by ioctl_arg->unit out to the caller.
 */
static int
acpi_cmbat_ioctl(u_long cmd, caddr_t addr, void *arg)
{
    device_t	dev;
    union acpi_battery_ioctl_arg	*ioctl_arg;
    struct	acpi_cmbat_softc *sc;
    struct	acpi_bif *bifp;
    struct	acpi_bst *bstp;

    ioctl_arg = (union acpi_battery_ioctl_arg *)addr;
    dev = devclass_get_device(acpi_cmbat_devclass, ioctl_arg->unit);
    if (dev == NULL)
        return (ENXIO);
    sc = device_get_softc(dev);
    if (sc == NULL)
        return (ENXIO);

    /*
     * No security check required: information retrieval only.  If
     * new functions are added here, a check might be required.
     */
    switch (cmd) {
    case ACPIIO_CMBAT_GET_BIF:
        acpi_cmbat_get_bif(dev);
        bifp = &ioctl_arg->bif;
        bifp->units = sc->bif.units;
        bifp->dcap = sc->bif.dcap;
        bifp->lfcap = sc->bif.lfcap;
        bifp->btech = sc->bif.btech;
        bifp->dvol = sc->bif.dvol;
        bifp->wcap = sc->bif.wcap;
        bifp->lcap = sc->bif.lcap;
        bifp->gra1 = sc->bif.gra1;
        bifp->gra2 = sc->bif.gra2;
        strncpy(bifp->model, sc->bif.model, sizeof(sc->bif.model));
        strncpy(bifp->serial, sc->bif.serial, sizeof(sc->bif.serial));
        strncpy(bifp->type, sc->bif.type, sizeof(sc->bif.type));
        strncpy(bifp->oeminfo, sc->bif.oeminfo, sizeof(sc->bif.oeminfo));
        break;
    case ACPIIO_CMBAT_GET_BST:
        bstp = &ioctl_arg->bst;
        if (acpi_BatteryIsPresent(dev)) {
            acpi_cmbat_get_bst(dev);
            bstp->state = sc->bst.state;
            bstp->rate = sc->bst.rate;
            bstp->cap = sc->bst.cap;
            bstp->volt = sc->bst.volt;
        } else {
            bstp->state = ACPI_BATT_STAT_NOT_PRESENT;
        }
        break;
    default:
        break;
    }

    return (0);
}

/* A _BST with out-of-range state or 0xffffffff fields is bogus. */
static int
acpi_cmbat_is_bst_valid(struct acpi_bst *bst)
{
    if (bst->state >= ACPI_BATT_STAT_MAX || bst->cap == 0xffffffff ||
        bst->volt == 0xffffffff)
        return (0);
    else
        return (1);
}

/* A _BIF with zero last-full-capacity would divide by zero below. */
static int
acpi_cmbat_is_bif_valid(struct acpi_bif *bif)
{
    if (bif->lfcap == 0)
        return (0);
    else
        return (1);
}

/*
 * Aggregate status across all battery units: overall capacity (%),
 * remaining time (minutes), and a combined state mask.
 */
static int
acpi_cmbat_get_total_battinfo(struct acpi_battinfo *battinfo)
{
    int	i;
    int	error;
    int	batt_stat;
    int	valid_rate, valid_units;
    int	cap, min;
    int	total_cap, total_min, total_full;
    device_t	dev;
    struct	acpi_cmbat_softc *sc;
    static int	bat_units = 0;
    static struct acpi_cmbat_softc **bat = NULL;

    cap = min = -1;
    batt_stat = ACPI_BATT_STAT_NOT_PRESENT;
    error = 0;

    /* Allocate array of softc pointers */
    if (bat_units != acpi_cmbat_units) {
        /* Unit count changed: discard the stale cache. */
        if (bat != NULL) {
            free(bat, M_ACPICMBAT);
            bat = NULL;
        }
        bat_units = 0;
    }
    if (bat == NULL) {
        bat_units = acpi_cmbat_units;
        bat = malloc(sizeof(struct acpi_cmbat_softc *) * bat_units,
            M_ACPICMBAT, M_NOWAIT);
        if (bat == NULL) {
            error = ENOMEM;
            goto out;
        }

        /* Collect softc pointers */
        for (i = 0; i < acpi_cmbat_units; i++) {
            if ((dev =
devclass_get_device(acpi_cmbat_devclass, i)) == NULL) {
                error = ENXIO;
                goto out;
            }
            if ((sc = device_get_softc(dev)) == NULL) {
                error = ENXIO;
                goto out;
            }
            bat[i] = sc;
        }
    }

    /* Get battery status, valid rate and valid units */
    batt_stat = valid_rate = valid_units = 0;
    for (i = 0; i < acpi_cmbat_units; i++) {
        bat[i]->present = acpi_BatteryIsPresent(bat[i]->dev);
        if (!bat[i]->present)
            continue;
        acpi_cmbat_get_bst(bat[i]->dev);

        /* If battery not installed, we get strange values */
        if (!acpi_cmbat_is_bst_valid(&(bat[i]->bst)) ||
            !acpi_cmbat_is_bif_valid(&(bat[i]->bif))) {
            bat[i]->present = 0;
            continue;
        }
        valid_units++;
        /* Per-battery capacity as a percentage of last full capacity. */
        bat[i]->cap = 100 * bat[i]->bst.cap / bat[i]->bif.lfcap;
        batt_stat |= bat[i]->bst.state;

        if (bat[i]->bst.rate > 0) {
            /*
             * XXX Hack to calculate total battery time.
             * Systems with 2 or more battries, they may get used
             * one by one, thus bst.rate is set only to the one
             * in use.  For remaining batteries bst.rate = 0, which
             * makes it impossible to calculate remaining time.
             * Some other systems may need sum of bst.rate in
             * dis-charging state.
             * There for we sum up the bst.rate that is valid
             * (in dis-charging state), and use the sum to
             * calcutate remaining batteries' time.
             */
            if (bat[i]->bst.state & ACPI_BATT_STAT_DISCHARG)
                valid_rate += bat[i]->bst.rate;
        }
    }

    /* Calculate total battery capacity and time */
    total_cap = total_min = total_full = 0;
    for (i = 0; i < acpi_cmbat_units; i++) {
        if (!bat[i]->present)
            continue;

        if (valid_rate > 0) {
            /* Use the sum of bst.rate */
            bat[i]->min = 60 * bat[i]->bst.cap / valid_rate;
        } else if (bat[i]->full_charge_time > 0) {
            bat[i]->min = (bat[i]->full_charge_time * bat[i]->cap) / 100;
        } else {
            /* Couldn't find valid rate and full battery time */
            bat[i]->min = 0;
        }
        total_min += bat[i]->min;
        total_cap += bat[i]->cap;
        total_full += bat[i]->full_charge_time;
    }

    /* Battery life */
    if (valid_units == 0) {
        cap = -1;
        batt_stat = ACPI_BATT_STAT_NOT_PRESENT;
    } else {
        cap = total_cap / valid_units;
    }

    /* Battery time */
    if (valid_units == 0) {
        min = -1;
    } else if (valid_rate == 0 || (batt_stat & ACPI_BATT_STAT_CHARGING)) {
        /* Charging (or no usable rate): estimate from full-charge time. */
        if (total_full == 0)
            min = -1;
        else
            min = (total_full * cap) / 100;
    } else {
        min = total_min;
    }
    acpi_cmbat_info_updated(&acpi_cmbat_info_lastupdated);

out:
    battinfo->cap = cap;
    battinfo->min = min;
    battinfo->state = batt_stat;

    return (error);
}

/*
 * Deferred battery initialization: poll until _BST/_BIF return sane
 * data, sleeping between attempts, up to ACPI_CMBAT_RETRY_MAX tries.
 */
static void
acpi_cmbat_init_battery(void *arg)
{
    int	retry;
    device_t	dev = (device_t)arg;
    struct acpi_cmbat_softc *sc = device_get_softc(dev);

    if (sc->initializing)
        return;
    sc->initializing = 1;

    ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
        "battery initialization start\n");

    for (retry = 0; retry < ACPI_CMBAT_RETRY_MAX;
        retry++, AcpiOsSleep(10, 0)) {
        sc->present = acpi_BatteryIsPresent(dev);
        if (!sc->present)
            continue;
        /* Force a refetch by clearing both cache timestamps. */
        timespecclear(&sc->bst_lastupdated);
        timespecclear(&sc->bif_lastupdated);
        acpi_cmbat_get_bst(dev);
        if (!acpi_cmbat_is_bst_valid(&sc->bst))
            continue;
        acpi_cmbat_get_bif(dev);
        if (!acpi_cmbat_is_bif_valid(&sc->bif))
            continue;
        break;
    }

    sc->initializing = 0;

    if (retry == ACPI_CMBAT_RETRY_MAX) {
        ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
            "battery initialization failed, giving up\n");
    } else {
        ACPI_VPRINT(dev,
acpi_device_get_parent_softc(dev), "battery initialization done, tried %d times\n", retry + 1); } } /* * Public interfaces. */ int acpi_cmbat_get_battinfo(int unit, struct acpi_battinfo *battinfo) { int error; device_t dev; struct acpi_cmbat_softc *sc; if (unit == -1) return (acpi_cmbat_get_total_battinfo(battinfo)); if (acpi_cmbat_info_expired(&acpi_cmbat_info_lastupdated)) { error = acpi_cmbat_get_total_battinfo(battinfo); if (error) goto out; } error = 0; if (unit >= acpi_cmbat_units) { error = ENXIO; goto out; } if ((dev = devclass_get_device(acpi_cmbat_devclass, unit)) == NULL) { error = ENXIO; goto out; } if ((sc = device_get_softc(dev)) == NULL) { error = ENXIO; goto out; } if (!sc->present) { battinfo->cap = -1; battinfo->min = -1; battinfo->state = ACPI_BATT_STAT_NOT_PRESENT; } else { battinfo->cap = sc->cap; battinfo->min = sc->min; battinfo->state = sc->bst.state; } out: return (error); } Index: head/sys/dev/acpica/acpi_cpu.c =================================================================== --- head/sys/dev/acpica/acpi_cpu.c (revision 129878) +++ head/sys/dev/acpica/acpi_cpu.c (revision 129879) @@ -1,1127 +1,1128 @@ /*- * Copyright (c) 2003 Nate Lawson (SDG) * Copyright (c) 2001 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include +#include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include #include "acpi.h" #include /* * Support for ACPI Processor devices, including ACPI 2.0 throttling * and C[1-3] sleep states. * * TODO: implement scans of all CPUs to be sure all Cx states are * equivalent. */ /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_PROCESSOR ACPI_MODULE_NAME("PROCESSOR") struct acpi_cx { struct resource *p_lvlx; /* Register to read to enter state. */ uint32_t type; /* C1-3 (C4 and up treated as C3). */ uint32_t trans_lat; /* Transition latency (usec). */ uint32_t power; /* Power consumed (mW). */ }; #define MAX_CX_STATES 8 struct acpi_cx_stats { int long_slp; /* Count of sleeps >= trans_lat. */ int short_slp; /* Count of sleeps < trans_lat. */ }; struct acpi_cpu_softc { device_t cpu_dev; ACPI_HANDLE cpu_handle; uint32_t acpi_id; /* ACPI processor id */ uint32_t cpu_p_blk; /* ACPI P_BLK location */ uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). */ struct resource *cpu_p_cnt; /* Throttling control register */ struct acpi_cx cpu_cx_states[MAX_CX_STATES]; int cpu_cx_count; /* Number of valid Cx states. 
*/ }; #define CPU_GET_REG(reg, width) \ (bus_space_read_ ## width(rman_get_bustag((reg)), \ rman_get_bushandle((reg)), 0)) #define CPU_SET_REG(reg, width, val) \ (bus_space_write_ ## width(rman_get_bustag((reg)), \ rman_get_bushandle((reg)), 0, (val))) /* * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and * reported to the user in tenths of a percent. */ static uint32_t cpu_duty_offset; static uint32_t cpu_duty_width; #define CPU_MAX_SPEED (1 << cpu_duty_width) #define CPU_SPEED_PERCENT(x) ((1000 * (x)) / CPU_MAX_SPEED) #define CPU_SPEED_PRINTABLE(x) (CPU_SPEED_PERCENT(x) / 10), \ (CPU_SPEED_PERCENT(x) % 10) #define CPU_P_CNT_THT_EN (1<<4) #define PM_USEC(x) ((x) >> 2) /* ~4 clocks per usec (3.57955 Mhz) */ #define ACPI_CPU_NOTIFY_PERF_STATES 0x80 /* _PSS changed. */ #define ACPI_CPU_NOTIFY_CX_STATES 0x81 /* _CST changed. */ #define CPU_QUIRK_NO_C3 0x0001 /* C3-type states are not usable. */ #define CPU_QUIRK_NO_THROTTLE 0x0002 /* Throttling is not usable. */ #define PCI_VENDOR_INTEL 0x8086 #define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */ #define PCI_REVISION_A_STEP 0 #define PCI_REVISION_B_STEP 1 #define PCI_REVISION_4E 2 #define PCI_REVISION_4M 3 /* Platform hardware resource information. */ static uint32_t cpu_smi_cmd; /* Value to write to SMI_CMD. */ static uint8_t cpu_pstate_cnt;/* Register to take over throttling. */ static uint8_t cpu_cst_cnt; /* Indicate we are _CST aware. */ static uint32_t cpu_rid; /* Driver-wide resource id. */ static uint32_t cpu_quirks; /* Indicate any hardware bugs. */ /* Runtime state. */ static int cpu_cx_count; /* Number of valid states */ static uint32_t cpu_cx_next; /* State to use for next sleep. */ static uint32_t cpu_non_c3; /* Index of lowest non-C3 state. */ static struct acpi_cx_stats cpu_cx_stats[MAX_CX_STATES]; static int cpu_idle_busy; /* Count of CPUs in acpi_cpu_idle. */ /* Values for sysctl. 
*/ static uint32_t cpu_throttle_state; static uint32_t cpu_throttle_max; static int cpu_cx_lowest; static char cpu_cx_supported[64]; static device_t *cpu_devices; static int cpu_ndevices; static struct acpi_cpu_softc **cpu_softc; static struct sysctl_ctx_list acpi_cpu_sysctl_ctx; static struct sysctl_oid *acpi_cpu_sysctl_tree; static int acpi_cpu_probe(device_t dev); static int acpi_cpu_attach(device_t dev); static int acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id); static int acpi_cpu_shutdown(device_t dev); static int acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc); static int acpi_cpu_cx_probe(struct acpi_cpu_softc *sc); static int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc); static void acpi_cpu_startup(void *arg); static void acpi_cpu_startup_throttling(void); static void acpi_cpu_startup_cx(void); static void acpi_cpu_throttle_set(uint32_t speed); static void acpi_cpu_idle(void); static void acpi_cpu_c1(void); static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context); static int acpi_cpu_quirks(struct acpi_cpu_softc *sc); static int acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); static device_method_t acpi_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_cpu_probe), DEVMETHOD(device_attach, acpi_cpu_attach), DEVMETHOD(device_shutdown, acpi_cpu_shutdown), {0, 0} }; static driver_t acpi_cpu_driver = { "cpu", acpi_cpu_methods, sizeof(struct acpi_cpu_softc), }; static devclass_t acpi_cpu_devclass; DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0); MODULE_DEPEND(cpu, acpi, 1, 1, 1); static int acpi_cpu_probe(device_t dev) { int acpi_id, cpu_id, cx_count; ACPI_BUFFER buf; ACPI_HANDLE handle; char msg[32]; ACPI_OBJECT *obj; ACPI_STATUS status; if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR) return (ENXIO); handle = acpi_get_handle(dev); if (cpu_softc == 
NULL) cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) * (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO); /* Get our Processor object. */ buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { device_printf(dev, "probe failed to get Processor obj - %s\n", AcpiFormatException(status)); return (ENXIO); } obj = (ACPI_OBJECT *)buf.Pointer; if (obj->Type != ACPI_TYPE_PROCESSOR) { device_printf(dev, "Processor object has bad type %d\n", obj->Type); AcpiOsFree(obj); return (ENXIO); } /* * Find the processor associated with our unit. We could use the * ProcId as a key, however, some boxes do not have the same values * in their Processor object as the ProcId values in the MADT. */ acpi_id = obj->Processor.ProcId; AcpiOsFree(obj); if (acpi_pcpu_get_id(device_get_unit(dev), &acpi_id, &cpu_id) != 0) return (ENXIO); /* * Check if we already probed this processor. We scan the bus twice * so it's possible we've already seen this one. */ if (cpu_softc[cpu_id] != NULL) return (ENXIO); /* Get a count of Cx states for our device string. */ cx_count = 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(handle, "_CST", NULL, &buf); if (ACPI_SUCCESS(status)) { obj = (ACPI_OBJECT *)buf.Pointer; if (ACPI_PKG_VALID(obj, 2)) acpi_PkgInt32(obj, 0, &cx_count); AcpiOsFree(obj); } else { if (AcpiGbl_FADT->Plvl2Lat <= 100) cx_count++; if (AcpiGbl_FADT->Plvl3Lat <= 1000) cx_count++; if (cx_count > 0) cx_count++; } if (cx_count > 0) snprintf(msg, sizeof(msg), "ACPI CPU (%d Cx states)", cx_count); else strlcpy(msg, "ACPI CPU", sizeof(msg)); device_set_desc_copy(dev, msg); /* Mark this processor as in-use and save our derived id for attach. 
/*
 * Attach an ACPI Processor device: cache its P_BLK information, create a
 * "cpu" sysctl node under the parent ACPI tree, and probe for throttling
 * and Cx state support.  Returns 0 even when neither is available.
 */
static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT *obj;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    int thr_ret, cx_ret;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
    ACPI_ASSERTLOCK;

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    /* acpi_get_magic() carries the cpu_id saved during probe. */
    cpu_softc[acpi_get_magic(dev)] = sc;

    /* Re-evaluate the Processor object to pick up the P_BLK fields. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    acpi_sc = acpi_device_get_parent_softc(dev);
    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
        SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
        CTLFLAG_RD, 0, "");

    /* If this is the first device probed, check for quirks. */
    if (device_get_unit(dev) == 0)
        acpi_cpu_quirks(sc);

    /*
     * Probe for throttling and Cx state support.
     * If none of these is present, free up unused resources.
     */
    thr_ret = acpi_cpu_throttle_probe(sc);
    cx_ret = acpi_cpu_cx_probe(sc);
    if (thr_ret == 0 || cx_ret == 0) {
        /*
         * NOTE(review): status from AcpiInstallNotifyHandler() is
         * assigned but never checked, so a failed registration is
         * silently ignored — confirm whether that is intentional.
         */
        status = AcpiInstallNotifyHandler(sc->cpu_handle,
            ACPI_DEVICE_NOTIFY, acpi_cpu_notify, sc);
        /* Unit 0 schedules the one-time startup pass for all CPUs. */
        if (device_get_unit(dev) == 0)
            AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup,
                NULL);
    } else {
        sysctl_ctx_free(&acpi_cpu_sysctl_ctx);
    }
    return_VALUE (0);
}
/*
 * Find the nth present CPU and return its pc_cpuid as well as set the
 * pc_acpi_id from the most reliable source.  Returns ESRCH when fewer
 * than idx+1 CPUs are present.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pcpu_data;
    uint32_t i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));

    /* Walk the CPU id space, skipping holes left by absent CPUs. */
    for (i = 0; i <= mp_maxid; i++) {
        if (CPU_ABSENT(i))
            continue;
        pcpu_data = pcpu_find(i);
        KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
        if (idx-- == 0) {
            /*
             * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
             * override it with the value from the ASL.  Otherwise, if the
             * two don't match, prefer the MADT-derived value.  Finally,
             * return the pc_cpuid to reference this processor.
             */
            if (pcpu_data->pc_acpi_id == 0xffffffff)
                pcpu_data->pc_acpi_id = *acpi_id;
            else if (pcpu_data->pc_acpi_id != *acpi_id)
                *acpi_id = pcpu_data->pc_acpi_id;
            *cpu_id = pcpu_data->pc_cpuid;
            return (0);
        }
    }

    /* Ran past the last present CPU without reaching the idx'th one. */
    return (ESRCH);
}
*/ duty_end = cpu_duty_offset + cpu_duty_width - 1; if (duty_end > 31) { device_printf(sc->cpu_dev, "CLK_VAL field overflows P_CNT register\n"); return (ENXIO); } if (cpu_duty_offset <= 4 && duty_end >= 4) { device_printf(sc->cpu_dev, "CLK_VAL field overlaps THT_EN bit\n"); return (ENXIO); } /* * If not present, fall back to using the processor's P_BLK to find * the P_CNT register. * * Note that some systems seem to duplicate the P_BLK pointer * across multiple CPUs, so not getting the resource is not fatal. */ buf.Pointer = &obj; buf.Length = sizeof(obj); status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf); if (ACPI_SUCCESS(status)) { if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) { device_printf(sc->cpu_dev, "_PTC buffer too small\n"); return (ENXIO); } memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas)); sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas); if (sc->cpu_p_cnt != NULL) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from _PTC\n", device_get_unit(sc->cpu_dev))); } } /* If _PTC not present or other failure, try the P_BLK. */ if (sc->cpu_p_cnt == NULL) { /* * The spec says P_BLK must be 6 bytes long. However, some * systems use it to indicate a fractional set of features * present so we take anything >= 4. */ if (sc->cpu_p_blk_len < 4) return (ENXIO); gas.Address = sc->cpu_p_blk; gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO; gas.RegisterBitWidth = 32; sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas); if (sc->cpu_p_cnt != NULL) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from P_BLK\n", device_get_unit(sc->cpu_dev))); } else { device_printf(sc->cpu_dev, "Failed to attach throttling P_CNT\n"); return (ENXIO); } } cpu_rid++; return (0); } static int acpi_cpu_cx_probe(struct acpi_cpu_softc *sc) { ACPI_GENERIC_ADDRESS gas; struct acpi_cx *cx_ptr; int error; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Bus mastering arbitration control is needed for C3. 
*/ if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) { cpu_quirks |= CPU_QUIRK_NO_C3; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: No BM control, C3 disabled\n", device_get_unit(sc->cpu_dev))); } /* * First, check for the ACPI 2.0 _CST sleep states object. * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3. */ sc->cpu_cx_count = 0; error = acpi_cpu_cx_cst(sc); if (error != 0) { cx_ptr = sc->cpu_cx_states; /* C1 has been required since just after ACPI 1.0 */ cx_ptr->type = ACPI_STATE_C1; cx_ptr->trans_lat = 0; cpu_non_c3 = 0; cx_ptr++; sc->cpu_cx_count++; /* * The spec says P_BLK must be 6 bytes long. However, some systems * use it to indicate a fractional set of features present so we * take 5 as C2. Some may also have a value of 7 to indicate * another C3 but most use _CST for this (as required) and having * "only" C1-C3 is not a hardship. */ if (sc->cpu_p_blk_len < 5) goto done; /* Validate and allocate resources for C2 (P_LVL2). */ gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO; gas.RegisterBitWidth = 8; if (AcpiGbl_FADT->Plvl2Lat <= 100) { gas.Address = sc->cpu_p_blk + 4; cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas); if (cx_ptr->p_lvlx != NULL) { cpu_rid++; cx_ptr->type = ACPI_STATE_C2; cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat; cpu_non_c3 = 1; cx_ptr++; sc->cpu_cx_count++; } } if (sc->cpu_p_blk_len < 6) goto done; /* Validate and allocate resources for C3 (P_LVL3). */ if (AcpiGbl_FADT->Plvl3Lat <= 1000 && (cpu_quirks & CPU_QUIRK_NO_C3) == 0) { gas.Address = sc->cpu_p_blk + 5; cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas); if (cx_ptr->p_lvlx != NULL) { cpu_rid++; cx_ptr->type = ACPI_STATE_C3; cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat; cx_ptr++; sc->cpu_cx_count++; } } } done: /* If no valid registers were found, don't attach. */ if (sc->cpu_cx_count == 0) return (ENXIO); return (0); } /* * Parse a _CST package and set up its Cx states. 
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
        return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "Invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }

    /* Trust the package's real element count over the declared count. */
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n",
            count);
        count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
        /* Element 0 is the count; Cx sub-packages start at element 1. */
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
            device_printf(sc->cpu_dev, "Skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            /* C1 needs no control register; accept it unconditionally. */
            cpu_non_c3 = i;
            cx_ptr++;
            sc->cpu_cx_count++;
            continue;
        case ACPI_STATE_C2:
            /* Same 100 us C2 latency cutoff as the FADT-based probe. */
            if (cx_ptr->trans_lat > 100) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C2[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            cpu_non_c3 = i;
            break;
        case ACPI_STATE_C3:
        default:
            /* 1000 us cutoff for C3; also honor the no-C3 quirk. */
            if (cx_ptr->trans_lat > 1000 ||
                (cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

#ifdef notyet
        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }
#endif

        /* Allocate the control register for C2 or C3. */
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cpu_rid, &cx_ptr->p_lvlx);
        if (cx_ptr->p_lvlx != NULL) {
            cpu_rid++;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);
    return (0);
}
*/ static void acpi_cpu_startup_throttling() { ACPI_LOCK_DECL; /* Initialise throttling states */ cpu_throttle_max = CPU_MAX_SPEED; cpu_throttle_state = CPU_MAX_SPEED; SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "throttle_max", CTLFLAG_RD, &cpu_throttle_max, 0, "maximum CPU speed"); SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "throttle_state", CTLTYPE_INT | CTLFLAG_RW, &cpu_throttle_state, 0, acpi_cpu_throttle_sysctl, "I", "current CPU speed"); /* If ACPI 2.0+, signal platform that we are taking over throttling. */ ACPI_LOCK; if (cpu_pstate_cnt != 0) AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8); /* Set initial speed to maximum. */ acpi_cpu_throttle_set(cpu_throttle_max); ACPI_UNLOCK; printf("acpi_cpu: throttling enabled, %d steps (100%% to %d.%d%%), " "currently %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1), CPU_SPEED_PRINTABLE(cpu_throttle_state)); } static void acpi_cpu_startup_cx() { struct acpi_cpu_softc *sc; struct sbuf sb; int i; ACPI_LOCK_DECL; sc = device_get_softc(cpu_devices[0]); sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN); for (i = 0; i < cpu_cx_count; i++) sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat); sbuf_trim(&sb); sbuf_finish(&sb); SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported, 0, "Cx/microsecond values for supported Cx states"); SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, NULL, 0, acpi_cpu_cx_lowest_sysctl, "A", "lowest Cx sleep state to use"); SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree), OID_AUTO, "cx_history", CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, acpi_cpu_history_sysctl, "A", "count of full sleeps for Cx state / short sleeps"); #ifdef notyet /* Signal platform that we can handle _CST notification. 
/*
 * Idle the CPU in the lowest state possible.
 * This function is called with interrupts disabled.
 *
 * Picks the next Cx state from recent sleep-length history and bus
 * master activity, enters it via HLT or a P_LVLx read, then measures
 * the time actually spent asleep with the ACPI PM timer.
 */
static void
acpi_cpu_idle()
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint32_t start_time, end_time;
    int bm_active, i, asleep;

    /* If disabled, return immediately. */
    if (cpu_cx_count == 0) {
        ACPI_ENABLE_IRQS();
        return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /* Record that a CPU is in the idle function. */
    atomic_add_int(&cpu_idle_busy, 1);

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is enabled.
     */
    AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
        ACPI_MTX_DO_NOT_LOCK);
    if (bm_active != 0) {
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
            ACPI_MTX_DO_NOT_LOCK);
        cpu_cx_next = min(cpu_cx_next, cpu_non_c3);
    }

    /* Perform the actual sleep based on the Cx-specific semantics. */
    cx_next = &sc->cpu_cx_states[cpu_cx_next];
    switch (cx_next->type) {
    case ACPI_STATE_C0:
        panic("acpi_cpu_idle: attempting to sleep in C0");
        /* NOTREACHED */
    case ACPI_STATE_C1:
        /* Execute HLT (or equivalent) and wait for an interrupt. */
        acpi_cpu_c1();

        /*
         * We can't calculate the time spent in C1 since the place we
         * wake up is an ISR.  Use a constant time of 1 ms.
         */
        start_time = 0;
        end_time = 1000;
        break;
    case ACPI_STATE_C2:
        /*
         * Read from P_LVLx to enter C2, checking time spent asleep.
         * Use the ACPI timer for measuring sleep time.  Since we need to
         * get the time very close to the CPU start/stop clock logic, this
         * is the only reliable time source.
         */
        AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
        CPU_GET_REG(cx_next->p_lvlx, 1);

        /*
         * Read the end time twice.  Since it may take an arbitrary time
         * to enter the idle state, the first read may be executed before
         * the processor has stopped.  Doing it again provides enough
         * margin that we are certain to have a correct value.
         */
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
        ACPI_ENABLE_IRQS();
        break;
    case ACPI_STATE_C3:
    default:
        /* Disable bus master arbitration and enable bus master wakeup. */
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1,
            ACPI_MTX_DO_NOT_LOCK);

        /* Read from P_LVLx to enter C3, checking time spent asleep. */
        AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
        CPU_GET_REG(cx_next->p_lvlx, 1);

        /* Read the end time twice.  See comment for C2 above. */
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
        AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

        /* Enable bus master arbitration and disable bus master wakeup. */
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0,
            ACPI_MTX_DO_NOT_LOCK);
        ACPI_ENABLE_IRQS();
        break;
    }

    /* Find the actual time asleep in microseconds, minus overhead. */
    end_time = acpi_TimerDelta(end_time, start_time);
    asleep = PM_USEC(end_time) - cx_next->trans_lat;

    /*
     * Record statistics.
     * NOTE(review): asleep is a signed int while trans_lat is uint32_t,
     * so this comparison promotes asleep to unsigned; a negative asleep
     * (sleep shorter than trans_lat) would be counted as a long sleep.
     * Confirm whether that is intended.
     */
    if (asleep < cx_next->trans_lat)
        cpu_cx_stats[cpu_cx_next].short_slp++;
    else
        cpu_cx_stats[cpu_cx_next].long_slp++;

    /*
     * If we slept 100 us or more, use the lowest Cx state.
     * Otherwise, find the lowest state that has a latency less than
     * or equal to the length of our last sleep.
     */
    if (asleep >= 100)
        cpu_cx_next = cpu_cx_lowest;
    else {
        for (i = cpu_cx_lowest; i >= 0; i--) {
            if (sc->cpu_cx_states[i].trans_lat <= asleep) {
                cpu_cx_next = i;
                break;
            }
        }
    }

    /* Decrement reference count checked by acpi_cpu_shutdown(). */
    atomic_subtract_int(&cpu_idle_busy, 1);
}
*/ static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context) { struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context; switch (notify) { case ACPI_CPU_NOTIFY_PERF_STATES: device_printf(sc->cpu_dev, "Performance states changed\n"); /* acpi_cpu_px_available(sc); */ break; case ACPI_CPU_NOTIFY_CX_STATES: device_printf(sc->cpu_dev, "Cx states changed\n"); /* acpi_cpu_cx_cst(sc); */ break; default: device_printf(sc->cpu_dev, "Unknown notify %#x\n", notify); break; } } static int acpi_cpu_quirks(struct acpi_cpu_softc *sc) { /* * C3 is not supported on multiple CPUs since this would require * flushing all caches which is currently too expensive. */ if (mp_ncpus > 1) cpu_quirks |= CPU_QUIRK_NO_C3; #ifdef notyet /* Look for various quirks of the PIIX4 part. */ acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); if (acpi_dev != NULL) { switch (pci_get_revid(acpi_dev)) { /* * Disable throttling control on PIIX4 A and B-step. * See specification changes #13 ("Manual Throttle Duty Cycle") * and #14 ("Enabling and Disabling Manual Throttle"), plus * erratum #5 ("STPCLK# Deassertion Time") from the January * 2002 PIIX4 specification update. Note that few (if any) * mobile systems ever used this part. */ case PCI_REVISION_A_STEP: case PCI_REVISION_B_STEP: cpu_quirks |= CPU_QUIRK_NO_THROTTLE; /* FALLTHROUGH */ /* * Disable C3 support for all PIIX4 chipsets. Some of these parts * do not report the BMIDE status to the BM status register and * others have a livelock bug if Type-F DMA is enabled. Linux * works around the BMIDE bug by reading the BM status directly * but we take the simpler approach of disabling C3 for these * parts. * * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA * Livelock") from the January 2002 PIIX4 specification update. * Applies to all PIIX4 models. 
/*
 * Handle changes in the CPU throttling setting.
 * oid_arg1 points at the throttle state variable; values outside
 * [1, cpu_throttle_max] are rejected with EINVAL, and an accepted
 * change is applied via acpi_cpu_throttle_set() under the ACPI lock.
 */
static int
acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS)
{
    uint32_t *argp;
    uint32_t arg;
    int error;
    ACPI_LOCK_DECL;

    argp = (uint32_t *)oidp->oid_arg1;
    arg = *argp;
    error = sysctl_handle_int(oidp, &arg, 0, req);

    /* Error or no new value */
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (arg < 1 || arg > cpu_throttle_max)
        return (EINVAL);

    /* If throttling changed, notify the BIOS of the new rate. */
    ACPI_LOCK;
    if (*argp != arg) {
        *argp = arg;
        acpi_cpu_throttle_set(arg);
    }
    ACPI_UNLOCK;

    return (0);
}
*/ memset(cpu_cx_stats, 0, sizeof(cpu_cx_stats)); return (0); } Index: head/sys/dev/acpica/acpi_ec.c =================================================================== --- head/sys/dev/acpica/acpi_ec.c (revision 129878) +++ head/sys/dev/acpica/acpi_ec.c (revision 129879) @@ -1,994 +1,995 @@ /*- * Copyright (c) 2003 Nate Lawson * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /****************************************************************************** * * 1. Copyright Notice * * Some or all of this work - Copyright (c) 1999, Intel Corp. All rights * reserved. * * 2. License * * 2.1. This is your license from Intel Corp. under its intellectual property * rights. 
You may have additional license terms from the party that provided * you this software, covering your right to use that party's intellectual * property rights. * * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a * copy of the source code appearing in this file ("Covered Code") an * irrevocable, perpetual, worldwide license under Intel's copyrights in the * base code distributed originally by Intel ("Original Intel Code") to copy, * make derivatives, distribute, use and display any portion of the Covered * Code in any form, with the right to sublicense such rights; and * * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent * license (with the right to sublicense), under only those claims of Intel * patents that are infringed by the Original Intel Code, to make, use, sell, * offer to sell, and import the Covered Code and derivative works thereof * solely to the minimum extent necessary to exercise the above copyright * license, and in no event shall the patent license extend to any additions * to or modifications of the Original Intel Code. No other license or right * is granted directly or by implication, estoppel or otherwise; * * The above copyright and patent license is granted only if the following * conditions are met: * * 3. Conditions * * 3.1. Redistribution of Source with Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification with rights to further distribute source must include * the above Copyright Notice, the above License, this list of Conditions, * and the following Disclaimer and Export Compliance provision. In addition, * Licensee must cause all Covered Code to which Licensee contributes to * contain a file documenting the changes Licensee made to create that Covered * Code and the date of any change. Licensee must include in that file the * documentation of any changes made by any predecessor Licensee. 
Licensee * must include a prominent statement that the modification is derived, * directly or indirectly, from Original Intel Code. * * 3.2. Redistribution of Source with no Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification without rights to further distribute source must * include the following Disclaimer and Export Compliance provision in the * documentation and/or other materials provided with distribution. In * addition, Licensee may not authorize further sublicense of source of any * portion of the Covered Code, and must include terms to the effect that the * license from Licensee to its licensee is limited to the intellectual * property embodied in the software Licensee provides to its licensee, and * not to intellectual property embodied in modifications its licensee may * make. * * 3.3. Redistribution of Executable. Redistribution in executable form of any * substantial portion of the Covered Code or modification must reproduce the * above Copyright Notice, and the following Disclaimer and Export Compliance * provision in the documentation and/or other materials provided with the * distribution. * * 3.4. Intel retains all right, title, and interest in and to the Original * Intel Code. * * 3.5. Neither the name Intel nor any other trademark owned or controlled by * Intel shall be used in advertising or otherwise to promote the sale, use or * other dealings in products derived from or relating to the Covered Code * without prior written authorization from Intel. * * 4. Disclaimer and Export Compliance * * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY * UPDATES, ENHANCEMENTS OR EXTENSIONS. 
INTEL SPECIFICALLY DISCLAIMS ANY * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A * PARTICULAR PURPOSE. * * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY * LIMITED REMEDY. * * 4.3. Licensee shall not export, either directly or indirectly, any of this * software or system incorporating such software without first obtaining any * required license or other approval from the U. S. Department of Commerce or * any other agency or department of the United States Government. In the * event Licensee exports any such software from the United States or * re-exports any such software from a foreign destination, Licensee shall * ensure that the distribution and export/re-export of the software is in * compliance with all laws, regulations, orders, or other restrictions of the * U.S. Export Administration Regulations. Licensee agrees that neither it nor * any of its subsidiaries will export/re-export any technical data, process, * software, or service, directly or indirectly, to any country for which the * United States government or any agency thereof requires an export license, * other governmental approval, or letter of assurance, without first obtaining * such license, approval or letter. 
* *****************************************************************************/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include +#include #include #include #include #include #include #include "acpi.h" #include /* * Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_EC ACPI_MODULE_NAME("EC") /* * EC_COMMAND: * ----------- */ typedef UINT8 EC_COMMAND; #define EC_COMMAND_UNKNOWN ((EC_COMMAND) 0x00) #define EC_COMMAND_READ ((EC_COMMAND) 0x80) #define EC_COMMAND_WRITE ((EC_COMMAND) 0x81) #define EC_COMMAND_BURST_ENABLE ((EC_COMMAND) 0x82) #define EC_COMMAND_BURST_DISABLE ((EC_COMMAND) 0x83) #define EC_COMMAND_QUERY ((EC_COMMAND) 0x84) /* * EC_STATUS: * ---------- * The encoding of the EC status register is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the output buffer is full). * +-+-+-+-+-+-+-+-+ * |7|6|5|4|3|2|1|0| * +-+-+-+-+-+-+-+-+ * | | | | | | | | * | | | | | | | +- Output Buffer Full? * | | | | | | +--- Input Buffer Full? * | | | | | +----- * | | | | +------- Data Register is Command Byte? * | | | +--------- Burst Mode Enabled? * | | +----------- SCI Event? * | +------------- SMI Event? 
* +--------------- * */ typedef UINT8 EC_STATUS; #define EC_FLAG_OUTPUT_BUFFER ((EC_STATUS) 0x01) #define EC_FLAG_INPUT_BUFFER ((EC_STATUS) 0x02) #define EC_FLAG_BURST_MODE ((EC_STATUS) 0x10) #define EC_FLAG_SCI ((EC_STATUS) 0x20) /* * EC_EVENT: * --------- */ typedef UINT8 EC_EVENT; #define EC_EVENT_UNKNOWN ((EC_EVENT) 0x00) #define EC_EVENT_OUTPUT_BUFFER_FULL ((EC_EVENT) 0x01) #define EC_EVENT_INPUT_BUFFER_EMPTY ((EC_EVENT) 0x02) #define EC_EVENT_SCI ((EC_EVENT) 0x20) /* * Register access primitives */ #define EC_GET_DATA(sc) \ bus_space_read_1((sc)->ec_data_tag, (sc)->ec_data_handle, 0) #define EC_SET_DATA(sc, v) \ bus_space_write_1((sc)->ec_data_tag, (sc)->ec_data_handle, 0, (v)) #define EC_GET_CSR(sc) \ bus_space_read_1((sc)->ec_csr_tag, (sc)->ec_csr_handle, 0) #define EC_SET_CSR(sc, v) \ bus_space_write_1((sc)->ec_csr_tag, (sc)->ec_csr_handle, 0, (v)) /* Embedded Controller Boot Resources Table (ECDT) */ typedef struct { ACPI_TABLE_HEADER header; ACPI_GENERIC_ADDRESS control; ACPI_GENERIC_ADDRESS data; UINT32 uid; UINT8 gpe_bit; char ec_id[0]; } ACPI_TABLE_ECDT; /* Additional params to pass from the probe routine */ struct acpi_ec_params { int glk; int gpe_bit; ACPI_HANDLE gpe_handle; int uid; }; /* Indicate that this device has already been probed via ECDT. */ #define DEV_ECDT(x) (acpi_get_magic(x) == (int)&acpi_ec_devclass) /* * Driver softc. */ struct acpi_ec_softc { device_t ec_dev; ACPI_HANDLE ec_handle; int ec_uid; ACPI_HANDLE ec_gpehandle; UINT8 ec_gpebit; UINT8 ec_csrvalue; int ec_data_rid; struct resource *ec_data_res; bus_space_tag_t ec_data_tag; bus_space_handle_t ec_data_handle; int ec_csr_rid; struct resource *ec_csr_res; bus_space_tag_t ec_csr_tag; bus_space_handle_t ec_csr_handle; int ec_glk; int ec_glkhandle; struct mtx ec_mtx; int ec_polldelay; }; /* * XXX * I couldn't find it in the spec but other implementations also use a * value of 1 ms for the time to acquire global lock. 
*/ #define EC_LOCK_TIMEOUT 1000 /* * Start with an interval of 1 us for status poll loop. This delay * will be dynamically adjusted based on the actual time waited. */ #define EC_POLL_DELAY 1 /* Total time in ms spent in the poll loop waiting for a response. */ #define EC_POLL_TIMEOUT 100 #define EVENT_READY(event, status) \ (((event) == EC_EVENT_OUTPUT_BUFFER_FULL && \ ((status) & EC_FLAG_OUTPUT_BUFFER) != 0) || \ ((event) == EC_EVENT_INPUT_BUFFER_EMPTY && \ ((status) & EC_FLAG_INPUT_BUFFER) == 0)) static int ec_poll_timeout = EC_POLL_TIMEOUT; TUNABLE_INT("hw.acpi.ec.poll_timeout", &ec_poll_timeout); static __inline ACPI_STATUS EcLock(struct acpi_ec_softc *sc) { ACPI_STATUS status = AE_OK; /* Always acquire this EC's mutex. */ mtx_lock(&sc->ec_mtx); /* If _GLK is non-zero, also acquire the global lock. */ if (sc->ec_glk) { status = AcpiAcquireGlobalLock(EC_LOCK_TIMEOUT, &sc->ec_glkhandle); if (ACPI_FAILURE(status)) mtx_unlock(&sc->ec_mtx); } return (status); } static __inline void EcUnlock(struct acpi_ec_softc *sc) { if (sc->ec_glk) AcpiReleaseGlobalLock(sc->ec_glkhandle); mtx_unlock(&sc->ec_mtx); } static uint32_t EcGpeHandler(void *Context); static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function, void *Context, void **return_Context); static ACPI_STATUS EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 width, ACPI_INTEGER *Value, void *Context, void *RegionContext); static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event); static ACPI_STATUS EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd); static ACPI_STATUS EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data); static ACPI_STATUS EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data); static int acpi_ec_probe(device_t dev); static int acpi_ec_attach(device_t dev); static device_method_t acpi_ec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_ec_probe), DEVMETHOD(device_attach, acpi_ec_attach), {0, 0} }; static driver_t 
acpi_ec_driver = { "acpi_ec", acpi_ec_methods, sizeof(struct acpi_ec_softc), }; static devclass_t acpi_ec_devclass; DRIVER_MODULE(acpi_ec, acpi, acpi_ec_driver, acpi_ec_devclass, 0, 0); MODULE_DEPEND(acpi_ec, acpi, 1, 1, 1); /* * Look for an ECDT and if we find one, set up default GPE and * space handlers to catch attempts to access EC space before * we have a real driver instance in place. * TODO: if people report invalid ECDTs, add a tunable to disable them. */ void acpi_ec_ecdt_probe(device_t parent) { ACPI_TABLE_ECDT *ecdt; ACPI_STATUS status; device_t child; ACPI_HANDLE h; struct acpi_ec_params *params; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Find and validate the ECDT. */ status = AcpiGetFirmwareTable("ECDT", 1, ACPI_LOGICAL_ADDRESSING, (ACPI_TABLE_HEADER **)&ecdt); if (ACPI_FAILURE(status) || ecdt->control.RegisterBitWidth != 8 || ecdt->data.RegisterBitWidth != 8) { return; } /* Create the child device with the given unit number. */ child = BUS_ADD_CHILD(parent, 0, "acpi_ec", ecdt->uid); if (child == NULL) { printf("%s: can't add child\n", __func__); return; } /* Find and save the ACPI handle for this device. */ status = AcpiGetHandle(NULL, ecdt->ec_id, &h); if (ACPI_FAILURE(status)) { device_delete_child(parent, child); printf("%s: can't get handle\n", __func__); return; } acpi_set_handle(child, h); /* Set the data and CSR register addresses. */ bus_set_resource(child, SYS_RES_IOPORT, 0, ecdt->data.Address, /*count*/1); bus_set_resource(child, SYS_RES_IOPORT, 1, ecdt->control.Address, /*count*/1); /* * Store values for the probe/attach routines to use. Store the * ECDT GPE bit and set the global lock flag according to _GLK. * Note that it is not perfectly correct to be evaluating a method * before initializing devices, but in practice this function * should be safe to call at this point. 
*/ params = malloc(sizeof(struct acpi_ec_params), M_TEMP, M_WAITOK | M_ZERO); params->gpe_handle = NULL; params->gpe_bit = ecdt->gpe_bit; params->uid = ecdt->uid; acpi_GetInteger(h, "_GLK", &params->glk); acpi_set_private(child, params); acpi_set_magic(child, (int)&acpi_ec_devclass); /* Finish the attach process. */ if (device_probe_and_attach(child) != 0) device_delete_child(parent, child); } static int acpi_ec_probe(device_t dev) { ACPI_BUFFER buf; ACPI_HANDLE h; ACPI_OBJECT *obj; ACPI_STATUS status; device_t peer; char desc[64]; int ret; struct acpi_ec_params *params; /* Check that this is a device and that EC is not disabled. */ if (acpi_get_type(dev) != ACPI_TYPE_DEVICE || acpi_disabled("ec")) return (ENXIO); /* * If probed via ECDT, set description and continue. Otherwise, * we can access the namespace and make sure this is not a * duplicate probe. */ ret = ENXIO; params = NULL; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; if (DEV_ECDT(dev)) { params = acpi_get_private(dev); ret = 0; } else if (acpi_MatchHid(dev, "PNP0C09")) { params = malloc(sizeof(struct acpi_ec_params), M_TEMP, M_WAITOK | M_ZERO); h = acpi_get_handle(dev); /* * Read the unit ID to check for duplicate attach and the * global lock value to see if we should acquire it when * accessing the EC. */ status = acpi_GetInteger(h, "_UID", &params->uid); if (ACPI_FAILURE(status)) params->uid = 0; status = acpi_GetInteger(h, "_GLK", &params->glk); if (ACPI_FAILURE(status)) params->glk = 0; /* * Evaluate the _GPE method to find the GPE bit used by the EC to * signal status (SCI). If it's a package, it contains a reference * and GPE bit, similar to _PRW. 
*/ status = AcpiEvaluateObject(h, "_GPE", NULL, &buf); if (ACPI_FAILURE(status)) { device_printf(dev, "can't evaluate _GPE - %s\n", AcpiFormatException(status)); return (ENXIO); } obj = (ACPI_OBJECT *)buf.Pointer; if (obj == NULL) return (ENXIO); switch (obj->Type) { case ACPI_TYPE_INTEGER: params->gpe_handle = NULL; params->gpe_bit = obj->Integer.Value; break; case ACPI_TYPE_PACKAGE: if (!ACPI_PKG_VALID(obj, 2)) goto out; params->gpe_handle = acpi_GetReference(NULL, &obj->Package.Elements[0]); if (params->gpe_handle == NULL || acpi_PkgInt32(obj, 1, &params->gpe_bit) != 0) goto out; break; default: device_printf(dev, "_GPE has invalid type %d\n", obj->Type); goto out; } /* Store the values we got from the namespace for attach. */ acpi_set_private(dev, params); /* * Check for a duplicate probe. This can happen when a probe * via ECDT succeeded already. If this is a duplicate, disable * this device. */ peer = devclass_get_device(acpi_ec_devclass, params->uid); if (peer == NULL || !device_is_alive(peer)) ret = 0; else device_disable(dev); } out: if (ret == 0) { snprintf(desc, sizeof(desc), "Embedded Controller: GPE %#x%s%s", params->gpe_bit, (params->glk) ? ", GLK" : "", DEV_ECDT(dev) ? ", ECDT" : ""); device_set_desc_copy(dev, desc); } if (ret > 0 && params) free(params, M_TEMP); if (buf.Pointer) AcpiOsFree(buf.Pointer); return (ret); } static int acpi_ec_attach(device_t dev) { struct acpi_ec_softc *sc; struct acpi_ec_params *params; ACPI_STATUS Status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch/initialize softc (assumes softc is pre-zeroed). */ sc = device_get_softc(dev); params = acpi_get_private(dev); sc->ec_dev = dev; sc->ec_handle = acpi_get_handle(dev); sc->ec_polldelay = EC_POLL_DELAY; mtx_init(&sc->ec_mtx, "ACPI embedded controller", NULL, MTX_DEF); /* Retrieve previously probed values via device ivars. 
*/ sc->ec_glk = params->glk; sc->ec_gpebit = params->gpe_bit; sc->ec_gpehandle = params->gpe_handle; sc->ec_uid = params->uid; free(params, M_TEMP); /* Attach bus resources for data and command/status ports. */ sc->ec_data_rid = 0; sc->ec_data_res = bus_alloc_resource_any(sc->ec_dev, SYS_RES_IOPORT, &sc->ec_data_rid, RF_ACTIVE); if (sc->ec_data_res == NULL) { device_printf(dev, "can't allocate data port\n"); goto error; } sc->ec_data_tag = rman_get_bustag(sc->ec_data_res); sc->ec_data_handle = rman_get_bushandle(sc->ec_data_res); sc->ec_csr_rid = 1; sc->ec_csr_res = bus_alloc_resource_any(sc->ec_dev, SYS_RES_IOPORT, &sc->ec_csr_rid, RF_ACTIVE); if (sc->ec_csr_res == NULL) { device_printf(dev, "can't allocate command/status port\n"); goto error; } sc->ec_csr_tag = rman_get_bustag(sc->ec_csr_res); sc->ec_csr_handle = rman_get_bushandle(sc->ec_csr_res); /* * Install a handler for this EC's GPE bit. We want edge-triggered * behavior. */ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching GPE handler\n")); Status = AcpiInstallGpeHandler(sc->ec_gpehandle, sc->ec_gpebit, ACPI_GPE_EDGE_TRIGGERED, &EcGpeHandler, sc); if (ACPI_FAILURE(Status)) { device_printf(dev, "can't install GPE handler for %s - %s\n", acpi_name(sc->ec_handle), AcpiFormatException(Status)); goto error; } /* * Install address space handler */ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "attaching address space handler\n")); Status = AcpiInstallAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC, &EcSpaceHandler, &EcSpaceSetup, sc); if (ACPI_FAILURE(Status)) { device_printf(dev, "can't install address space handler for %s - %s\n", acpi_name(sc->ec_handle), AcpiFormatException(Status)); goto error; } /* Enable runtime GPEs for the handler. 
*/ Status = AcpiSetGpeType(sc->ec_gpehandle, sc->ec_gpebit, ACPI_GPE_TYPE_RUNTIME); if (ACPI_FAILURE(Status)) { device_printf(dev, "AcpiSetGpeType failed: %s\n", AcpiFormatException(Status)); goto error; } Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_NOT_ISR); if (ACPI_FAILURE(Status)) { device_printf(dev, "AcpiEnableGpe failed: %s\n", AcpiFormatException(Status)); goto error; } ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "acpi_ec_attach complete\n")); return (0); error: AcpiRemoveGpeHandler(sc->ec_gpehandle, sc->ec_gpebit, &EcGpeHandler); AcpiRemoveAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC, EcSpaceHandler); if (sc->ec_csr_res) bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_csr_rid, sc->ec_csr_res); if (sc->ec_data_res) bus_release_resource(sc->ec_dev, SYS_RES_IOPORT, sc->ec_data_rid, sc->ec_data_res); mtx_destroy(&sc->ec_mtx); return (ENXIO); } static void EcGpeQueryHandler(void *Context) { struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context; UINT8 Data; ACPI_STATUS Status; EC_STATUS EcStatus; char qxx[5]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); KASSERT(Context != NULL, ("EcGpeQueryHandler called with NULL")); Status = EcLock(sc); if (ACPI_FAILURE(Status)) { ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "GpeQuery lock error: %s\n", AcpiFormatException(Status)); return; } /* * If the EC_SCI bit of the status register is not set, then pass * it along to any potential waiters as it may be an IBE/OBF event. */ EcStatus = EC_GET_CSR(sc); if ((EcStatus & EC_EVENT_SCI) == 0) { sc->ec_csrvalue = EcStatus; wakeup(&sc->ec_csrvalue); EcUnlock(sc); goto re_enable; } /* * Send a query command to the EC to find out which _Qxx call it * wants to make. This command clears the SCI bit and also the * interrupt source since we are edge-triggered. 
*/ Status = EcCommand(sc, EC_COMMAND_QUERY); if (ACPI_FAILURE(Status)) { EcUnlock(sc); ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "GPE query failed - %s\n", AcpiFormatException(Status)); goto re_enable; } Data = EC_GET_DATA(sc); EcUnlock(sc); /* Ignore the value for "no outstanding event". (13.3.5) */ if (Data == 0) goto re_enable; /* Evaluate _Qxx to respond to the controller. */ sprintf(qxx, "_Q%02x", Data); strupr(qxx); Status = AcpiEvaluateObject(sc->ec_handle, qxx, NULL, NULL); if (ACPI_FAILURE(Status) && Status != AE_NOT_FOUND) { ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "evaluation of GPE query method %s failed - %s\n", qxx, AcpiFormatException(Status)); } re_enable: /* Re-enable the GPE event so we'll get future requests. */ Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_NOT_ISR); if (ACPI_FAILURE(Status)) printf("EcGpeQueryHandler: AcpiEnableEvent failed\n"); } /* * Handle a GPE. Currently we only handle SCI events as others must * be handled by polling in EcWaitEvent(). This is because some ECs * treat events as level when they should be edge-triggered. */ static uint32_t EcGpeHandler(void *Context) { struct acpi_ec_softc *sc = Context; ACPI_STATUS Status; KASSERT(Context != NULL, ("EcGpeHandler called with NULL")); /* Disable further GPEs while we handle this one. */ AcpiDisableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_NOT_ISR); /* Schedule the GPE query handler. */ Status = AcpiOsQueueForExecution(OSD_PRIORITY_GPE, EcGpeQueryHandler, Context); if (ACPI_FAILURE(Status)) { printf("Queuing GPE query handler failed.\n"); Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit, ACPI_NOT_ISR); if (ACPI_FAILURE(Status)) printf("EcGpeHandler: AcpiEnableEvent failed\n"); } return (0); } static ACPI_STATUS EcSpaceSetup(ACPI_HANDLE Region, UINT32 Function, void *Context, void **RegionContext) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * If deactivating a region, always set the output to NULL. 
Otherwise, * just pass the context through. */ if (Function == ACPI_REGION_DEACTIVATE) *RegionContext = NULL; else *RegionContext = Context; return_ACPI_STATUS (AE_OK); } static ACPI_STATUS EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 width, ACPI_INTEGER *Value, void *Context, void *RegionContext) { struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context; ACPI_STATUS Status; UINT8 EcAddr, EcData; int i; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, (UINT32)Address); if (width % 8 != 0 || Value == NULL || Context == NULL) return_ACPI_STATUS (AE_BAD_PARAMETER); if (Address + (width / 8) - 1 > 0xFF) return_ACPI_STATUS (AE_BAD_ADDRESS); if (Function == ACPI_READ) *Value = 0; EcAddr = Address; Status = AE_ERROR; /* Perform the transaction(s), based on width. */ for (i = 0; i < width; i += 8, EcAddr++) { Status = EcLock(sc); if (ACPI_FAILURE(Status)) break; switch (Function) { case ACPI_READ: Status = EcRead(sc, EcAddr, &EcData); if (ACPI_SUCCESS(Status)) *Value |= ((ACPI_INTEGER)EcData) << i; break; case ACPI_WRITE: EcData = (UINT8)((*Value) >> i); Status = EcWrite(sc, EcAddr, &EcData); break; default: device_printf(sc->ec_dev, "invalid EcSpaceHandler function %d\n", Function); Status = AE_BAD_PARAMETER; break; } EcUnlock(sc); if (ACPI_FAILURE(Status)) break; } return_ACPI_STATUS (Status); } static ACPI_STATUS EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event) { EC_STATUS EcStatus; ACPI_STATUS Status; int i, period, retval; static int EcDbgMaxDelay; mtx_assert(&sc->ec_mtx, MA_OWNED); Status = AE_NO_HARDWARE_RESPONSE; /* * Wait for 1 us before checking the CSR. Testing shows about * 50% of requests complete in 1 us and 90% of them complete * in 5 us or less. */ AcpiOsStall(1); /* * Poll the EC status register to detect completion of the last * command. First, wait up to 1 ms in chunks of sc->ec_polldelay * microseconds. 
*/ for (i = 0; i < 1000 / sc->ec_polldelay; i++) { EcStatus = EC_GET_CSR(sc); if (EVENT_READY(Event, EcStatus)) { Status = AE_OK; break; } AcpiOsStall(sc->ec_polldelay); } /* Scale poll delay by the amount of time actually waited. */ period = i * sc->ec_polldelay; if (period <= 5) sc->ec_polldelay = 1; else if (period <= 20) sc->ec_polldelay = 5; else if (period <= 100) sc->ec_polldelay = 10; else sc->ec_polldelay = 100; /* * If we still don't have a response, wait up to ec_poll_timeout ms * for completion, sleeping for chunks of 10 ms. */ if (Status != AE_OK) { retval = -1; for (i = 0; i < ec_poll_timeout / 10; i++) { if (retval != 0) EcStatus = EC_GET_CSR(sc); else EcStatus = sc->ec_csrvalue; if (EVENT_READY(Event, EcStatus)) { Status = AE_OK; break; } retval = msleep(&sc->ec_csrvalue, &sc->ec_mtx, PZERO, "ecpoll", 10/*ms*/); } } /* Calculate new delay and print it if it exceeds the max. */ if (period == 1000) period += i * 10000; if (period > EcDbgMaxDelay) { EcDbgMaxDelay = period; ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "info: new max delay is %d us\n", period); } return (Status); } static ACPI_STATUS EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd) { ACPI_STATUS Status; EC_EVENT Event; mtx_assert(&sc->ec_mtx, MA_OWNED); /* Decide what to wait for based on command type. */ switch (cmd) { case EC_COMMAND_READ: case EC_COMMAND_WRITE: case EC_COMMAND_BURST_DISABLE: Event = EC_EVENT_INPUT_BUFFER_EMPTY; break; case EC_COMMAND_QUERY: case EC_COMMAND_BURST_ENABLE: Event = EC_EVENT_OUTPUT_BUFFER_FULL; break; default: ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "EcCommand: Invalid command %#x\n", cmd); return (AE_BAD_PARAMETER); } /* Run the command and wait for the chosen event. 
*/ EC_SET_CSR(sc, cmd); Status = EcWaitEvent(sc, Event); if (ACPI_FAILURE(Status)) { ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "EcCommand: no response to %#x\n", cmd); } return (Status); } static ACPI_STATUS EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data) { ACPI_STATUS Status; mtx_assert(&sc->ec_mtx, MA_OWNED); #ifdef notyet /* If we can't start burst mode, continue anyway. */ EcCommand(sc, EC_COMMAND_BURST_ENABLE); #endif Status = EcCommand(sc, EC_COMMAND_READ); if (ACPI_FAILURE(Status)) return (Status); EC_SET_DATA(sc, Address); Status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL); if (ACPI_FAILURE(Status)) { ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "EcRead: Failed waiting for EC to send data.\n"); return (Status); } *Data = EC_GET_DATA(sc); #ifdef notyet if (sc->ec_burstactive) { Status = EcCommand(sc, EC_COMMAND_BURST_DISABLE); if (ACPI_FAILURE(Status)) return (Status); } #endif return (AE_OK); } static ACPI_STATUS EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data) { ACPI_STATUS Status; mtx_assert(&sc->ec_mtx, MA_OWNED); #ifdef notyet /* If we can't start burst mode, continue anyway. 
*/ EcCommand(sc, EC_COMMAND_BURST_ENABLE); #endif Status = EcCommand(sc, EC_COMMAND_WRITE); if (ACPI_FAILURE(Status)) return (Status); EC_SET_DATA(sc, Address); Status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY); if (ACPI_FAILURE(Status)) { ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "EcRead: Failed waiting for EC to process address\n"); return (Status); } EC_SET_DATA(sc, *Data); Status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY); if (ACPI_FAILURE(Status)) { ACPI_VPRINT(sc->ec_dev, acpi_device_get_parent_softc(sc->ec_dev), "EcWrite: Failed waiting for EC to process data\n"); return (Status); } #ifdef notyet if (sc->ec_burstactive) { Status = EcCommand(sc, EC_COMMAND_BURST_DISABLE); if (ACPI_FAILURE(Status)) return (Status); } #endif return (AE_OK); } Index: head/sys/dev/acpica/acpi_isab.c =================================================================== --- head/sys/dev/acpica/acpi_isab.c (revision 129878) +++ head/sys/dev/acpica/acpi_isab.c (revision 129879) @@ -1,128 +1,129 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ISA Bridge driver for Generic ISA Bus Devices. See section 10.7 of the * ACPI 2.0a specification for details on this device. */ #include "opt_acpi.h" #include #include #include #include +#include #include "acpi.h" #include #include /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ISA_ACPI") struct acpi_isab_softc { device_t ap_dev; ACPI_HANDLE ap_handle; }; static int acpi_isab_probe(device_t bus); static int acpi_isab_attach(device_t bus); static int acpi_isab_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static device_method_t acpi_isab_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_isab_probe), DEVMETHOD(device_attach, acpi_isab_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, acpi_isab_read_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), {0, 0} }; static driver_t acpi_isab_driver = { "isab", 
acpi_isab_methods, sizeof(struct acpi_isab_softc), }; DRIVER_MODULE(acpi_isab, acpi, acpi_isab_driver, isab_devclass, 0, 0); MODULE_DEPEND(acpi_isab, acpi, 1, 1, 1); static int acpi_isab_probe(device_t dev) { if (acpi_get_type(dev) == ACPI_TYPE_DEVICE && !acpi_disabled("isa") && devclass_get_device(isab_devclass, 0) == dev && (acpi_MatchHid(dev, "PNP0A05") || acpi_MatchHid(dev, "PNP0A06"))) { device_set_desc(dev, "ACPI Generic ISA bridge"); return (0); } return (ENXIO); } static int acpi_isab_attach(device_t dev) { struct acpi_isab_softc *sc; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); return (isab_attach(dev)); } static int acpi_isab_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_isab_softc *sc = device_get_softc(dev); switch (which) { case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); } return (ENOENT); } Index: head/sys/dev/acpica/acpi_lid.c =================================================================== --- head/sys/dev/acpica/acpi_lid.c (revision 129878) +++ head/sys/dev/acpica/acpi_lid.c (revision 129879) @@ -1,194 +1,195 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include +#include #include #include #include "acpi.h" #include /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUTTON ACPI_MODULE_NAME("LID") struct acpi_lid_softc { device_t lid_dev; ACPI_HANDLE lid_handle; int lid_status; /* open or closed */ }; static int acpi_lid_probe(device_t dev); static int acpi_lid_attach(device_t dev); static int acpi_lid_suspend(device_t dev); static int acpi_lid_resume(device_t dev); static void acpi_lid_notify_status_changed(void *arg); static void acpi_lid_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context); static device_method_t acpi_lid_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_lid_probe), DEVMETHOD(device_attach, acpi_lid_attach), DEVMETHOD(device_suspend, acpi_lid_suspend), DEVMETHOD(device_resume, acpi_lid_resume), {0, 0} }; static driver_t acpi_lid_driver = { "acpi_lid", acpi_lid_methods, sizeof(struct acpi_lid_softc), }; static devclass_t acpi_lid_devclass; DRIVER_MODULE(acpi_lid, acpi, acpi_lid_driver, acpi_lid_devclass, 0, 0); MODULE_DEPEND(acpi_lid, acpi, 1, 1, 1); static int acpi_lid_probe(device_t dev) { if (acpi_get_type(dev) == ACPI_TYPE_DEVICE && 
!acpi_disabled("lid") && acpi_MatchHid(dev, "PNP0C0D")) { device_set_desc(dev, "Control Method Lid Switch"); return (0); } return (ENXIO); } static int acpi_lid_attach(device_t dev) { struct acpi_lid_softc *sc; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->lid_dev = dev; sc->lid_handle = acpi_get_handle(dev); /* * If a system does not get lid events, it may make sense to change * the type to ACPI_ALL_NOTIFY. Some systems generate both a wake and * runtime notify in that case though. */ AcpiInstallNotifyHandler(sc->lid_handle, ACPI_DEVICE_NOTIFY, acpi_lid_notify_handler, sc); /* Enable the GPE for wake/runtime. */ acpi_wake_init(dev, ACPI_GPE_TYPE_WAKE_RUN); acpi_wake_set_enable(dev, 1); return_VALUE (0); } static int acpi_lid_suspend(device_t dev) { struct acpi_softc *acpi_sc; acpi_sc = acpi_device_get_parent_softc(dev); acpi_wake_sleep_prep(dev, acpi_sc->acpi_sstate); return (0); } static int acpi_lid_resume(device_t dev) { acpi_wake_run_prep(dev); return (0); } static void acpi_lid_notify_status_changed(void *arg) { struct acpi_lid_softc *sc; struct acpi_softc *acpi_sc; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = (struct acpi_lid_softc *)arg; /* * Evaluate _LID and check the return value, update lid status. * Zero: The lid is closed * Non-zero: The lid is open */ status = acpi_GetInteger(sc->lid_handle, "_LID", &sc->lid_status); if (ACPI_FAILURE(status)) return_VOID; acpi_sc = acpi_device_get_parent_softc(sc->lid_dev); if (acpi_sc == NULL) return_VOID; ACPI_VPRINT(sc->lid_dev, acpi_sc, "Lid %s\n", sc->lid_status ? 
"opened" : "closed"); acpi_UserNotify("Lid", sc->lid_handle, sc->lid_status); if (sc->lid_status == 0) EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_sx); else EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_sx); return_VOID; } /* XXX maybe not here */ #define ACPI_NOTIFY_STATUS_CHANGED 0x80 static void acpi_lid_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { struct acpi_lid_softc *sc; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify); sc = (struct acpi_lid_softc *)context; switch (notify) { case ACPI_NOTIFY_STATUS_CHANGED: AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_lid_notify_status_changed, sc); break; default: device_printf(sc->lid_dev, "unknown notify %#x\n", notify); break; } return_VOID; } Index: head/sys/dev/acpica/acpi_pcib_acpi.c =================================================================== --- head/sys/dev/acpica/acpi_pcib_acpi.c (revision 129878) +++ head/sys/dev/acpica/acpi_pcib_acpi.c (revision 129879) @@ -1,311 +1,312 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include #include +#include #include "acpi.h" #include #include #include #include #include "pcib_if.h" #include /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI_ACPI") struct acpi_hpcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; int ap_segment; /* analagous to Alpha 'hose' */ int ap_bus; /* bios-assigned bus number */ ACPI_BUFFER ap_prt; /* interrupt routing table */ }; static int acpi_pcib_acpi_probe(device_t bus); static int acpi_pcib_acpi_attach(device_t bus); static int acpi_pcib_acpi_resume(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static uint32_t acpi_pcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes); static void acpi_pcib_write_config(device_t dev, int bus, int slot, int func, int reg, uint32_t data, int bytes); static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin); static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static device_method_t acpi_pcib_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_acpi_probe), DEVMETHOD(device_attach, acpi_pcib_acpi_attach), DEVMETHOD(device_shutdown, 
bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, acpi_pcib_acpi_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt), {0, 0} }; static driver_t acpi_pcib_acpi_driver = { "pcib", acpi_pcib_acpi_methods, sizeof(struct acpi_hpcib_softc), }; DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_acpi_driver, pcib_devclass, 0, 0); MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1); static int acpi_pcib_acpi_probe(device_t dev) { if (acpi_get_type(dev) == ACPI_TYPE_DEVICE && !acpi_disabled("pci") && acpi_MatchHid(dev, "PNP0A03")) { if (pci_cfgregopen() == 0) return (ENXIO); device_set_desc(dev, "ACPI Host-PCI bridge"); return (0); } return (ENXIO); } static int acpi_pcib_acpi_attach(device_t dev) { struct acpi_hpcib_softc *sc; ACPI_STATUS status; u_int addr, slot, func, busok; uint8_t busno; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Get our base bus number by evaluating _BBN. * If this doesn't work, we assume we're bus number 0. 
* * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? * XXX It seems many BIOS's with multiple Host-PCI bridges do not set * _BBN correctly. They set _BBN to zero for all bridges. Thus, * if _BBN is zero and pcib0 already exists, we try to read our * bus number from the configuration registers at address _ADR. */ status = acpi_GetInteger(sc->ap_handle, "_BBN", &sc->ap_bus); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } else { /* If it's not found, assume 0. */ sc->ap_bus = 0; } } /* * If the bus is zero and pcib0 already exists, read the bus number * via PCI config space. */ busok = 1; if (sc->ap_bus == 0 && devclass_get_device(pcib_devclass, 0) != dev) { busok = 0; status = acpi_GetInteger(sc->ap_handle, "_ADR", &addr); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _ADR - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } else device_printf(dev, "couldn't find _ADR\n"); } else { /* XXX: We assume bus 0. */ slot = addr >> 16; func = addr & 0xffff; if (bootverbose) device_printf(dev, "reading config registers from 0:%d:%d\n", slot, func); if (host_pcib_get_busno(pci_cfgregread, 0, slot, func, &busno) == 0) device_printf(dev, "couldn't read bus number from cfg space\n"); else { sc->ap_bus = busno; busok = 1; } } } /* * If nothing else worked, hope that ACPI at least lays out the * host-PCI bridges in order and that as a result our unit number * is actually our bus number. 
There are several reasons this * might not be true. */ if (busok == 0) { sc->ap_bus = device_get_unit(dev); device_printf(dev, "trying bus number %d\n", sc->ap_bus); } /* * Get our segment number by evaluating _SEG * It's OK for this to not exist. */ status = acpi_GetInteger(sc->ap_handle, "_SEG", &sc->ap_segment); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } /* If it's not found, assume 0. */ sc->ap_segment = 0; } return (acpi_pcib_attach(dev, &sc->ap_prt, sc->ap_bus)); } static int acpi_pcib_acpi_resume(device_t dev) { struct acpi_hpcib_softc *sc = device_get_softc(dev); return (acpi_pcib_resume(dev, &sc->ap_prt, sc->ap_bus)); } /* * Support for standard PCI bridge ivars. */ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->ap_bus; return (0); case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); } return (ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->ap_bus = value; return (0); } return (ENOENT); } static uint32_t acpi_pcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes) { return (pci_cfgregread(bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, int bus, int slot, int func, int reg, uint32_t data, int bytes) { pci_cfgregwrite(bus, slot, func, reg, data, bytes); } static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_hpcib_softc *sc; /* Find the bridge softc. 
*/ sc = device_get_softc(pcib); return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt)); } struct resource * acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { /* * If no memory preference is given, use upper 256MB slot most * bioses use for their memory window. Typically other bridges * before us get in the way to assert their preferences on memory. * Hardcoding like this sucks, so a more MD/MI way needs to be * found to do it. */ if (type == SYS_RES_MEMORY && start == 0UL && end == ~0UL) start = 0xf0000000; return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } Index: head/sys/dev/acpica/acpi_pcib_pci.c =================================================================== --- head/sys/dev/acpica/acpi_pcib_pci.c (revision 129878) +++ head/sys/dev/acpica/acpi_pcib_pci.c (revision 129879) @@ -1,174 +1,175 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include +#include #include "acpi.h" #include #include #include #include #include #include #include "pcib_if.h" /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI_PCI") struct acpi_pcib_softc { struct pcib_softc ap_pcibsc; ACPI_HANDLE ap_handle; ACPI_BUFFER ap_prt; /* interrupt routing table */ }; struct acpi_pcib_lookup_info { UINT32 address; ACPI_HANDLE handle; }; static int acpi_pcib_pci_probe(device_t bus); static int acpi_pcib_pci_attach(device_t bus); static int acpi_pcib_pci_resume(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_pci_route_interrupt(device_t pcib, device_t dev, int pin); static device_method_t acpi_pcib_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_pci_probe), DEVMETHOD(device_attach, acpi_pcib_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, acpi_pcib_pci_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, pcib_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 
DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, pcib_read_config), DEVMETHOD(pcib_write_config, pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_pci_route_interrupt), {0, 0} }; static driver_t acpi_pcib_pci_driver = { "pcib", acpi_pcib_pci_methods, sizeof(struct acpi_pcib_softc), }; DRIVER_MODULE(acpi_pcib, pci, acpi_pcib_pci_driver, pcib_devclass, 0, 0); MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1); static int acpi_pcib_pci_probe(device_t dev) { if (pci_get_class(dev) != PCIC_BRIDGE || pci_get_subclass(dev) != PCIS_BRIDGE_PCI || acpi_disabled("pci")) return (ENXIO); if (acpi_get_handle(dev) == NULL) return (ENXIO); if (pci_cfgregopen() == 0) return (ENXIO); device_set_desc(dev, "ACPI PCI-PCI bridge"); return (-1000); } static int acpi_pcib_pci_attach(device_t dev) { struct acpi_pcib_softc *sc; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pcib_attach_common(dev); sc = device_get_softc(dev); sc->ap_handle = acpi_get_handle(dev); return (acpi_pcib_attach(dev, &sc->ap_prt, sc->ap_pcibsc.secbus)); } static int acpi_pcib_pci_resume(device_t dev) { struct acpi_pcib_softc *sc = device_get_softc(dev); return (acpi_pcib_resume(dev, &sc->ap_prt, sc->ap_pcibsc.secbus)); } static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_pcib_softc *sc = device_get_softc(dev); switch (which) { case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); } return (pcib_read_ivar(dev, child, which, result)); } static int acpi_pcib_pci_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_pcib_softc *sc; sc = device_get_softc(pcib); /* * If we don't have a _PRT, fall back to the swizzle method * for routing interrupts. 
*/ if (sc->ap_prt.Pointer == NULL) return (pcib_route_interrupt(pcib, dev, pin)); else return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt)); } Index: head/sys/dev/acpica/acpi_resource.c =================================================================== --- head/sys/dev/acpica/acpi_resource.c (revision 129878) +++ head/sys/dev/acpica/acpi_resource.c (revision 129879) @@ -1,607 +1,608 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include +#include #include #include #include #include "acpi.h" #include /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("RESOURCE") /* * Fetch a device's resources and associate them with the device. * * Note that it might be nice to also locate ACPI-specific resource items, such * as GPE bits. * * We really need to split the resource-fetching code out from the * resource-parsing code, since we may want to use the parsing * code for _PRS someday. */ ACPI_STATUS acpi_parse_resources(device_t dev, ACPI_HANDLE handle, struct acpi_parse_resource_set *set, void *arg) { ACPI_BUFFER buf; ACPI_RESOURCE *res; char *curr, *last; ACPI_STATUS status; void *context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Special-case some devices that abuse _PRS/_CRS to mean * something other than "I consume this resource". * * XXX do we really need this? It's only relevant once * we start always-allocating these resources, and even * then, the only special-cased device is likely to be * the PCI interrupt link. */ /* Fetch the device's current resources. 
*/ buf.Length = ACPI_ALLOCATE_BUFFER; if (ACPI_FAILURE((status = AcpiGetCurrentResources(handle, &buf)))) { if (status != AE_NOT_FOUND) printf("can't fetch resources for %s - %s\n", acpi_name(handle), AcpiFormatException(status)); return_ACPI_STATUS (status); } ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s - got %ld bytes of resources\n", acpi_name(handle), (long)buf.Length)); set->set_init(dev, arg, &context); /* Iterate through the resources */ curr = buf.Pointer; last = (char *)buf.Pointer + buf.Length; while (curr < last) { res = (ACPI_RESOURCE *)curr; curr += res->Length; /* Handle the individual resource types */ switch(res->Id) { case ACPI_RSTYPE_END_TAG: ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "EndTag\n")); curr = last; break; case ACPI_RSTYPE_FIXED_IO: if (res->Data.FixedIo.RangeLength <= 0) break; ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedIo 0x%x/%d\n", res->Data.FixedIo.BaseAddress, res->Data.FixedIo.RangeLength)); set->set_ioport(dev, context, res->Data.FixedIo.BaseAddress, res->Data.FixedIo.RangeLength); break; case ACPI_RSTYPE_IO: if (res->Data.Io.RangeLength <= 0) break; if (res->Data.Io.MinBaseAddress == res->Data.Io.MaxBaseAddress) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x/%d\n", res->Data.Io.MinBaseAddress, res->Data.Io.RangeLength)); set->set_ioport(dev, context, res->Data.Io.MinBaseAddress, res->Data.Io.RangeLength); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x-0x%x/%d\n", res->Data.Io.MinBaseAddress, res->Data.Io.MaxBaseAddress, res->Data.Io.RangeLength)); set->set_iorange(dev, context, res->Data.Io.MinBaseAddress, res->Data.Io.MaxBaseAddress, res->Data.Io.RangeLength, res->Data.Io.Alignment); } break; case ACPI_RSTYPE_FIXED_MEM32: if (res->Data.FixedMemory32.RangeLength <= 0) break; ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedMemory32 0x%x/%d\n", res->Data.FixedMemory32.RangeBaseAddress, res->Data.FixedMemory32.RangeLength)); set->set_memory(dev, context, res->Data.FixedMemory32.RangeBaseAddress, res->Data.FixedMemory32.RangeLength); 
break; case ACPI_RSTYPE_MEM32: if (res->Data.Memory32.RangeLength <= 0) break; if (res->Data.Memory32.MinBaseAddress == res->Data.Memory32.MaxBaseAddress) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x/%d\n", res->Data.Memory32.MinBaseAddress, res->Data.Memory32.RangeLength)); set->set_memory(dev, context, res->Data.Memory32.MinBaseAddress, res->Data.Memory32.RangeLength); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x-0x%x/%d\n", res->Data.Memory32.MinBaseAddress, res->Data.Memory32.MaxBaseAddress, res->Data.Memory32.RangeLength)); set->set_memoryrange(dev, context, res->Data.Memory32.MinBaseAddress, res->Data.Memory32.MaxBaseAddress, res->Data.Memory32.RangeLength, res->Data.Memory32.Alignment); } break; case ACPI_RSTYPE_MEM24: if (res->Data.Memory24.RangeLength <= 0) break; if (res->Data.Memory24.MinBaseAddress == res->Data.Memory24.MaxBaseAddress) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x/%d\n", res->Data.Memory24.MinBaseAddress, res->Data.Memory24.RangeLength)); set->set_memory(dev, context, res->Data.Memory24.MinBaseAddress, res->Data.Memory24.RangeLength); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x-0x%x/%d\n", res->Data.Memory24.MinBaseAddress, res->Data.Memory24.MaxBaseAddress, res->Data.Memory24.RangeLength)); set->set_memoryrange(dev, context, res->Data.Memory24.MinBaseAddress, res->Data.Memory24.MaxBaseAddress, res->Data.Memory24.RangeLength, res->Data.Memory24.Alignment); } break; case ACPI_RSTYPE_IRQ: /* * from 1.0b 6.4.2 * "This structure is repeated for each separate interrupt * required" */ set->set_irq(dev, context, res->Data.Irq.Interrupts, res->Data.Irq.NumberOfInterrupts, res->Data.Irq.EdgeLevel, res->Data.Irq.ActiveHighLow); break; case ACPI_RSTYPE_DMA: /* * from 1.0b 6.4.3 * "This structure is repeated for each separate dma channel * required" */ set->set_drq(dev, context, res->Data.Dma.Channels, res->Data.Dma.NumberOfChannels); break; case ACPI_RSTYPE_START_DPF: 
ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "start dependant functions\n")); set->set_start_dependant(dev, context, res->Data.StartDpf.CompatibilityPriority); break; case ACPI_RSTYPE_END_DPF: ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "end dependant functions\n")); set->set_end_dependant(dev, context); break; case ACPI_RSTYPE_ADDRESS32: if (res->Data.Address32.AddressLength <= 0) break; if (res->Data.Address32.ProducerConsumer != ACPI_CONSUMER) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored Address32 %s producer\n", res->Data.Address32.ResourceType == ACPI_IO_RANGE ? "IO" : "Memory")); break; } if (res->Data.Address32.ResourceType != ACPI_MEMORY_RANGE && res->Data.Address32.ResourceType != ACPI_IO_RANGE) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored Address32 for non-memory, non-I/O\n")); break; } if (res->Data.Address32.MinAddressFixed == ACPI_ADDRESS_FIXED && res->Data.Address32.MaxAddressFixed == ACPI_ADDRESS_FIXED) { if (res->Data.Address32.ResourceType == ACPI_MEMORY_RANGE) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address32/Memory 0x%x/%d\n", res->Data.Address32.MinAddressRange, res->Data.Address32.AddressLength)); set->set_memory(dev, context, res->Data.Address32.MinAddressRange, res->Data.Address32.AddressLength); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address32/IO 0x%x/%d\n", res->Data.Address32.MinAddressRange, res->Data.Address32.AddressLength)); set->set_ioport(dev, context, res->Data.Address32.MinAddressRange, res->Data.Address32.AddressLength); } } else { if (res->Data.Address32.ResourceType == ACPI_MEMORY_RANGE) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address32/Memory 0x%x-0x%x/%d\n", res->Data.Address32.MinAddressRange, res->Data.Address32.MaxAddressRange, res->Data.Address32.AddressLength)); set->set_memoryrange(dev, context, res->Data.Address32.MinAddressRange, res->Data.Address32.MaxAddressRange, res->Data.Address32.AddressLength, res->Data.Address32.Granularity); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address32/IO 0x%x-0x%x/%d\n", 
res->Data.Address32.MinAddressRange, res->Data.Address32.MaxAddressRange, res->Data.Address32.AddressLength)); set->set_iorange(dev, context, res->Data.Address32.MinAddressRange, res->Data.Address32.MaxAddressRange, res->Data.Address32.AddressLength, res->Data.Address32.Granularity); } } break; case ACPI_RSTYPE_ADDRESS16: if (res->Data.Address16.AddressLength <= 0) break; if (res->Data.Address16.ProducerConsumer != ACPI_CONSUMER) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored Address16 %s producer\n", res->Data.Address16.ResourceType == ACPI_IO_RANGE ? "IO" : "Memory")); break; } if (res->Data.Address16.ResourceType != ACPI_MEMORY_RANGE && res->Data.Address16.ResourceType != ACPI_IO_RANGE) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored Address16 for non-memory, non-I/O\n")); break; } if (res->Data.Address16.MinAddressFixed == ACPI_ADDRESS_FIXED && res->Data.Address16.MaxAddressFixed == ACPI_ADDRESS_FIXED) { if (res->Data.Address16.ResourceType == ACPI_MEMORY_RANGE) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address16/Memory 0x%x/%d\n", res->Data.Address16.MinAddressRange, res->Data.Address16.AddressLength)); set->set_memory(dev, context, res->Data.Address16.MinAddressRange, res->Data.Address16.AddressLength); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address16/IO 0x%x/%d\n", res->Data.Address16.MinAddressRange, res->Data.Address16.AddressLength)); set->set_ioport(dev, context, res->Data.Address16.MinAddressRange, res->Data.Address16.AddressLength); } } else { if (res->Data.Address16.ResourceType == ACPI_MEMORY_RANGE) { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address16/Memory 0x%x-0x%x/%d\n", res->Data.Address16.MinAddressRange, res->Data.Address16.MaxAddressRange, res->Data.Address16.AddressLength)); set->set_memoryrange(dev, context, res->Data.Address16.MinAddressRange, res->Data.Address16.MaxAddressRange, res->Data.Address16.AddressLength, res->Data.Address16.Granularity); } else { ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Address16/IO 0x%x-0x%x/%d\n", 
res->Data.Address16.MinAddressRange, res->Data.Address16.MaxAddressRange, res->Data.Address16.AddressLength)); set->set_iorange(dev, context, res->Data.Address16.MinAddressRange, res->Data.Address16.MaxAddressRange, res->Data.Address16.AddressLength, res->Data.Address16.Granularity); } } break; case ACPI_RSTYPE_ADDRESS64: ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "unimplemented Address64 resource\n")); break; case ACPI_RSTYPE_EXT_IRQ: /* XXX special handling? */ set->set_irq(dev, context,res->Data.ExtendedIrq.Interrupts, res->Data.ExtendedIrq.NumberOfInterrupts, res->Data.ExtendedIrq.EdgeLevel, res->Data.ExtendedIrq.ActiveHighLow); break; case ACPI_RSTYPE_VENDOR: ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "unimplemented VendorSpecific resource\n")); break; default: break; } } AcpiOsFree(buf.Pointer); set->set_done(dev, context); return_ACPI_STATUS (AE_OK); } /* * Resource-set vectors used to attach _CRS-derived resources * to an ACPI device. */ static void acpi_res_set_init(device_t dev, void *arg, void **context); static void acpi_res_set_done(device_t dev, void *context); static void acpi_res_set_ioport(device_t dev, void *context, u_int32_t base, u_int32_t length); static void acpi_res_set_iorange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align); static void acpi_res_set_memory(device_t dev, void *context, u_int32_t base, u_int32_t length); static void acpi_res_set_memoryrange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align); static void acpi_res_set_irq(device_t dev, void *context, u_int32_t *irq, int count, int trig, int pol); static void acpi_res_set_drq(device_t dev, void *context, u_int32_t *drq, int count); static void acpi_res_set_start_dependant(device_t dev, void *context, int preference); static void acpi_res_set_end_dependant(device_t dev, void *context); struct acpi_parse_resource_set acpi_res_parse_set = { acpi_res_set_init, acpi_res_set_done, acpi_res_set_ioport, 
acpi_res_set_iorange, acpi_res_set_memory, acpi_res_set_memoryrange, acpi_res_set_irq, acpi_res_set_drq, acpi_res_set_start_dependant, acpi_res_set_end_dependant }; struct acpi_res_context { int ar_nio; int ar_nmem; int ar_nirq; int ar_ndrq; void *ar_parent; }; static void acpi_res_set_init(device_t dev, void *arg, void **context) { struct acpi_res_context *cp; if ((cp = AcpiOsAllocate(sizeof(*cp))) != NULL) { bzero(cp, sizeof(*cp)); cp->ar_parent = arg; *context = cp; } } static void acpi_res_set_done(device_t dev, void *context) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; AcpiOsFree(cp); } static void acpi_res_set_ioport(device_t dev, void *context, u_int32_t base, u_int32_t length) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, base, length); } static void acpi_res_set_iorange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "I/O range not supported\n"); } static void acpi_res_set_memory(device_t dev, void *context, u_int32_t base, u_int32_t length) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; bus_set_resource(dev, SYS_RES_MEMORY, cp->ar_nmem++, base, length); } static void acpi_res_set_memoryrange(device_t dev, void *context, u_int32_t low, u_int32_t high, u_int32_t length, u_int32_t align) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "memory range not supported\n"); } static void acpi_res_set_irq(device_t dev, void *context, u_int32_t *irq, int count, int trig, int pol) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL || irq == NULL) return; /* This implements no resource relocation. 
*/ if (count != 1) return; bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, *irq, 1); BUS_CONFIG_INTR(dev, *irq, (trig == ACPI_EDGE_SENSITIVE) ? INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL, (pol == ACPI_ACTIVE_HIGH) ? INTR_POLARITY_HIGH : INTR_POLARITY_LOW); } static void acpi_res_set_drq(device_t dev, void *context, u_int32_t *drq, int count) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL || drq == NULL) return; /* This implements no resource relocation. */ if (count != 1) return; bus_set_resource(dev, SYS_RES_DRQ, cp->ar_ndrq++, *drq, 1); } static void acpi_res_set_start_dependant(device_t dev, void *context, int preference) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "dependant functions not supported\n"); } static void acpi_res_set_end_dependant(device_t dev, void *context) { struct acpi_res_context *cp = (struct acpi_res_context *)context; if (cp == NULL) return; device_printf(dev, "dependant functions not supported\n"); } /* * Resource-owning placeholders. * * This code "owns" system resource objects that aren't * otherwise useful to devices, and which shouldn't be * considered "free". * * Note that some systems claim *all* of the physical address space * with a PNP0C01 device, so we cannot correctly "own" system memory * here (must be done in the SMAP handler on x86 systems, for * example). 
*/ static int acpi_sysresource_probe(device_t dev); static int acpi_sysresource_attach(device_t dev); static device_method_t acpi_sysresource_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_sysresource_probe), DEVMETHOD(device_attach, acpi_sysresource_attach), {0, 0} }; static driver_t acpi_sysresource_driver = { "acpi_sysresource", acpi_sysresource_methods, 0, }; static devclass_t acpi_sysresource_devclass; DRIVER_MODULE(acpi_sysresource, acpi, acpi_sysresource_driver, acpi_sysresource_devclass, 0, 0); MODULE_DEPEND(acpi_sysresource, acpi, 1, 1, 1); static int acpi_sysresource_probe(device_t dev) { if (!acpi_disabled("sysresource") && acpi_MatchHid(dev, "PNP0C02")) device_set_desc(dev, "System Resource"); else return (ENXIO); device_quiet(dev); return (-100); } static int acpi_sysresource_attach(device_t dev) { struct resource *res; int i, rid; /* * Suck up all the resources that might have been assigned to us. * Note that it's impossible to tell the difference between a * resource that someone else has claimed, and one that doesn't * exist. */ for (i = 0; i < 100; i++) { rid = i; res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 0); rid = i; res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); rid = i; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); } return (0); } Index: head/sys/dev/acpica/acpi_thermal.c =================================================================== --- head/sys/dev/acpica/acpi_thermal.c (revision 129878) +++ head/sys/dev/acpica/acpi_thermal.c (revision 129879) @@ -1,805 +1,806 @@ /*- * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include +#include #include #include #include #include #include #include #include "acpi.h" #include /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_THERMAL ACPI_MODULE_NAME("THERMAL") #define TZ_ZEROC 2732 #define TZ_KELVTOC(x) (((x) - TZ_ZEROC) / 10), (((x) - TZ_ZEROC) % 10) #define TZ_NOTIFY_TEMPERATURE 0x80 /* Temperature changed. */ #define TZ_NOTIFY_LEVELS 0x81 /* Cooling levels changed. */ #define TZ_NOTIFY_DEVICES 0x82 /* Device lists changed. */ #define TZ_NOTIFY_CRITICAL 0xcc /* Fake notify that _CRT/_HOT reached. */ /* Check for temperature changes every 10 seconds by default */ #define TZ_POLLRATE 10 /* Make sure the reported temperature is valid for this number of polls. */ #define TZ_VALIDCHECKS 3 /* Notify the user we will be shutting down in one more poll cycle. 
*/ #define TZ_NOTIFYCOUNT (TZ_VALIDCHECKS - 1) /* ACPI spec defines this */ #define TZ_NUMLEVELS 10 struct acpi_tz_zone { int ac[TZ_NUMLEVELS]; ACPI_BUFFER al[TZ_NUMLEVELS]; int crt; int hot; ACPI_BUFFER psl; int psv; int tc1; int tc2; int tsp; int tzp; }; struct acpi_tz_softc { device_t tz_dev; ACPI_HANDLE tz_handle; /*Thermal zone handle*/ int tz_temperature; /*Current temperature*/ int tz_active; /*Current active cooling*/ #define TZ_ACTIVE_NONE -1 int tz_requested; /*Minimum active cooling*/ int tz_thflags; /*Current temp-related flags*/ #define TZ_THFLAG_NONE 0 #define TZ_THFLAG_PSV (1<<0) #define TZ_THFLAG_HOT (1<<2) #define TZ_THFLAG_CRT (1<<3) int tz_flags; #define TZ_FLAG_NO_SCP (1<<0) /*No _SCP method*/ #define TZ_FLAG_GETPROFILE (1<<1) /*Get power_profile in timeout*/ struct timespec tz_cooling_started; /*Current cooling starting time*/ struct sysctl_ctx_list tz_sysctl_ctx; struct sysctl_oid *tz_sysctl_tree; struct acpi_tz_zone tz_zone; /*Thermal zone parameters*/ int tz_tmp_updating; int tz_validchecks; }; static int acpi_tz_probe(device_t dev); static int acpi_tz_attach(device_t dev); static int acpi_tz_establish(struct acpi_tz_softc *sc); static void acpi_tz_monitor(void *Context); static void acpi_tz_all_off(struct acpi_tz_softc *sc); static void acpi_tz_switch_cooler_off(ACPI_OBJECT *obj, void *arg); static void acpi_tz_switch_cooler_on(ACPI_OBJECT *obj, void *arg); static void acpi_tz_getparam(struct acpi_tz_softc *sc, char *node, int *data); static void acpi_tz_sanity(struct acpi_tz_softc *sc, int *val, char *what); static int acpi_tz_active_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_tz_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context); static void acpi_tz_timeout(struct acpi_tz_softc *sc); static void acpi_tz_power_profile(void *arg); static void acpi_tz_thread(void *arg); static device_method_t acpi_tz_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_tz_probe), DEVMETHOD(device_attach, acpi_tz_attach), {0, 0} }; 
static driver_t acpi_tz_driver = { "acpi_tz", acpi_tz_methods, sizeof(struct acpi_tz_softc), }; static devclass_t acpi_tz_devclass; DRIVER_MODULE(acpi_tz, acpi, acpi_tz_driver, acpi_tz_devclass, 0, 0); MODULE_DEPEND(acpi_tz, acpi, 1, 1, 1); static struct sysctl_ctx_list acpi_tz_sysctl_ctx; static struct sysctl_oid *acpi_tz_sysctl_tree; /* Minimum cooling run time */ static int acpi_tz_min_runtime = 0; static int acpi_tz_polling_rate = TZ_POLLRATE; /* Timezone polling thread */ static struct proc *acpi_tz_proc; /* * Match an ACPI thermal zone. */ static int acpi_tz_probe(device_t dev) { int result; ACPI_LOCK_DECL; ACPI_LOCK; /* No FUNCTION_TRACE - too noisy */ if (acpi_get_type(dev) == ACPI_TYPE_THERMAL && !acpi_disabled("thermal")) { device_set_desc(dev, "Thermal Zone"); result = -10; } else { result = ENXIO; } ACPI_UNLOCK; return (result); } /* * Attach to an ACPI thermal zone. */ static int acpi_tz_attach(device_t dev) { struct acpi_tz_softc *sc; struct acpi_softc *acpi_sc; int error; char oidname[8]; ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_LOCK; sc = device_get_softc(dev); sc->tz_dev = dev; sc->tz_handle = acpi_get_handle(dev); sc->tz_requested = TZ_ACTIVE_NONE; sc->tz_tmp_updating = 0; /* * Parse the current state of the thermal zone and build control * structures. */ if ((error = acpi_tz_establish(sc)) != 0) goto out; /* * Register for any Notify events sent to this zone. */ AcpiInstallNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY, acpi_tz_notify_handler, sc); /* * Create our sysctl nodes. * * XXX we need a mechanism for adding nodes under ACPI. 
*/ if (device_get_unit(dev) == 0) { acpi_sc = acpi_device_get_parent_softc(dev); sysctl_ctx_init(&acpi_tz_sysctl_ctx); acpi_tz_sysctl_tree = SYSCTL_ADD_NODE(&acpi_tz_sysctl_ctx, SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "thermal", CTLFLAG_RD, 0, ""); SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx, SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO, "min_runtime", CTLFLAG_RD | CTLFLAG_RW, &acpi_tz_min_runtime, 0, "minimum cooling run time in sec"); SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx, SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO, "polling_rate", CTLFLAG_RD | CTLFLAG_RW, &acpi_tz_polling_rate, 0, "monitor polling rate"); } sysctl_ctx_init(&sc->tz_sysctl_ctx); sprintf(oidname, "tz%d", device_get_unit(dev)); sc->tz_sysctl_tree = SYSCTL_ADD_NODE(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO, oidname, CTLFLAG_RD, 0, ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "temperature", CTLFLAG_RD, &sc->tz_temperature, 0, "current thermal zone temperature"); SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "active", CTLTYPE_INT | CTLFLAG_RW, sc, 0, acpi_tz_active_sysctl, "I", ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "thermal_flags", CTLFLAG_RD, &sc->tz_thflags, 0, "thermal zone flags"); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_PSV", CTLFLAG_RD, &sc->tz_zone.psv, 0, ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_HOT", CTLFLAG_RD, &sc->tz_zone.hot, 0, ""); SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_CRT", CTLFLAG_RD, &sc->tz_zone.crt, 0, ""); SYSCTL_ADD_OPAQUE(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree), OID_AUTO, "_ACx", CTLFLAG_RD, &sc->tz_zone.ac, sizeof(sc->tz_zone.ac), "I", ""); /* * Register our power profile event handler, and flag it for a manual * invocation by our timeout. 
We defer it like this so that the rest * of the subsystem has time to come up. */ EVENTHANDLER_REGISTER(power_profile_change, acpi_tz_power_profile, sc, 0); sc->tz_flags |= TZ_FLAG_GETPROFILE; /* * Don't bother evaluating/printing the temperature at this point; * on many systems it'll be bogus until the EC is running. */ /* * Create our thread; we only need one, it will service all of the * thermal zones. */ if (acpi_tz_proc == NULL) { error = kthread_create(acpi_tz_thread, NULL, &acpi_tz_proc, RFHIGHPID, 0, "acpi_thermal"); if (error != 0) { device_printf(sc->tz_dev, "could not create thread - %d", error); goto out; } } out: ACPI_UNLOCK; return_VALUE (error); } /* * Parse the current state of this thermal zone and set up to use it. * * Note that we may have previous state, which will have to be discarded. */ static int acpi_tz_establish(struct acpi_tz_softc *sc) { ACPI_OBJECT *obj; int i; char nbuf[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; /* Power everything off and erase any existing state. */ acpi_tz_all_off(sc); for (i = 0; i < TZ_NUMLEVELS; i++) if (sc->tz_zone.al[i].Pointer != NULL) AcpiOsFree(sc->tz_zone.al[i].Pointer); if (sc->tz_zone.psl.Pointer != NULL) AcpiOsFree(sc->tz_zone.psl.Pointer); bzero(&sc->tz_zone, sizeof(sc->tz_zone)); /* Evaluate thermal zone parameters. 
*/ for (i = 0; i < TZ_NUMLEVELS; i++) { sprintf(nbuf, "_AC%d", i); acpi_tz_getparam(sc, nbuf, &sc->tz_zone.ac[i]); sprintf(nbuf, "_AL%d", i); sc->tz_zone.al[i].Length = ACPI_ALLOCATE_BUFFER; sc->tz_zone.al[i].Pointer = NULL; AcpiEvaluateObject(sc->tz_handle, nbuf, NULL, &sc->tz_zone.al[i]); obj = (ACPI_OBJECT *)sc->tz_zone.al[i].Pointer; if (obj != NULL) { /* Should be a package containing a list of power objects */ if (obj->Type != ACPI_TYPE_PACKAGE) { device_printf(sc->tz_dev, "%s has unknown type %d, rejecting\n", nbuf, obj->Type); return_VALUE (ENXIO); } } } acpi_tz_getparam(sc, "_CRT", &sc->tz_zone.crt); acpi_tz_getparam(sc, "_HOT", &sc->tz_zone.hot); sc->tz_zone.psl.Length = ACPI_ALLOCATE_BUFFER; sc->tz_zone.psl.Pointer = NULL; AcpiEvaluateObject(sc->tz_handle, "_PSL", NULL, &sc->tz_zone.psl); acpi_tz_getparam(sc, "_PSV", &sc->tz_zone.psv); acpi_tz_getparam(sc, "_TC1", &sc->tz_zone.tc1); acpi_tz_getparam(sc, "_TC2", &sc->tz_zone.tc2); acpi_tz_getparam(sc, "_TSP", &sc->tz_zone.tsp); acpi_tz_getparam(sc, "_TZP", &sc->tz_zone.tzp); /* * Sanity-check the values we've been given. * * XXX what do we do about systems that give us the same value for * more than one of these setpoints? */ acpi_tz_sanity(sc, &sc->tz_zone.crt, "_CRT"); acpi_tz_sanity(sc, &sc->tz_zone.hot, "_HOT"); acpi_tz_sanity(sc, &sc->tz_zone.psv, "_PSV"); for (i = 0; i < TZ_NUMLEVELS; i++) acpi_tz_sanity(sc, &sc->tz_zone.ac[i], "_ACx"); /* * Power off everything that we've just been given. */ acpi_tz_all_off(sc); return_VALUE (0); } static char *aclevel_string[] = { "NONE", "_AC0", "_AC1", "_AC2", "_AC3", "_AC4", "_AC5", "_AC6", "_AC7", "_AC8", "_AC9" }; static __inline const char * acpi_tz_aclevel_string(int active) { if (active < -1 || active >= TZ_NUMLEVELS) return (aclevel_string[0]); return (aclevel_string[active+1]); } /* * Evaluate the condition of a thermal zone, take appropriate actions. 
*/ static void acpi_tz_monitor(void *Context) { struct acpi_tz_softc *sc; struct timespec curtime; int temp; int i; int newactive, newflags; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; sc = (struct acpi_tz_softc *)Context; if (sc->tz_tmp_updating) goto out; sc->tz_tmp_updating = 1; /* Get the current temperature. */ status = acpi_GetInteger(sc->tz_handle, "_TMP", &temp); if (ACPI_FAILURE(status)) { ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev), "error fetching current temperature -- %s\n", AcpiFormatException(status)); /* XXX disable zone? go to max cooling? */ goto out; } ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "got %d.%dC\n", TZ_KELVTOC(temp))); sc->tz_temperature = temp; /* * Work out what we ought to be doing right now. * * Note that the _ACx levels sort from hot to cold. */ newactive = TZ_ACTIVE_NONE; for (i = TZ_NUMLEVELS - 1; i >= 0; i--) { if ((sc->tz_zone.ac[i] != -1) && (temp >= sc->tz_zone.ac[i])) { newactive = i; if (sc->tz_active != newactive) { ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev), "_AC%d: temperature %d.%d >= setpoint %d.%d\n", i, TZ_KELVTOC(temp), TZ_KELVTOC(sc->tz_zone.ac[i])); getnanotime(&sc->tz_cooling_started); } } } /* * We are going to get _ACx level down (colder side), but give a guaranteed * minimum cooling run time if requested. 
*/ if (acpi_tz_min_runtime > 0 && sc->tz_active != TZ_ACTIVE_NONE && (newactive == TZ_ACTIVE_NONE || newactive > sc->tz_active)) { getnanotime(&curtime); timespecsub(&curtime, &sc->tz_cooling_started); if (curtime.tv_sec < acpi_tz_min_runtime) newactive = sc->tz_active; } /* Handle user override of active mode */ if (sc->tz_requested != TZ_ACTIVE_NONE && sc->tz_requested < newactive) newactive = sc->tz_requested; /* update temperature-related flags */ newflags = TZ_THFLAG_NONE; if (sc->tz_zone.psv != -1 && temp >= sc->tz_zone.psv) newflags |= TZ_THFLAG_PSV; if (sc->tz_zone.hot != -1 && temp >= sc->tz_zone.hot) newflags |= TZ_THFLAG_HOT; if (sc->tz_zone.crt != -1 && temp >= sc->tz_zone.crt) newflags |= TZ_THFLAG_CRT; /* If the active cooling state has changed, we have to switch things. */ if (newactive != sc->tz_active) { /* Turn off the cooling devices that are on, if any are */ if (sc->tz_active != TZ_ACTIVE_NONE) acpi_ForeachPackageObject( (ACPI_OBJECT *)sc->tz_zone.al[sc->tz_active].Pointer, acpi_tz_switch_cooler_off, sc); /* Turn on cooling devices that are required, if any are */ if (newactive != TZ_ACTIVE_NONE) { acpi_ForeachPackageObject( (ACPI_OBJECT *)sc->tz_zone.al[newactive].Pointer, acpi_tz_switch_cooler_on, sc); } ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev), "switched from %s to %s: %d.%dC\n", acpi_tz_aclevel_string(sc->tz_active), acpi_tz_aclevel_string(newactive), TZ_KELVTOC(temp)); sc->tz_active = newactive; } /* XXX (de)activate any passive cooling that may be required. */ /* * If the temperature is at _HOT or _CRT, increment our event count. * If it has occurred enough times, shutdown the system. This is * needed because some systems will report an invalid high temperature * for one poll cycle. It is suspected this is due to the embedded * controller timing out. A typical value is 138C for one cycle on * a system that is otherwise 65C. * * If we're almost at that threshold, notify the user through devd(8). 
*/ if ((newflags & (TZ_THFLAG_HOT | TZ_THFLAG_CRT)) != 0) { sc->tz_validchecks++; if (sc->tz_validchecks == TZ_VALIDCHECKS) { device_printf(sc->tz_dev, "WARNING - current temperature (%d.%dC) exceeds safe limits\n", TZ_KELVTOC(sc->tz_temperature)); shutdown_nice(RB_POWEROFF); } else if (sc->tz_validchecks == TZ_NOTIFYCOUNT) acpi_UserNotify("Thermal", sc->tz_handle, TZ_NOTIFY_CRITICAL); } else { sc->tz_validchecks = 0; } sc->tz_thflags = newflags; out: sc->tz_tmp_updating = 0; return_VOID; } /* * Turn off all the cooling devices. */ static void acpi_tz_all_off(struct acpi_tz_softc *sc) { int i; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; /* Scan all the _ALx objects and turn them all off. */ for (i = 0; i < TZ_NUMLEVELS; i++) { if (sc->tz_zone.al[i].Pointer == NULL) continue; acpi_ForeachPackageObject((ACPI_OBJECT *)sc->tz_zone.al[i].Pointer, acpi_tz_switch_cooler_off, sc); } /* * XXX revert any passive-cooling options. */ sc->tz_active = TZ_ACTIVE_NONE; sc->tz_thflags = TZ_THFLAG_NONE; return_VOID; } /* * Given an object, verify that it's a reference to a device of some sort, * and try to switch it off. */ static void acpi_tz_switch_cooler_off(ACPI_OBJECT *obj, void *arg) { ACPI_HANDLE cooler; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; cooler = acpi_GetReference(NULL, obj); if (cooler == NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get handle\n")); return_VOID; } ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to turn %s off\n", acpi_name(cooler))); acpi_pwr_switch_consumer(cooler, ACPI_STATE_D3); return_VOID; } /* * Given an object, verify that it's a reference to a device of some sort, * and try to switch it on. * * XXX replication of off/on function code is bad. 
*/ static void acpi_tz_switch_cooler_on(ACPI_OBJECT *obj, void *arg) { struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg; ACPI_HANDLE cooler; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; cooler = acpi_GetReference(NULL, obj); if (cooler == NULL) { ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "can't get handle\n")); return_VOID; } ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "called to turn %s on\n", acpi_name(cooler))); status = acpi_pwr_switch_consumer(cooler, ACPI_STATE_D0); if (ACPI_FAILURE(status)) { ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev), "failed to activate %s - %s\n", acpi_name(cooler), AcpiFormatException(status)); } return_VOID; } /* * Read/debug-print a parameter, default it to -1. */ static void acpi_tz_getparam(struct acpi_tz_softc *sc, char *node, int *data) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; if (ACPI_FAILURE(acpi_GetInteger(sc->tz_handle, node, data))) { *data = -1; } else { ACPI_DEBUG_PRINT((ACPI_DB_VALUES, "%s.%s = %d\n", acpi_name(sc->tz_handle), node, *data)); } return_VOID; } /* * Sanity-check a temperature value. Assume that setpoints * should be between 0C and 150C. */ static void acpi_tz_sanity(struct acpi_tz_softc *sc, int *val, char *what) { if (*val != -1 && (*val < TZ_ZEROC || *val > TZ_ZEROC + 1500)) { device_printf(sc->tz_dev, "%s value is absurd, ignored (%d.%dC)\n", what, TZ_KELVTOC(*val)); *val = -1; } } /* * Respond to a sysctl on the active state node. 
*/ static int acpi_tz_active_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_tz_softc *sc; int active; int error; ACPI_LOCK_DECL; ACPI_LOCK; sc = (struct acpi_tz_softc *)oidp->oid_arg1; active = sc->tz_active; error = sysctl_handle_int(oidp, &active, 0, req); /* Error or no new value */ if (error != 0 || req->newptr == NULL) goto out; if (active < -1 || active >= TZ_NUMLEVELS) { error = EINVAL; goto out; } /* Set new preferred level and re-switch */ sc->tz_requested = active; acpi_tz_monitor(sc); out: ACPI_UNLOCK; return (error); } /* * Respond to a Notify event sent to the zone. */ static void acpi_tz_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { struct acpi_tz_softc *sc = (struct acpi_tz_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_ASSERTLOCK; switch(notify) { case TZ_NOTIFY_TEMPERATURE: /* Temperature change occurred */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, acpi_tz_monitor, sc); break; case TZ_NOTIFY_DEVICES: case TZ_NOTIFY_LEVELS: /* Zone devices/setpoints changed */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, (OSD_EXECUTION_CALLBACK)acpi_tz_establish, sc); break; default: ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev), "unknown Notify event 0x%x\n", notify); break; } acpi_UserNotify("Thermal", h, notify); return_VOID; } /* * Poll the thermal zone. */ static void acpi_tz_timeout(struct acpi_tz_softc *sc) { /* Do we need to get the power profile settings? */ if (sc->tz_flags & TZ_FLAG_GETPROFILE) { acpi_tz_power_profile((void *)sc); sc->tz_flags &= ~TZ_FLAG_GETPROFILE; } ACPI_ASSERTLOCK; /* Check the current temperature and take action based on it */ acpi_tz_monitor(sc); /* XXX passive cooling actions? */ } /* * System power profile may have changed; fetch and notify the * thermal zone accordingly. * * Since this can be called from an arbitrary eventhandler, it needs * to get the ACPI lock itself. 
*/ static void acpi_tz_power_profile(void *arg) { ACPI_STATUS status; struct acpi_tz_softc *sc = (struct acpi_tz_softc *)arg; int state; ACPI_LOCK_DECL; state = power_profile_get_state(); if (state != POWER_PROFILE_PERFORMANCE && state != POWER_PROFILE_ECONOMY) return; ACPI_LOCK; /* check that we haven't decided there's no _SCP method */ if ((sc->tz_flags & TZ_FLAG_NO_SCP) == 0) { /* Call _SCP to set the new profile */ status = acpi_SetInteger(sc->tz_handle, "_SCP", (state == POWER_PROFILE_PERFORMANCE) ? 0 : 1); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) ACPI_VPRINT(sc->tz_dev, acpi_device_get_parent_softc(sc->tz_dev), "can't evaluate %s._SCP - %s\n", acpi_name(sc->tz_handle), AcpiFormatException(status)); sc->tz_flags |= TZ_FLAG_NO_SCP; } else { /* We have to re-evaluate the entire zone now */ AcpiOsQueueForExecution(OSD_PRIORITY_HIGH, (OSD_EXECUTION_CALLBACK)acpi_tz_establish, sc); } } ACPI_UNLOCK; } /* * Thermal zone monitor thread. */ static void acpi_tz_thread(void *arg) { device_t *devs; int devcount, i; ACPI_LOCK_DECL; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); devs = NULL; devcount = 0; for (;;) { tsleep(&acpi_tz_proc, PZERO, "tzpoll", hz * acpi_tz_polling_rate); #if __FreeBSD_version >= 500000 mtx_lock(&Giant); #endif if (devcount == 0) devclass_get_devices(acpi_tz_devclass, &devs, &devcount); ACPI_LOCK; for (i = 0; i < devcount; i++) acpi_tz_timeout(device_get_softc(devs[i])); ACPI_UNLOCK; #if __FreeBSD_version >= 500000 mtx_unlock(&Giant); #endif } } Index: head/sys/dev/acpica/acpi_timer.c =================================================================== --- head/sys/dev/acpica/acpi_timer.c (revision 129878) +++ head/sys/dev/acpica/acpi_timer.c (revision 129879) @@ -1,339 +1,340 @@ /*- * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_acpi.h" #include #include #include +#include #include #include #include #include #include #include "acpi.h" #include #include /* * A timecounter based on the free-running ACPI timer. * * Based on the i386-only mp_clock.c by . 
*/ /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_TIMER ACPI_MODULE_NAME("TIMER") static device_t acpi_timer_dev; static struct resource *acpi_timer_reg; static bus_space_handle_t acpi_timer_bsh; static bus_space_tag_t acpi_timer_bst; static u_int acpi_timer_frequency = 14318182 / 4; static void acpi_timer_identify(driver_t *driver, device_t parent); static int acpi_timer_probe(device_t dev); static int acpi_timer_attach(device_t dev); static u_int acpi_timer_get_timecount(struct timecounter *tc); static u_int acpi_timer_get_timecount_safe(struct timecounter *tc); static int acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS); static void acpi_timer_boot_test(void); static u_int acpi_timer_read(void); static int acpi_timer_test(void); static device_method_t acpi_timer_methods[] = { DEVMETHOD(device_identify, acpi_timer_identify), DEVMETHOD(device_probe, acpi_timer_probe), DEVMETHOD(device_attach, acpi_timer_attach), {0, 0} }; static driver_t acpi_timer_driver = { "acpi_timer", acpi_timer_methods, 0, }; static devclass_t acpi_timer_devclass; DRIVER_MODULE(acpi_timer, acpi, acpi_timer_driver, acpi_timer_devclass, 0, 0); MODULE_DEPEND(acpi_timer, acpi, 1, 1, 1); static struct timecounter acpi_timer_timecounter = { acpi_timer_get_timecount_safe, /* get_timecount function */ 0, /* no poll_pps */ 0, /* no default counter_mask */ 0, /* no default frequency */ "ACPI", /* name */ 1000 /* quality */ }; static u_int acpi_timer_read() { return (bus_space_read_4(acpi_timer_bst, acpi_timer_bsh, 0)); } /* * Locate the ACPI timer using the FADT, set up and allocate the I/O resources * we will be using. 
*/ static void acpi_timer_identify(driver_t *driver, device_t parent) { device_t dev; char desc[40]; u_long rlen, rstart; int i, j, rid, rtype; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("timer") || AcpiGbl_FADT == NULL) return_VOID; if ((dev = BUS_ADD_CHILD(parent, 0, "acpi_timer", 0)) == NULL) { device_printf(parent, "could not add acpi_timer0\n"); return_VOID; } acpi_timer_dev = dev; rid = 0; rlen = AcpiGbl_FADT->PmTmLen; rtype = (AcpiGbl_FADT->XPmTmrBlk.AddressSpaceId) ? SYS_RES_IOPORT : SYS_RES_MEMORY; rstart = AcpiGbl_FADT->XPmTmrBlk.Address; bus_set_resource(dev, rtype, rid, rstart, rlen); acpi_timer_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE); if (acpi_timer_reg == NULL) { device_printf(dev, "couldn't allocate I/O resource (%s 0x%lx)\n", rtype == SYS_RES_IOPORT ? "port" : "mem", rstart); return_VOID; } acpi_timer_bsh = rman_get_bushandle(acpi_timer_reg); acpi_timer_bst = rman_get_bustag(acpi_timer_reg); if (AcpiGbl_FADT->TmrValExt != 0) acpi_timer_timecounter.tc_counter_mask = 0xffffffff; else acpi_timer_timecounter.tc_counter_mask = 0x00ffffff; acpi_timer_timecounter.tc_frequency = acpi_timer_frequency; if (testenv("debug.acpi.timer_test")) acpi_timer_boot_test(); /* * If all tests of the counter succeed, use the ACPI-fast method. If * at least one failed, default to using the safe routine, which reads * the timer multiple times to get a consistent value before returning. */ j = 0; for (i = 0; i < 10; i++) j += acpi_timer_test(); if (j == 10) { acpi_timer_timecounter.tc_name = "ACPI-fast"; acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount; } else { acpi_timer_timecounter.tc_name = "ACPI-safe"; acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount_safe; } tc_init(&acpi_timer_timecounter); sprintf(desc, "%d-bit timer at 3.579545MHz", AcpiGbl_FADT->TmrValExt ? 
32 : 24); device_set_desc_copy(dev, desc); return_VOID; } static int acpi_timer_probe(device_t dev) { if (dev == acpi_timer_dev) return (0); return (ENXIO); } static int acpi_timer_attach(device_t dev) { return (0); } /* * Fetch current time value from reliable hardware. */ static u_int acpi_timer_get_timecount(struct timecounter *tc) { return (acpi_timer_read()); } /* * Fetch current time value from hardware that may not correctly * latch the counter. We need to read until we have three monotonic * samples and then use the middle one, otherwise we are not protected * against the fact that the bits can be wrong in two directions. If * we only cared about monosity, two reads would be enough. */ static u_int acpi_timer_get_timecount_safe(struct timecounter *tc) { u_int u1, u2, u3; u2 = acpi_timer_read(); u3 = acpi_timer_read(); do { u1 = u2; u2 = u3; u3 = acpi_timer_read(); } while (u1 > u2 || u2 > u3); return (u2); } /* * Timecounter freqency adjustment interface. */ static int acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS) { int error; u_int freq; if (acpi_timer_timecounter.tc_frequency == 0) return (EOPNOTSUPP); freq = acpi_timer_frequency; error = sysctl_handle_int(oidp, &freq, sizeof(freq), req); if (error == 0 && req->newptr != NULL) { acpi_timer_frequency = freq; acpi_timer_timecounter.tc_frequency = acpi_timer_frequency; } return (error); } SYSCTL_PROC(_machdep, OID_AUTO, acpi_timer_freq, CTLTYPE_INT | CTLFLAG_RW, 0, sizeof(u_int), acpi_timer_sysctl_freq, "I", ""); /* * Some ACPI timers are known or believed to suffer from implementation * problems which can lead to erroneous values being read. This function * tests for consistent results from the timer and returns 1 if it believes * the timer is consistent, otherwise it returns 0. * * It appears the cause is that the counter is not latched to the PCI bus * clock when read: * * ] 20. ACPI Timer Errata * ] * ] Problem: The power management timer may return improper result when * ] read. 
Although the timer value settles properly after incrementing, * ] while incrementing there is a 3nS window every 69.8nS where the * ] timer value is indeterminate (a 4.2% chance that the data will be * ] incorrect when read). As a result, the ACPI free running count up * ] timer specification is violated due to erroneous reads. Implication: * ] System hangs due to the "inaccuracy" of the timer when used by * ] software for time critical events and delays. * ] * ] Workaround: Read the register twice and compare. * ] Status: This will not be fixed in the PIIX4 or PIIX4E, it is fixed * ] in the PIIX4M. */ #define N 2000 static int acpi_timer_test() { uint32_t last, this; int min, max, n, delta; register_t s; min = 10000000; max = 0; /* Test the timer with interrupts disabled to get accurate results. */ s = intr_disable(); last = acpi_timer_read(); for (n = 0; n < N; n++) { this = acpi_timer_read(); delta = acpi_TimerDelta(this, last); if (delta > max) max = delta; else if (delta < min) min = delta; last = this; } intr_restore(s); if (max - min > 2) n = 0; else if (min < 0 || max == 0) n = 0; else n = 1; if (bootverbose) { printf("ACPI timer looks %s min = %d, max = %d, width = %d\n", n ? "GOOD" : "BAD ", min, max, max - min); } return (n); } #undef N /* * Test harness for verifying ACPI timer behaviour. * Boot with debug.acpi.timer_test set to invoke this. */ static void acpi_timer_boot_test(void) { uint32_t u1, u2, u3; u1 = acpi_timer_read(); u2 = acpi_timer_read(); u3 = acpi_timer_read(); device_printf(acpi_timer_dev, "timer test in progress, reboot to quit.\n"); for (;;) { /* * The failure case is where u3 > u1, but u2 does not fall between * the two, ie. it contains garbage. 
*/ if (u3 > u1) { if (u2 < u1 || u2 > u3) device_printf(acpi_timer_dev, "timer is not monotonic: 0x%08x,0x%08x,0x%08x\n", u1, u2, u3); } u1 = u2; u2 = u3; u3 = acpi_timer_read(); } } Index: head/sys/dev/acpica/acpi_video.c =================================================================== --- head/sys/dev/acpica/acpi_video.c (revision 129878) +++ head/sys/dev/acpica/acpi_video.c (revision 129879) @@ -1,931 +1,932 @@ /*- * Copyright (c) 2002-2003 Taku YAMAMOTO * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: acpi_vid.c,v 1.4 2003/10/13 10:07:36 taku Exp $ * $FreeBSD$ */ #include #include #include +#include #include #include #include #include #include "acpi.h" #include /* ACPI video extension driver. 
*/ struct acpi_video_output { ACPI_HANDLE handle; UINT32 adr; STAILQ_ENTRY(acpi_video_output) vo_next; struct { int num; STAILQ_ENTRY(acpi_video_output) next; } vo_unit; int vo_brightness; int vo_fullpower; int vo_economy; int vo_numlevels; int *vo_levels; struct sysctl_ctx_list vo_sysctl_ctx; struct sysctl_oid *vo_sysctl_tree; }; STAILQ_HEAD(acpi_video_output_queue, acpi_video_output); struct acpi_video_softc { device_t device; ACPI_HANDLE handle; STAILQ_HEAD(, acpi_video_output) vid_outputs; eventhandler_tag vid_pwr_evh; }; /* interfaces */ static int acpi_video_modevent(struct module*, int, void *); static int acpi_video_probe(device_t); static int acpi_video_attach(device_t); static int acpi_video_detach(device_t); static int acpi_video_shutdown(device_t); static void acpi_video_notify_handler(ACPI_HANDLE, UINT32, void *); static void acpi_video_power_profile(void *); static void acpi_video_bind_outputs(struct acpi_video_softc *); static struct acpi_video_output *acpi_video_vo_init(UINT32); static void acpi_video_vo_bind(struct acpi_video_output *, ACPI_HANDLE); static void acpi_video_vo_destroy(struct acpi_video_output *); static int acpi_video_vo_check_level(struct acpi_video_output *, int); static int acpi_video_vo_active_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_video_vo_bright_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_video_vo_presets_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_video_vo_levels_sysctl(SYSCTL_HANDLER_ARGS); /* operations */ static int vid_check_requirements(ACPI_HANDLE); static void vid_set_switch_policy(ACPI_HANDLE, UINT32); static int vid_enum_outputs(ACPI_HANDLE, void(*)(ACPI_HANDLE, UINT32, void *), void *); static int vo_query_brightness_levels(ACPI_HANDLE, int **); static void vo_set_brightness(ACPI_HANDLE, int); static UINT32 vo_get_device_status(ACPI_HANDLE); static UINT32 vo_query_graphics_state(ACPI_HANDLE); static void vo_set_device_state(ACPI_HANDLE, UINT32); /* events */ #define VID_NOTIFY_SWITCHED 0x80 #define 
VID_NOTIFY_REPROBE 0x81 /* _DOS (Enable/Disable Output Switching) argument bits */ #define DOS_SWITCH_MASK ((UINT32)3) #define DOS_SWITCH_BY_OSPM ((UINT32)0) #define DOS_SWITCH_BY_BIOS ((UINT32)1) #define DOS_SWITCH_LOCKED ((UINT32)2) #define DOS_BRIGHTNESS_BY_BIOS ((UINT32)1 << 2) /* _DOD and subdev's _ADR */ #define DOD_DEVID_MASK ((UINT32)0xffff) #define DOD_DEVID_MONITOR ((UINT32)0x0100) #define DOD_DEVID_PANEL ((UINT32)0x0110) #define DOD_DEVID_TV ((UINT32)0x0200) #define DOD_BIOS ((UINT32)1 << 16) #define DOD_NONVGA ((UINT32)1 << 17) #define DOD_HEAD_ID_SHIFT 18 #define DOD_HEAD_ID_BITS 3 #define DOD_HEAD_ID_MASK \ ((((UINT32)1 << DOD_HEAD_ID_BITS) - 1) << DOD_HEAD_ID_SHIFT) /* _BCL related constants */ #define BCL_FULLPOWER 0 #define BCL_ECONOMY 1 /* _DCS (Device Currrent Status) value bits and masks. */ #define DCS_EXISTS ((UINT32)1 << 0) #define DCS_ACTIVE ((UINT32)1 << 1) #define DCS_READY ((UINT32)1 << 2) #define DCS_FUNCTIONAL ((UINT32)1 << 3) #define DCS_ATTACHED ((UINT32)1 << 4) /* _DSS (Device Set Status) argument bits and masks. 
*/ #define DSS_INACTIVE ((UINT32)0) #define DSS_ACTIVE ((UINT32)1 << 0) #define DSS_ACTIVITY ((UINT32)1 << 0) #define DSS_SETNEXT ((UINT32)1 << 30) #define DSS_COMMIT ((UINT32)1 << 31) static device_method_t acpi_video_methods[] = { DEVMETHOD(device_probe, acpi_video_probe), DEVMETHOD(device_attach, acpi_video_attach), DEVMETHOD(device_detach, acpi_video_detach), DEVMETHOD(device_shutdown, acpi_video_shutdown), { 0, 0 } }; static driver_t acpi_video_driver = { "acpi_video", acpi_video_methods, sizeof(struct acpi_video_softc), }; static devclass_t acpi_video_devclass; DRIVER_MODULE(acpi_video, acpi, acpi_video_driver, acpi_video_devclass, acpi_video_modevent, NULL); MODULE_DEPEND(acpi_video, acpi, 1, 1, 1); struct sysctl_ctx_list acpi_video_sysctl_ctx; struct sysctl_oid *acpi_video_sysctl_tree; static struct acpi_video_output_queue lcd_units, crt_units, tv_units, other_units; MALLOC_DEFINE(M_ACPIVIDEO, "acpivideo", "ACPI video extension"); static int acpi_video_modevent(struct module *mod __unused, int evt, void *cookie __unused) { int err = 0; switch (evt) { case MOD_LOAD: acpi_video_sysctl_tree = NULL; sysctl_ctx_init(&acpi_video_sysctl_ctx); STAILQ_INIT(&lcd_units); STAILQ_INIT(&crt_units); STAILQ_INIT(&tv_units); STAILQ_INIT(&other_units); break; case MOD_UNLOAD: acpi_video_sysctl_tree = NULL; sysctl_ctx_free(&acpi_video_sysctl_ctx); break; default: err = EINVAL; } return (err); } static int acpi_video_probe(device_t dev) { int err = ENXIO; ACPI_HANDLE handle; ACPI_LOCK_DECL; ACPI_LOCK; handle = acpi_get_handle(dev); if (acpi_get_type(dev) == ACPI_TYPE_DEVICE && !acpi_disabled("video") && vid_check_requirements(handle)) { device_set_desc(dev, "ACPI video extension"); err = 0; } ACPI_UNLOCK; return (err); } static int acpi_video_attach(device_t dev) { struct acpi_softc *acpi_sc; struct acpi_video_softc *sc; ACPI_LOCK_DECL; sc = device_get_softc(dev); ACPI_LOCK; acpi_sc = acpi_device_get_parent_softc(dev); if (acpi_video_sysctl_tree == NULL && acpi_sc != NULL) { 
acpi_video_sysctl_tree = SYSCTL_ADD_NODE(&acpi_video_sysctl_ctx, SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "video", CTLFLAG_RD, 0, "video extension control"); } sc->device = dev; sc->handle = acpi_get_handle(dev); STAILQ_INIT(&sc->vid_outputs); AcpiInstallNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY, acpi_video_notify_handler, sc); sc->vid_pwr_evh = EVENTHANDLER_REGISTER(power_profile_change, acpi_video_power_profile, sc, 0); acpi_video_bind_outputs(sc); vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_OSPM); ACPI_UNLOCK; acpi_video_power_profile(sc); return (0); } static int acpi_video_detach(device_t dev) { struct acpi_video_softc *sc; struct acpi_video_output *vo, *vn; ACPI_LOCK_DECL; sc = device_get_softc(dev); ACPI_LOCK; vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_BIOS); EVENTHANDLER_DEREGISTER(power_profile_change, sc->vid_pwr_evh); AcpiRemoveNotifyHandler(sc->handle, ACPI_DEVICE_NOTIFY, acpi_video_notify_handler); for (vo = STAILQ_FIRST(&sc->vid_outputs); vo != NULL; vo = vn) { vn = STAILQ_NEXT(vo, vo_next); acpi_video_vo_destroy(vo); } ACPI_UNLOCK; return (0); } static int acpi_video_shutdown(device_t dev) { struct acpi_video_softc *sc; ACPI_LOCK_DECL; sc = device_get_softc(dev); ACPI_LOCK; vid_set_switch_policy(sc->handle, DOS_SWITCH_BY_BIOS); ACPI_UNLOCK; return (0); } static void acpi_video_notify_handler(ACPI_HANDLE handle __unused, UINT32 notify, void *context) { struct acpi_video_softc *sc; struct acpi_video_output *vo; ACPI_HANDLE lasthand = NULL; UINT32 dcs, dss, dss_p = 0; ACPI_ASSERTLOCK; sc = context; switch (notify) { case VID_NOTIFY_SWITCHED: STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { dss = vo_query_graphics_state(vo->handle); dcs = vo_get_device_status(vo->handle); if (!(dcs & DCS_READY)) dss = DSS_INACTIVE; if (((dcs & DCS_ACTIVE) && dss == DSS_INACTIVE) || (!(dcs & DCS_ACTIVE) && dss == DSS_ACTIVE)) { if (lasthand != NULL) vo_set_device_state(lasthand, dss_p); dss_p = dss; lasthand = vo->handle; } } if (lasthand != NULL) 
vo_set_device_state(lasthand, dss_p|DSS_COMMIT); break; case VID_NOTIFY_REPROBE: STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) vo->handle = NULL; acpi_video_bind_outputs(sc); STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { if (vo->handle == NULL) { STAILQ_REMOVE(&sc->vid_outputs, vo, acpi_video_output, vo_next); acpi_video_vo_destroy(vo); } } break; default: device_printf(sc->device, "unknown notify event 0x%x\n", notify); } } static void acpi_video_power_profile(void *context) { int state; struct acpi_video_softc *sc; struct acpi_video_output *vo; ACPI_LOCK_DECL; sc = context; state = power_profile_get_state(); if (state != POWER_PROFILE_PERFORMANCE && state != POWER_PROFILE_ECONOMY) return; ACPI_LOCK; STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { if (vo->vo_levels != NULL && vo->vo_brightness == -1) vo_set_brightness(vo->handle, state == POWER_PROFILE_ECONOMY ? vo->vo_economy : vo->vo_fullpower); } ACPI_UNLOCK; } static void acpi_video_bind_outputs_subr(ACPI_HANDLE handle, UINT32 adr, void *context) { struct acpi_video_softc *sc; struct acpi_video_output *vo; sc = context; STAILQ_FOREACH(vo, &sc->vid_outputs, vo_next) { if (vo->adr == adr) { acpi_video_vo_bind(vo, handle); return; } } vo = acpi_video_vo_init(adr); if (vo != NULL) { acpi_video_vo_bind(vo, handle); STAILQ_INSERT_TAIL(&sc->vid_outputs, vo, vo_next); } } static void acpi_video_bind_outputs(struct acpi_video_softc *sc) { ACPI_ASSERTLOCK; vid_enum_outputs(sc->handle, acpi_video_bind_outputs_subr, sc); } static struct acpi_video_output * acpi_video_vo_init(UINT32 adr) { struct acpi_video_output *vn, *vo, *vp; int n, x; char name[64], env[128]; const char *type, *desc; struct acpi_video_output_queue *voqh; switch (adr & DOD_DEVID_MASK) { case DOD_DEVID_MONITOR: desc = "CRT monitor"; type = "crt"; voqh = &crt_units; break; case DOD_DEVID_PANEL: desc = "LCD panel"; type = "lcd"; voqh = &lcd_units; break; case DOD_DEVID_TV: desc = "TV"; type = "tv"; voqh = &tv_units; break; default: desc = "unknown 
output"; type = "out"; voqh = &other_units; } n = 0; vn = vp = NULL; /* XXX - needs locking for protecting STAILQ xxx_units. */ STAILQ_FOREACH(vn, voqh, vo_unit.next) { if (vn->vo_unit.num != n) break; vp = vn; n++; } snprintf(name, 64, "%s%d", type, n); vo = malloc(sizeof(*vo), M_ACPIVIDEO, M_NOWAIT); if (vo != NULL) { vo->handle = NULL; vo->adr = adr; vo->vo_unit.num = n; vo->vo_brightness = -1; vo->vo_fullpower = -1; /* TODO: override with tunables */ vo->vo_economy = -1; vo->vo_numlevels = 0; vo->vo_levels = NULL; snprintf(env, 128, "hw.acpi.video.%s.fullpower", name); if (getenv_int(env, &x)) vo->vo_fullpower = x; snprintf(env, 128, "hw.acpi.video.%s.economy", name); if (getenv_int(env, &x)) vo->vo_economy = x; sysctl_ctx_init(&vo->vo_sysctl_ctx); if (vp != NULL) STAILQ_INSERT_AFTER(voqh, vp, vo, vo_unit.next); else STAILQ_INSERT_TAIL(voqh, vo, vo_unit.next); if (acpi_video_sysctl_tree != NULL) vo->vo_sysctl_tree = SYSCTL_ADD_NODE(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(acpi_video_sysctl_tree), OID_AUTO, name, CTLFLAG_RD, 0, desc); if (vo->vo_sysctl_tree != NULL) { SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "active", CTLTYPE_INT|CTLFLAG_RW, vo, 0, acpi_video_vo_active_sysctl, "I", "current activity of this device"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "brightness", CTLTYPE_INT|CTLFLAG_RW, vo, 0, acpi_video_vo_bright_sysctl, "I", "current brightness level"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "fullpower", CTLTYPE_INT|CTLFLAG_RW, vo, POWER_PROFILE_PERFORMANCE, acpi_video_vo_presets_sysctl, "I", "preset level for full power mode"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "economy", CTLTYPE_INT|CTLFLAG_RW, vo, POWER_PROFILE_ECONOMY, acpi_video_vo_presets_sysctl, "I", "preset level for economy mode"); SYSCTL_ADD_PROC(&vo->vo_sysctl_ctx, SYSCTL_CHILDREN(vo->vo_sysctl_tree), OID_AUTO, "levels", 
CTLTYPE_OPAQUE|CTLFLAG_RD, vo, 0, acpi_video_vo_levels_sysctl, "I", "supported brightness levels"); } else printf("%s: sysctl node creation failed\n", type); } else printf("%s: softc allocation failed\n", type); /* XXX unlock here - needs locking for protecting STAILQ xxx_units. */ if (bootverbose) { printf("found %s(%x)", desc, (unsigned int)(adr & DOD_DEVID_MASK)); if (adr & DOD_BIOS) printf(", detectable by BIOS"); if (adr & DOD_NONVGA) printf(" (not a VGA output)"); printf(", head #%d\n", (int)((adr & DOD_HEAD_ID_MASK) >> DOD_HEAD_ID_SHIFT)); } return vo; } static void acpi_video_vo_bind(struct acpi_video_output *vo, ACPI_HANDLE handle) { ACPI_ASSERTLOCK; if (vo->vo_levels != NULL) AcpiOsFree(vo->vo_levels); vo->handle = handle; vo->vo_numlevels = vo_query_brightness_levels(handle, &vo->vo_levels); if (vo->vo_numlevels >= 2) { if (vo->vo_fullpower == -1 || acpi_video_vo_check_level(vo, vo->vo_fullpower) != 0) /* XXX - can't deal with rebinding... */ vo->vo_fullpower = vo->vo_levels[BCL_FULLPOWER]; if (vo->vo_economy == -1 || acpi_video_vo_check_level(vo, vo->vo_economy) != 0) /* XXX - see above. */ vo->vo_economy = vo->vo_levels[BCL_ECONOMY]; } } static void acpi_video_vo_destroy(struct acpi_video_output *vo) { struct acpi_video_output_queue *voqh; ACPI_ASSERTLOCK; if (vo->vo_sysctl_tree != NULL) { vo->vo_sysctl_tree = NULL; sysctl_ctx_free(&vo->vo_sysctl_ctx); } if (vo->vo_levels != NULL) AcpiOsFree(vo->vo_levels); switch (vo->adr & DOD_DEVID_MASK) { case DOD_DEVID_MONITOR: voqh = &crt_units; break; case DOD_DEVID_PANEL: voqh = &lcd_units; break; case DOD_DEVID_TV: voqh = &tv_units; break; default: voqh = &other_units; } /* XXX - needs locking for protecting STAILQ xxx_units. 
 */
	STAILQ_REMOVE(voqh, vo, acpi_video_output, vo_unit.next);
	free(vo, M_ACPIVIDEO);
}

/*
 * Return 0 if 'level' is one of the brightness levels reported by _BCL,
 * ENODEV if no level table is cached, EINVAL otherwise.
 */
static int
acpi_video_vo_check_level(struct acpi_video_output *vo, int level)
{
	int i;

	if (vo->vo_levels == NULL)
		return (ENODEV);
	for (i = 0; i < vo->vo_numlevels; i++)
		if (vo->vo_levels[i] == level)
			return (0);
	return (EINVAL);
}

/*
 * sysctl handler for the per-output "active" node: reads _DCS to report
 * activity, and on write drives _DSS to (de)activate the output.
 */
/* ARGSUSED */
static int
acpi_video_vo_active_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_video_output *vo;
	int state, err;
	ACPI_LOCK_DECL;

	ACPI_LOCK;
	vo = (struct acpi_video_output *)arg1;
	if (vo->handle == NULL) {
		/* Output is currently unbound (e.g. after a reprobe). */
		err = ENXIO;
		goto out;
	}
	state = vo_get_device_status(vo->handle) & DCS_ACTIVE? 1 : 0;
	err = sysctl_handle_int(oidp, &state, 0, req);
	if (err != 0 || req->newptr == NULL)
		goto out;
	/* Write path: commit the requested activity state immediately. */
	vo_set_device_state(vo->handle,
	    DSS_COMMIT | (state? DSS_ACTIVE : DSS_INACTIVE));
out:
	ACPI_UNLOCK;
	return (err);
}

/*
 * sysctl handler for the per-output "brightness" node.  -1 means "track
 * the power-profile preset"; 0..100 selects an explicit _BCL level.
 */
/* ARGSUSED */
static int
acpi_video_vo_bright_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_video_output *vo;
	int level, preset, err;
	ACPI_LOCK_DECL;

	ACPI_LOCK;
	vo = (struct acpi_video_output *)arg1;
	if (vo->handle == NULL) {
		err = ENXIO;
		goto out;
	}
	if (vo->vo_levels == NULL) {
		/* No _BCL table: brightness control unsupported. */
		err = ENODEV;
		goto out;
	}
	/* Preset that applies under the current power profile. */
	preset = (power_profile_get_state() == POWER_PROFILE_ECONOMY ?
	    vo->vo_economy : vo->vo_fullpower);
	level = vo->vo_brightness;
	if (level == -1)
		level = preset;

	err = sysctl_handle_int(oidp, &level, 0, req);
	if (err != 0 || req->newptr == NULL)
		goto out;
	if (level < -1 || level > 100) {
		err = EINVAL;
		goto out;
	}
	/* Explicit levels must exist in the _BCL table. */
	if (level != -1 && (err = acpi_video_vo_check_level(vo, level)))
		goto out;

	vo->vo_brightness = level;
	vo_set_brightness(vo->handle, level == -1? preset : level);
out:
	ACPI_UNLOCK;
	return (err);
}

/*
 * sysctl handler for the "fullpower"/"economy" preset nodes; arg2 selects
 * which preset (POWER_PROFILE_PERFORMANCE or POWER_PROFILE_ECONOMY).
 */
static int
acpi_video_vo_presets_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_video_output *vo;
	int level, *preset, err = 0;
	ACPI_LOCK_DECL;

	ACPI_LOCK;
	vo = (struct acpi_video_output *)arg1;
	if (vo->handle == NULL) {
		err = ENXIO;
		goto out;
	}
	if (vo->vo_levels == NULL) {
		err = ENODEV;
		goto out;
	}
	preset = (arg2 == POWER_PROFILE_ECONOMY ?
&vo->vo_economy : &vo->vo_fullpower); level = *preset; err = sysctl_handle_int(oidp, &level, 0, req); if (err != 0 || req->newptr == NULL) goto out; if (level < -1 || level > 100) { err = EINVAL; goto out; } if (level == -1) level = vo->vo_levels [arg2 == POWER_PROFILE_ECONOMY ? BCL_ECONOMY : BCL_FULLPOWER]; else if ((err = acpi_video_vo_check_level(vo, level)) != 0) goto out; if (vo->vo_brightness == -1 && (power_profile_get_state() == arg2)) vo_set_brightness(vo->handle, level); *preset = level; out: ACPI_UNLOCK; return (err); } /* ARGSUSED */ static int acpi_video_vo_levels_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_video_output *vo; int err; ACPI_LOCK_DECL; ACPI_LOCK; vo = (struct acpi_video_output *)arg1; if (vo->vo_levels == NULL) { err = ENODEV; goto out; } if (req->newptr != NULL) { err = EPERM; goto out; } err = sysctl_handle_opaque(oidp, vo->vo_levels, vo->vo_numlevels * sizeof *vo->vo_levels, req); out: ACPI_UNLOCK; return (err); } static int vid_check_requirements(ACPI_HANDLE handle) { ACPI_HANDLE h_dod, h_dos; ACPI_OBJECT_TYPE t_dos; ACPI_ASSERTLOCK; /* check for _DOD, _DOS methods */ return (ACPI_SUCCESS(AcpiGetHandle(handle, "_DOD", &h_dod)) && ACPI_SUCCESS(AcpiGetHandle(handle, "_DOS", &h_dos)) && ACPI_SUCCESS(AcpiGetType(h_dos, &t_dos)) && t_dos == ACPI_TYPE_METHOD); } static void vid_set_switch_policy(ACPI_HANDLE handle, UINT32 policy) { ACPI_STATUS status; ACPI_ASSERTLOCK; status = acpi_SetInteger(handle, "_DOS", policy); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DOS - %s\n", acpi_name(handle), AcpiFormatException(status)); } struct enum_callback_arg { void (*callback)(ACPI_HANDLE, UINT32, void *); void *context; ACPI_OBJECT *dod_pkg; }; static ACPI_STATUS vid_enum_outputs_subr(ACPI_HANDLE handle, UINT32 level __unused, void *context, void **retp) { ACPI_STATUS status; ACPI_OBJECT *tmp; UINT32 adr; struct enum_callback_arg *argset; size_t i; argset = context; status = acpi_GetInteger(handle, "_ADR", &adr); if (ACPI_SUCCESS(status)) { 
for (i = 0; i < argset->dod_pkg->Package.Count; i++) { tmp = &argset->dod_pkg->Package.Elements[i]; if (tmp != NULL && tmp->Type == ACPI_TYPE_INTEGER && (tmp->Integer.Value & DOD_DEVID_MASK) == adr) { argset->callback(handle, tmp->Integer.Value, argset->context); (**(int**)retp)++; } } } return (AE_OK); } static int vid_enum_outputs(ACPI_HANDLE handle, void (*callback)(ACPI_HANDLE, UINT32, void *), void *context) { ACPI_STATUS status; ACPI_BUFFER dod_buf; ACPI_OBJECT *res; int num = 0; void *pnum; struct enum_callback_arg argset; ACPI_ASSERTLOCK; dod_buf.Length = ACPI_ALLOCATE_BUFFER; dod_buf.Pointer = NULL; status = AcpiEvaluateObject(handle, "_DOD", NULL, &dod_buf); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) printf("can't evaluate %s._DOD - %s\n", acpi_name(handle), AcpiFormatException(status)); num = -1; goto out; } res = (ACPI_OBJECT *)dod_buf.Pointer; if (res == NULL || res->Type != ACPI_TYPE_PACKAGE) { printf("evaluation of %s._DOD makes no sense\n", acpi_name(handle)); num = -1; goto out; } if (callback == NULL) { num = res->Package.Count; goto out; } argset.callback = callback; argset.context = context; argset.dod_pkg = res; pnum = # status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, handle, 1, vid_enum_outputs_subr, &argset, &pnum); if (ACPI_FAILURE(status)) printf("failed walking down %s - %s\n", acpi_name(handle), AcpiFormatException(status)); out: if (dod_buf.Pointer != NULL) AcpiOsFree(dod_buf.Pointer); return (num); } static int vo_query_brightness_levels(ACPI_HANDLE handle, int **levelp) { ACPI_STATUS status; ACPI_BUFFER bcl_buf; ACPI_OBJECT *res, *tmp; int num = 0, i, n, *levels; ACPI_ASSERTLOCK; bcl_buf.Length = ACPI_ALLOCATE_BUFFER; bcl_buf.Pointer = NULL; status = AcpiEvaluateObject(handle, "_BCL", NULL, &bcl_buf); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) printf("can't evaluate %s._BCL - %s\n", acpi_name(handle), AcpiFormatException(status)); num = -1; goto out; } res = (ACPI_OBJECT *)bcl_buf.Pointer; if (res == NULL || 
res->Type != ACPI_TYPE_PACKAGE || res->Package.Count < 2) { printf("evaluation of %s._BCL makes no sense\n", acpi_name(handle)); num = -1; goto out; } num = res->Package.Count; if (levelp == NULL) goto out; levels = AcpiOsAllocate(num * sizeof *levels); if (levels == NULL) { num = -1; goto out; } for (i = 0, n = 0; i < num; i++) { tmp = &res->Package.Elements[i]; if (tmp != NULL && tmp->Type == ACPI_TYPE_INTEGER) levels[n++] = tmp->Integer.Value; } if (n < 2) { num = -1; AcpiOsFree(levels); } else { num = n; *levelp = levels; } out: if (bcl_buf.Pointer != NULL) AcpiOsFree(bcl_buf.Pointer); return (num); } static void vo_set_brightness(ACPI_HANDLE handle, int level) { ACPI_STATUS status; ACPI_ASSERTLOCK; status = acpi_SetInteger(handle, "_BCM", level); if (ACPI_FAILURE(status)) printf("can't evaluate %s._BCM - %s\n", acpi_name(handle), AcpiFormatException(status)); } static UINT32 vo_get_device_status(ACPI_HANDLE handle) { UINT32 dcs = 0; ACPI_STATUS status; ACPI_ASSERTLOCK; status = acpi_GetInteger(handle, "_DCS", &dcs); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DCS - %s\n", acpi_name(handle), AcpiFormatException(status)); return (dcs); } static UINT32 vo_query_graphics_state(ACPI_HANDLE handle) { UINT32 dgs = 0; ACPI_STATUS status; ACPI_ASSERTLOCK; status = acpi_GetInteger(handle, "_DGS", &dgs); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DGS - %s\n", acpi_name(handle), AcpiFormatException(status)); return (dgs); } static void vo_set_device_state(ACPI_HANDLE handle, UINT32 state) { ACPI_STATUS status; ACPI_ASSERTLOCK; status = acpi_SetInteger(handle, "_DSS", state); if (ACPI_FAILURE(status)) printf("can't evaluate %s._DSS - %s\n", acpi_name(handle), AcpiFormatException(status)); } Index: head/sys/dev/advansys/adv_isa.c =================================================================== --- head/sys/dev/advansys/adv_isa.c (revision 129878) +++ head/sys/dev/advansys/adv_isa.c (revision 129879) @@ -1,426 +1,427 @@ /* * Device probe and attach 
routines for the following * Advanced Systems Inc. SCSI controllers: * * Connectivity Products: * ABP510/5150 - Bus-Master ISA (240 CDB) * * ABP5140 - Bus-Master ISA PnP (16 CDB) * ** * ABP5142 - Bus-Master ISA PnP with floppy (16 CDB) *** * * Single Channel Products: * ABP542 - Bus-Master ISA with floppy (240 CDB) * ABP842 - Bus-Master VL (240 CDB) * * Dual Channel Products: * ABP852 - Dual Channel Bus-Master VL (240 CDB Per Channel) * * * This board has been shipped by HP with the 4020i CD-R drive. * The board has no BIOS so it cannot control a boot device, but * it can control any secondary SCSI device. * ** This board has been sold by SIIG as the i540 SpeedMaster. * *** This board has been sold by SIIG as the i542 SpeedMaster. * * Copyright (c) 1996, 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #define ADV_ISA_MAX_DMA_ADDR (0x00FFFFFFL) #define ADV_ISA_MAX_DMA_COUNT (0x00FFFFFFL) #define ADV_VL_MAX_DMA_ADDR (0x07FFFFFFL) #define ADV_VL_MAX_DMA_COUNT (0x07FFFFFFL) /* * The overrun buffer shared amongst all ISA/VL adapters. */ static u_int8_t* overrun_buf; static bus_dma_tag_t overrun_dmat; static bus_dmamap_t overrun_dmamap; static bus_addr_t overrun_physbase; /* Possible port addresses an ISA or VL adapter can live at */ static u_int16_t adv_isa_ioports[] = { 0x100, 0x110, /* First selection in BIOS setup */ 0x120, 0x130, /* Second selection in BIOS setup */ 0x140, 0x150, /* Third selection in BIOS setup */ 0x190, /* Fourth selection in BIOS setup */ 0x210, /* Fifth selection in BIOS setup */ 0x230, /* Sixth selection in BIOS setup */ 0x250, /* Seventh selection in BIOS setup */ 0x330 /* Eighth and default selection in BIOS setup */ }; #define MAX_ISA_IOPORT_INDEX (sizeof(adv_isa_ioports)/sizeof(u_int16_t) - 1) static int adv_isa_probe(device_t dev); static int adv_isa_attach(device_t dev); static void adv_set_isapnp_wait_for_key(void); static int adv_get_isa_dma_channel(struct adv_softc *adv); static int adv_set_isa_dma_settings(struct adv_softc *adv); static int adv_isa_probe(device_t dev) { int port_index; int max_port_index; u_long iobase, iocount, irq; int user_iobase = 0; int rid = 
0; void *ih; struct resource *iores, *irqres; /* * Default to scanning all possible device locations. */ port_index = 0; max_port_index = MAX_ISA_IOPORT_INDEX; if (bus_get_resource(dev, SYS_RES_IOPORT, 0, &iobase, &iocount) == 0) { user_iobase = 1; for (;port_index <= max_port_index; port_index++) if (iobase <= adv_isa_ioports[port_index]) break; if ((port_index > max_port_index) || (iobase != adv_isa_ioports[port_index])) { if (bootverbose) printf("adv%d: Invalid baseport of 0x%lx specified. " "Nearest valid baseport is 0x%x. Failing " "probe.\n", device_get_unit(dev), iobase, (port_index <= max_port_index) ? adv_isa_ioports[port_index] : adv_isa_ioports[max_port_index]); return ENXIO; } max_port_index = port_index; } /* Perform the actual probing */ adv_set_isapnp_wait_for_key(); for (;port_index <= max_port_index; port_index++) { u_int16_t port_addr = adv_isa_ioports[port_index]; bus_size_t maxsegsz; bus_size_t maxsize; bus_addr_t lowaddr; int error; struct adv_softc *adv; if (port_addr == 0) /* Already been attached */ continue; if (bus_set_resource(dev, SYS_RES_IOPORT, 0, port_addr, 1)) continue; /* XXX what is the real portsize? */ iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (iores == NULL) continue; if (adv_find_signature(rman_get_bustag(iores), rman_get_bushandle(iores)) == 0) { bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); continue; } /* * Got one. Now allocate our softc * and see if we can initialize the card. */ adv = adv_alloc(dev, rman_get_bustag(iores), rman_get_bushandle(iores)); if (adv == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); break; } /* * Stop the chip. */ ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT); ADV_OUTW(adv, ADV_CHIP_STATUS, 0); /* * Determine the chip version. 
*/ adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION); if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL) && (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) { adv->type = ADV_VL; maxsegsz = ADV_VL_MAX_DMA_COUNT; maxsize = BUS_SPACE_MAXSIZE_32BIT; lowaddr = ADV_VL_MAX_DMA_ADDR; bus_delete_resource(dev, SYS_RES_DRQ, 0); } else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA) && (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) { if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) { adv->type = ADV_ISAPNP; ADV_OUTB(adv, ADV_REG_IFC, ADV_IFC_INIT_DEFAULT); } else { adv->type = ADV_ISA; } maxsegsz = ADV_ISA_MAX_DMA_COUNT; maxsize = BUS_SPACE_MAXSIZE_24BIT; lowaddr = ADV_ISA_MAX_DMA_ADDR; adv->isa_dma_speed = ADV_DEF_ISA_DMA_SPEED; adv->isa_dma_channel = adv_get_isa_dma_channel(adv); bus_set_resource(dev, SYS_RES_DRQ, 0, adv->isa_dma_channel, 1); } else { panic("advisaprobe: Unknown card revision\n"); } /* * Allocate a parent dmatag for all tags created * by the MI portions of the advansys driver */ /* XXX Should be a child of the ISA bus dma tag */ error = bus_dma_tag_create( /* parent */ NULL, /* alignemnt */ 1, /* boundary */ 0, /* lowaddr */ lowaddr, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ maxsize, /* nsegments */ ~0, /* maxsegsz */ maxsegsz, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &Giant, &adv->parent_dmat); if (error != 0) { printf("%s: Could not allocate DMA tag - error %d\n", adv_name(adv), error); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); break; } adv->init_level += 2; if (overrun_buf == NULL) { /* Need to allocate our overrun buffer */ if (bus_dma_tag_create( /* parent */ adv->parent_dmat, /* alignment */ 8, /* boundary */ 0, /* lowaddr */ ADV_ISA_MAX_DMA_ADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ ADV_OVERRUN_BSIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg 
*/ NULL, &overrun_dmat) != 0) { adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); break; } if (bus_dmamem_alloc(overrun_dmat, (void **)&overrun_buf, BUS_DMA_NOWAIT, &overrun_dmamap) != 0) { bus_dma_tag_destroy(overrun_dmat); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); break; } /* And permanently map it in */ bus_dmamap_load(overrun_dmat, overrun_dmamap, overrun_buf, ADV_OVERRUN_BSIZE, adv_map, &overrun_physbase, /*flags*/0); } adv->overrun_physbase = overrun_physbase; if (adv_init(adv) != 0) { bus_dmamap_unload(overrun_dmat, overrun_dmamap); bus_dmamem_free(overrun_dmat, overrun_buf, overrun_dmamap); bus_dma_tag_destroy(overrun_dmat); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); break; } switch (adv->type) { case ADV_ISAPNP: if (adv->chip_version == ADV_CHIP_VER_ASYN_BUG) { adv->bug_fix_control |= ADV_BUG_FIX_ASYN_USE_SYN; adv->fix_asyn_xfer = ~0; } /* Fall Through */ case ADV_ISA: adv->max_dma_count = ADV_ISA_MAX_DMA_COUNT; adv->max_dma_addr = ADV_ISA_MAX_DMA_ADDR; adv_set_isa_dma_settings(adv); break; case ADV_VL: adv->max_dma_count = ADV_VL_MAX_DMA_COUNT; adv->max_dma_addr = ADV_VL_MAX_DMA_ADDR; break; default: panic("advisaprobe: Invalid card type\n"); } /* Determine our IRQ */ if (bus_get_resource(dev, SYS_RES_IRQ, 0, &irq, NULL)) bus_set_resource(dev, SYS_RES_IRQ, 0, adv_get_chip_irq(adv), 1); else adv_set_chip_irq(adv, irq); irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (irqres == NULL || bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY, adv_intr, adv, &ih)) { bus_dmamap_unload(overrun_dmat, overrun_dmamap); bus_dmamem_free(overrun_dmat, overrun_buf, overrun_dmamap); bus_dma_tag_destroy(overrun_dmat); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, 0, iores); break; } /* Mark as probed */ adv_isa_ioports[port_index] = 0; return 0; } if (user_iobase) bus_set_resource(dev, SYS_RES_IOPORT, 0, iobase, iocount); else bus_delete_resource(dev, SYS_RES_IOPORT, 
0); return ENXIO; } static int adv_isa_attach(device_t dev) { struct adv_softc *adv = device_get_softc(dev); return (adv_attach(adv)); } static int adv_get_isa_dma_channel(struct adv_softc *adv) { int channel; channel = ADV_INW(adv, ADV_CONFIG_LSW) & ADV_CFG_LSW_ISA_DMA_CHANNEL; if (channel == 0x03) return (0); else if (channel == 0x00) return (7); return (channel + 4); } static int adv_set_isa_dma_settings(struct adv_softc *adv) { u_int16_t cfg_lsw; u_int8_t value; if ((adv->isa_dma_channel >= 5) && (adv->isa_dma_channel <= 7)) { if (adv->isa_dma_channel == 7) value = 0x00; else value = adv->isa_dma_channel - 4; cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & ~ADV_CFG_LSW_ISA_DMA_CHANNEL; cfg_lsw |= value; ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); adv->isa_dma_speed &= 0x07; adv_set_bank(adv, 1); ADV_OUTB(adv, ADV_DMA_SPEED, adv->isa_dma_speed); adv_set_bank(adv, 0); isa_dmacascade(adv->isa_dma_channel); } return (0); } static void adv_set_isapnp_wait_for_key(void) { static int isapnp_wait_set = 0; if (isapnp_wait_set == 0) { outb(ADV_ISA_PNP_PORT_ADDR, 0x02); outb(ADV_ISA_PNP_PORT_WRITE, 0x02); isapnp_wait_set++; } } static device_method_t adv_isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, adv_isa_probe), DEVMETHOD(device_attach, adv_isa_attach), { 0, 0 } }; static driver_t adv_isa_driver = { "adv", adv_isa_methods, sizeof(struct adv_softc) }; static devclass_t adv_isa_devclass; DRIVER_MODULE(adv, isa, adv_isa_driver, adv_isa_devclass, 0, 0); Index: head/sys/dev/advansys/adv_pci.c =================================================================== --- head/sys/dev/advansys/adv_pci.c (revision 129878) +++ head/sys/dev/advansys/adv_pci.c (revision 129879) @@ -1,334 +1,335 @@ /* * Device probe and attach routines for the following * Advanced Systems Inc. 
SCSI controllers: * * Connectivity Products: * ABP902/3902 - Bus-Master PCI (16 CDB) * ABP3905 - Bus-Master PCI (16 CDB) * ABP915 - Bus-Master PCI (16 CDB) * ABP920 - Bus-Master PCI (16 CDB) * ABP3922 - Bus-Master PCI (16 CDB) * ABP3925 - Bus-Master PCI (16 CDB) * ABP930 - Bus-Master PCI (16 CDB) * * ABP930U - Bus-Master PCI Ultra (16 CDB) * ABP930UA - Bus-Master PCI Ultra (16 CDB) * ABP960 - Bus-Master PCI MAC/PC (16 CDB) ** * ABP960U - Bus-Master PCI MAC/PC (16 CDB) ** * * Single Channel Products: * ABP940 - Bus-Master PCI (240 CDB) * ABP940U - Bus-Master PCI Ultra (240 CDB) * ABP940UA/3940UA - Bus-Master PCI Ultra (240 CDB) * ABP3960UA - Bus-Master PCI MAC/PC (240 CDB) * ABP970 - Bus-Master PCI MAC/PC (240 CDB) * ABP970U - Bus-Master PCI MAC/PC Ultra (240 CDB) * * Dual Channel Products: * ABP950 - Dual Channel Bus-Master PCI (240 CDB Per Channel) * ABP980 - Four Channel Bus-Master PCI (240 CDB Per Channel) * ABP980U - Four Channel Bus-Master PCI Ultra (240 CDB Per Channel) * ABP980UA/3980UA - Four Channel Bus-Master PCI Ultra (16 CDB Per Chan.) * * Footnotes: * * This board has been sold by SIIG as the Fast SCSI Pro PCI. * ** This board has been sold by Iomega as a Jaz Jet PCI adapter. * * Copyright (c) 1997 Justin Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #define PCI_BASEADR0 PCIR_BAR(0) /* I/O Address */ #define PCI_BASEADR1 PCIR_BAR(1) /* Mem I/O Address */ #define PCI_DEVICE_ID_ADVANSYS_1200A 0x110010CD #define PCI_DEVICE_ID_ADVANSYS_1200B 0x120010CD #define PCI_DEVICE_ID_ADVANSYS_3000 0x130010CD #define PCI_DEVICE_REV_ADVANSYS_3150 0x02 #define PCI_DEVICE_REV_ADVANSYS_3050 0x03 #define ADV_PCI_MAX_DMA_ADDR (0xFFFFFFFFL) #define ADV_PCI_MAX_DMA_COUNT (0xFFFFFFFFL) static int adv_pci_probe(device_t); static int adv_pci_attach(device_t); /* * The overrun buffer shared amongst all PCI adapters. 
 */
/* Shared overrun buffer state; allocated once on first PCI attach. */
static u_int8_t*	overrun_buf;
static bus_dma_tag_t	overrun_dmat;
static bus_dmamap_t	overrun_dmamap;
static bus_addr_t	overrun_physbase;

/*
 * Probe: match known AdvanSys PCI device IDs (and, for the 3000
 * series, the revision ID) and set a human-readable description.
 * Returns 0 on a match, ENXIO otherwise.
 */
static int
adv_pci_probe(device_t dev)
{
	int rev = pci_get_revid(dev);

	switch (pci_get_devid(dev)) {
	case PCI_DEVICE_ID_ADVANSYS_1200A:
		device_set_desc(dev, "AdvanSys ASC1200A SCSI controller");
		return 0;
	case PCI_DEVICE_ID_ADVANSYS_1200B:
		device_set_desc(dev, "AdvanSys ASC1200B SCSI controller");
		return 0;
	case PCI_DEVICE_ID_ADVANSYS_3000:
		if (rev == PCI_DEVICE_REV_ADVANSYS_3150) {
			device_set_desc(dev,
					"AdvanSys ASC3150 SCSI controller");
			return 0;
		} else if (rev == PCI_DEVICE_REV_ADVANSYS_3050) {
			device_set_desc(dev,
					"AdvanSys ASC3030/50 SCSI controller");
			return 0;
		} else if (rev >= PCI_DEVICE_REV_ADVANSYS_3150) {
			/* Any newer, unrecognized revision of the 3000. */
			device_set_desc(dev, "Unknown AdvanSys controller");
			return 0;
		}
		break;
	default:
		break;
	}
	return ENXIO;
}

static int
adv_pci_attach(device_t dev)
{
	struct adv_softc   *adv;
	u_int32_t	   id;
	u_int32_t	   command;
	int		   error, rid, irqrid;
	void		   *ih;
	struct resource	   *iores, *irqres;

	/*
	 * Determine the chip version.
	 */
	id = pci_read_config(dev, PCIR_DEVVENDOR, /*bytes*/4);
	command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/1);

	/*
	 * These cards do not allow memory mapped accesses, so we must
	 * ensure that I/O accesses are available or we won't be able
	 * to talk to them.
	 */
	if ((command & (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN))
	 != (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN)) {
		command |= PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, command, /*bytes*/1);
	}

	/*
	 * Early chips can't handle non-zero latency timer settings.
*/ if (id == PCI_DEVICE_ID_ADVANSYS_1200A || id == PCI_DEVICE_ID_ADVANSYS_1200B) { pci_write_config(dev, PCIR_LATTIMER, /*value*/0, /*bytes*/1); } rid = PCI_BASEADR0; iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (iores == NULL) return ENXIO; if (adv_find_signature(rman_get_bustag(iores), rman_get_bushandle(iores)) == 0) { bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } adv = adv_alloc(dev, rman_get_bustag(iores), rman_get_bushandle(iores)); if (adv == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } /* Allocate a dmatag for our transfer DMA maps */ /* XXX Should be a child of the PCI bus dma tag */ error = bus_dma_tag_create( /* parent */ NULL, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ ADV_PCI_MAX_DMA_ADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ BUS_SPACE_MAXSIZE_32BIT, /* nsegments */ ~0, /* maxsegsz */ ADV_PCI_MAX_DMA_COUNT, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &Giant, &adv->parent_dmat); if (error != 0) { printf("%s: Could not allocate DMA tag - error %d\n", adv_name(adv), error); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } adv->init_level++; if (overrun_buf == NULL) { /* Need to allocate our overrun buffer */ if (bus_dma_tag_create( /* parent */ adv->parent_dmat, /* alignment */ 8, /* boundary */ 0, /* lowaddr */ ADV_PCI_MAX_DMA_ADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ ADV_OVERRUN_BSIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &Giant, &overrun_dmat) != 0) { bus_dma_tag_destroy(adv->parent_dmat); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } if (bus_dmamem_alloc(overrun_dmat, (void **)&overrun_buf, BUS_DMA_NOWAIT, &overrun_dmamap) != 0) { bus_dma_tag_destroy(overrun_dmat); 
bus_dma_tag_destroy(adv->parent_dmat); adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } /* And permanently map it in */ bus_dmamap_load(overrun_dmat, overrun_dmamap, overrun_buf, ADV_OVERRUN_BSIZE, adv_map, &overrun_physbase, /*flags*/0); } adv->overrun_physbase = overrun_physbase; /* * Stop the chip. */ ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT); ADV_OUTW(adv, ADV_CHIP_STATUS, 0); adv->chip_version = ADV_INB(adv, ADV_NONEISA_CHIP_REVISION); adv->type = ADV_PCI; /* * Setup active negation and signal filtering. */ { u_int8_t extra_cfg; if (adv->chip_version >= ADV_CHIP_VER_PCI_ULTRA_3150) adv->type |= ADV_ULTRA; if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050) extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_WR_EN_FILTER; else extra_cfg = ADV_IFC_ACT_NEG | ADV_IFC_SLEW_RATE; ADV_OUTB(adv, ADV_REG_IFC, extra_cfg); } if (adv_init(adv) != 0) { adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } adv->max_dma_count = ADV_PCI_MAX_DMA_COUNT; adv->max_dma_addr = ADV_PCI_MAX_DMA_ADDR; #if CC_DISABLE_PCI_PARITY_INT { u_int16_t config_msw; config_msw = ADV_INW(adv, ADV_CONFIG_MSW); config_msw &= 0xFFC0; ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); } #endif if (id == PCI_DEVICE_ID_ADVANSYS_1200A || id == PCI_DEVICE_ID_ADVANSYS_1200B) { adv->bug_fix_control |= ADV_BUG_FIX_IF_NOT_DWB; adv->bug_fix_control |= ADV_BUG_FIX_ASYN_USE_SYN; adv->fix_asyn_xfer = ~0; } irqrid = 0; irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irqrid, RF_SHAREABLE | RF_ACTIVE); if (irqres == NULL || bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY, adv_intr, adv, &ih)) { adv_free(adv); bus_release_resource(dev, SYS_RES_IOPORT, rid, iores); return ENXIO; } adv_attach(adv); return 0; } static device_method_t adv_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, adv_pci_probe), DEVMETHOD(device_attach, adv_pci_attach), { 0, 0 } }; static driver_t adv_pci_driver = { "adv", adv_pci_methods, sizeof(struct adv_softc) }; static 
devclass_t adv_pci_devclass; DRIVER_MODULE(adv, pci, adv_pci_driver, adv_pci_devclass, 0, 0); Index: head/sys/dev/aic7xxx/aic79xx_osm.h =================================================================== --- head/sys/dev/aic7xxx/aic79xx_osm.h (revision 129878) +++ head/sys/dev/aic7xxx/aic79xx_osm.h (revision 129879) @@ -1,322 +1,323 @@ /* * FreeBSD platform specific driver option settings, data structures, * function declarations and includes. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2001-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.h#23 $ * * $FreeBSD$ */ #ifndef _AIC79XX_FREEBSD_H_ #define _AIC79XX_FREEBSD_H_ #include /* for config options */ #include #include #include /* For device_t */ #if __FreeBSD_version >= 500000 #include #endif #include #include #include +#include #include #define AIC_PCI_CONFIG 1 #include #include #include #include #include #include #include #if __FreeBSD_version >= 500000 #include #include #else #include #include #endif #include #include #include #include #include #include #include #include #ifdef CAM_NEW_TRAN_CODE #define AHD_NEW_TRAN_SETTINGS #endif /* CAM_NEW_TRAN_CODE */ /****************************** Platform Macros *******************************/ #define SIM_IS_SCSIBUS_B(ahd, sim) \ (0) #define SIM_CHANNEL(ahd, sim) \ ('A') #define SIM_SCSI_ID(ahd, sim) \ (ahd->our_id) #define SIM_PATH(ahd, sim) \ (ahd->platform_data->path) #define BUILD_SCSIID(ahd, sim, target_id, our_id) \ ((((target_id) << TID_SHIFT) & TID) | (our_id)) #define SCB_GET_SIM(ahd, scb) \ ((ahd)->platform_data->sim) #ifndef offsetof #define offsetof(type, member) ((size_t)(&((type *)0)->member)) #endif /************************ Tunable Driver Parameters **************************/ /* * The number of dma segments supported. The sequencer can handle any number * of physically contiguous S/G entrys. To reduce the driver's memory * consumption, we limit the number supported to be sufficient to handle * the largest mapping supported by the kernel, MAXPHYS. Assuming the * transfer is as fragmented as possible and unaligned, this turns out to * be the number of paged sized transfers in MAXPHYS plus an extra element * to handle any unaligned residual. The sequencer fetches SG elements * in cacheline sized chucks, so make the number per-transaction an even * multiple of 16 which should align us on even the largest of cacheline * boundaries. 
*/ #define AHD_NSEG (roundup(btoc(MAXPHYS) + 1, 16)) /* This driver supports target mode */ #if NOT_YET #define AHD_TARGET_MODE 1 #endif /************************** Softc/SCB Platform Data ***************************/ struct ahd_platform_data { /* * Hooks into the XPT. */ struct cam_sim *sim; struct cam_path *path; int regs_res_type[2]; int regs_res_id[2]; int irq_res_type; struct resource *regs[2]; struct resource *irq; void *ih; eventhandler_tag eh; struct proc *recovery_thread; }; struct scb_platform_data { }; /***************************** Core Includes **********************************/ #if AHD_REG_PRETTY_PRINT #define AIC_DEBUG_REGISTERS 1 #else #define AIC_DEBUG_REGISTERS 0 #endif #define AIC_CORE_INCLUDE #define AIC_LIB_PREFIX ahd #define AIC_CONST_PREFIX AHD #include /*************************** Device Access ************************************/ #define ahd_inb(ahd, port) \ bus_space_read_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], (port) & 0xFF) #define ahd_outb(ahd, port, value) \ bus_space_write_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], (port) & 0xFF, value) #define ahd_inw_atomic(ahd, port) \ aic_le16toh(bus_space_read_2((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], (port) & 0xFF)) #define ahd_outw_atomic(ahd, port, value) \ bus_space_write_2((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], \ (port & 0xFF), aic_htole16(value)) #define ahd_outsb(ahd, port, valp, count) \ bus_space_write_multi_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], \ (port & 0xFF), valp, count) #define ahd_insb(ahd, port, valp, count) \ bus_space_read_multi_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], \ (port & 0xFF), valp, count) static __inline void ahd_flush_device_writes(struct ahd_softc *); static __inline void ahd_flush_device_writes(struct ahd_softc *ahd) { /* XXX Is this sufficient for all architectures??? 
*/ ahd_inb(ahd, INTSTAT); } /**************************** Locking Primitives ******************************/ /* Lock protecting internal data structures */ static __inline void ahd_lockinit(struct ahd_softc *); static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags); static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags); /* Lock held during command compeletion to the upper layer */ static __inline void ahd_done_lockinit(struct ahd_softc *); static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags); static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags); /* Lock held during ahd_list manipulation and ahd softc frees */ static __inline void ahd_list_lockinit(void); static __inline void ahd_list_lock(unsigned long *flags); static __inline void ahd_list_unlock(unsigned long *flags); static __inline void ahd_lockinit(struct ahd_softc *ahd) { } static __inline void ahd_lock(struct ahd_softc *ahd, unsigned long *flags) { *flags = splcam(); } static __inline void ahd_unlock(struct ahd_softc *ahd, unsigned long *flags) { splx(*flags); } /* Lock held during command compeletion to the upper layer */ static __inline void ahd_done_lockinit(struct ahd_softc *ahd) { } static __inline void ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags) { } static __inline void ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags) { } /* Lock held during ahd_list manipulation and ahd softc frees */ static __inline void ahd_list_lockinit(void) { } static __inline void ahd_list_lock(unsigned long *flags) { } static __inline void ahd_list_unlock(unsigned long *flags) { } /********************************** PCI ***************************************/ int ahd_pci_map_registers(struct ahd_softc *ahd); int ahd_pci_map_int(struct ahd_softc *ahd); /************************** Transaction Operations ****************************/ static __inline void aic_freeze_simq(struct aic_softc*); static __inline void 
aic_release_simq(struct aic_softc*); static __inline void aic_freeze_simq(struct aic_softc *aic) { xpt_freeze_simq(aic->platform_data->sim, /*count*/1); } static __inline void aic_release_simq(struct aic_softc *aic) { xpt_release_simq(aic->platform_data->sim, /*run queue*/TRUE); } /********************************* Debug **************************************/ static __inline void ahd_print_path(struct ahd_softc *, struct scb *); static __inline void ahd_platform_dump_card_state(struct ahd_softc *ahd); static __inline void ahd_print_path(struct ahd_softc *ahd, struct scb *scb) { xpt_print_path(scb->io_ctx->ccb_h.path); } static __inline void ahd_platform_dump_card_state(struct ahd_softc *ahd) { /* Nothing to do here for FreeBSD */ } /**************************** Transfer Settings *******************************/ void ahd_notify_xfer_settings_change(struct ahd_softc *, struct ahd_devinfo *); void ahd_platform_set_tags(struct ahd_softc *, struct ahd_devinfo *, int /*enable*/); /************************* Initialization/Teardown ****************************/ int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg); void ahd_platform_free(struct ahd_softc *ahd); int ahd_map_int(struct ahd_softc *ahd); int ahd_attach(struct ahd_softc *); int ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd); int ahd_detach(device_t); #define ahd_platform_init(arg) /****************************** Interrupts ************************************/ void ahd_platform_intr(void *); static __inline void ahd_platform_flushwork(struct ahd_softc *ahd); static __inline void ahd_platform_flushwork(struct ahd_softc *ahd) { } /************************ Misc Function Declarations **************************/ void ahd_done(struct ahd_softc *ahd, struct scb *scb); void ahd_send_async(struct ahd_softc *, char /*channel*/, u_int /*target*/, u_int /*lun*/, ac_code, void *arg); #endif /* _AIC79XX_FREEBSD_H_ */ Index: head/sys/dev/aic7xxx/aic7xxx_osm.h 
=================================================================== --- head/sys/dev/aic7xxx/aic7xxx_osm.h (revision 129878) +++ head/sys/dev/aic7xxx/aic7xxx_osm.h (revision 129879) @@ -1,309 +1,310 @@ /* * FreeBSD platform specific driver option settings, data structures, * function declarations and includes. * * Copyright (c) 1994-2001 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.h#18 $ * * $FreeBSD$ */ #ifndef _AIC7XXX_FREEBSD_H_ #define _AIC7XXX_FREEBSD_H_ #include /* for config options */ #include #include #include /* For device_t */ #if __FreeBSD_version >= 500000 #include #endif #include #include #include +#include #include #if __FreeBSD_version < 500000 #include #else #define NPCI 1 #endif #if NPCI > 0 #define AIC_PCI_CONFIG 1 #include #endif #include #include #include #include #include #include #if NPCI > 0 #if __FreeBSD_version >= 500000 #include #include #else #include #include #endif #endif #include #include #include #include #include #include #include #ifdef CAM_NEW_TRAN_CODE #define AHC_NEW_TRAN_SETTINGS #endif /* CAM_NEW_TRAN_CODE */ /*************************** Attachment Bookkeeping ***************************/ extern devclass_t ahc_devclass; /****************************** Platform Macros *******************************/ #define SIM_IS_SCSIBUS_B(ahc, sim) \ ((sim) == ahc->platform_data->sim_b) #define SIM_CHANNEL(ahc, sim) \ (((sim) == ahc->platform_data->sim_b) ? 'B' : 'A') #define SIM_SCSI_ID(ahc, sim) \ (((sim) == ahc->platform_data->sim_b) ? ahc->our_id_b : ahc->our_id) #define SIM_PATH(ahc, sim) \ (((sim) == ahc->platform_data->sim_b) ? ahc->platform_data->path_b \ : ahc->platform_data->path) #define BUILD_SCSIID(ahc, sim, target_id, our_id) \ ((((target_id) << TID_SHIFT) & TID) | (our_id) \ | (SIM_IS_SCSIBUS_B(ahc, sim) ? TWIN_CHNLB : 0)) #define SCB_GET_SIM(ahc, scb) \ (SCB_GET_CHANNEL(ahc, scb) == 'A' ? (ahc)->platform_data->sim \ : (ahc)->platform_data->sim_b) #ifndef offsetof #define offsetof(type, member) ((size_t)(&((type *)0)->member)) #endif /************************ Tunable Driver Parameters **************************/ /* * The number of dma segments supported. The sequencer can handle any number * of physically contiguous S/G entrys. 
To reduce the driver's memory * consumption, we limit the number supported to be sufficient to handle * the largest mapping supported by the kernel, MAXPHYS. Assuming the * transfer is as fragmented as possible and unaligned, this turns out to * be the number of paged sized transfers in MAXPHYS plus an extra element * to handle any unaligned residual. The sequencer fetches SG elements * in cacheline sized chucks, so make the number per-transaction an even * multiple of 16 which should align us on even the largest of cacheline * boundaries. */ #define AHC_NSEG (roundup(btoc(MAXPHYS) + 1, 16)) /* This driver supports target mode */ #define AHC_TARGET_MODE 1 /************************** Softc/SCB Platform Data ***************************/ struct ahc_platform_data { /* * Hooks into the XPT. */ struct cam_sim *sim; struct cam_sim *sim_b; struct cam_path *path; struct cam_path *path_b; int regs_res_type; int regs_res_id; int irq_res_type; struct resource *regs; struct resource *irq; void *ih; eventhandler_tag eh; struct proc *recovery_thread; }; struct scb_platform_data { }; /***************************** Core Includes **********************************/ #if AHC_REG_PRETTY_PRINT #define AIC_DEBUG_REGISTERS 1 #else #define AIC_DEBUG_REGISTERS 0 #endif #define AIC_CORE_INCLUDE #define AIC_LIB_PREFIX ahc #define AIC_CONST_PREFIX AHC #include /*************************** Device Access ************************************/ #define ahc_inb(ahc, port) \ bus_space_read_1((ahc)->tag, (ahc)->bsh, port) #define ahc_outb(ahc, port, value) \ bus_space_write_1((ahc)->tag, (ahc)->bsh, port, value) #define ahc_outsb(ahc, port, valp, count) \ bus_space_write_multi_1((ahc)->tag, (ahc)->bsh, port, valp, count) #define ahc_insb(ahc, port, valp, count) \ bus_space_read_multi_1((ahc)->tag, (ahc)->bsh, port, valp, count) static __inline void ahc_flush_device_writes(struct ahc_softc *); static __inline void ahc_flush_device_writes(struct ahc_softc *ahc) { /* XXX Is this sufficient for all 
architectures??? */ ahc_inb(ahc, INTSTAT); } /**************************** Locking Primitives ******************************/ /* Lock protecting internal data structures */ static __inline void ahc_lockinit(struct ahc_softc *); static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags); static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags); /* Lock held during command compeletion to the upper layer */ static __inline void ahc_done_lockinit(struct ahc_softc *); static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags); static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags); /* Lock held during ahc_list manipulation and ahc softc frees */ static __inline void ahc_list_lockinit(void); static __inline void ahc_list_lock(unsigned long *flags); static __inline void ahc_list_unlock(unsigned long *flags); static __inline void ahc_lockinit(struct ahc_softc *ahc) { } static __inline void ahc_lock(struct ahc_softc *ahc, unsigned long *flags) { *flags = splcam(); } static __inline void ahc_unlock(struct ahc_softc *ahc, unsigned long *flags) { splx(*flags); } /* Lock held during command compeletion to the upper layer */ static __inline void ahc_done_lockinit(struct ahc_softc *ahc) { } static __inline void ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags) { } static __inline void ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags) { } /* Lock held during ahc_list manipulation and ahc softc frees */ static __inline void ahc_list_lockinit(void) { } static __inline void ahc_list_lock(unsigned long *flags) { } static __inline void ahc_list_unlock(unsigned long *flags) { } /********************************** PCI ***************************************/ #ifdef AIC_PCI_CONFIG int ahc_pci_map_registers(struct ahc_softc *ahc); int ahc_pci_map_int(struct ahc_softc *ahc); #endif /*AIC_PCI_CONFIG*/ /******************************** VL/EISA *************************************/ int 
aic7770_map_registers(struct ahc_softc *ahc, u_int port); int aic7770_map_int(struct ahc_softc *ahc, int irq); /********************************* Debug **************************************/ static __inline void ahc_print_path(struct ahc_softc *, struct scb *); static __inline void ahc_platform_dump_card_state(struct ahc_softc *ahc); static __inline void ahc_print_path(struct ahc_softc *ahc, struct scb *scb) { xpt_print_path(scb->io_ctx->ccb_h.path); } static __inline void ahc_platform_dump_card_state(struct ahc_softc *ahc) { /* Nothing to do here for FreeBSD */ } /**************************** Transfer Settings *******************************/ void ahc_notify_xfer_settings_change(struct ahc_softc *, struct ahc_devinfo *); void ahc_platform_set_tags(struct ahc_softc *, struct ahc_devinfo *, int /*enable*/); /************************* Initialization/Teardown ****************************/ int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg); void ahc_platform_free(struct ahc_softc *ahc); int ahc_map_int(struct ahc_softc *ahc); int ahc_attach(struct ahc_softc *); int ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc); int ahc_detach(device_t); /****************************** Interrupts ************************************/ void ahc_platform_intr(void *); static __inline void ahc_platform_flushwork(struct ahc_softc *ahc); static __inline void ahc_platform_flushwork(struct ahc_softc *ahc) { } /************************ Misc Function Declarations **************************/ void ahc_done(struct ahc_softc *ahc, struct scb *scb); void ahc_send_async(struct ahc_softc *, char /*channel*/, u_int /*target*/, u_int /*lun*/, ac_code, void *arg); #endif /* _AIC7XXX_FREEBSD_H_ */ Index: head/sys/dev/amd/amd.c =================================================================== --- head/sys/dev/amd/amd.c (revision 129878) +++ head/sys/dev/amd/amd.c (revision 129879) @@ -1,2512 +1,2513 @@ /* ********************************************************************* 
* FILE NAME : amd.c * BY : C.L. Huang (ching@tekram.com.tw) * Erich Chen (erich@tekram.com.tw) * Description: Device Driver for the amd53c974 PCI Bus Master * SCSI Host adapter found on cards such as * the Tekram DC-390(T). * (C)Copyright 1995-1999 Tekram Technology Co., Ltd. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
********************************************************************* * $FreeBSD$ */ /* ********************************************************************* * HISTORY: * * REV# DATE NAME DESCRIPTION * 1.00 07/02/96 CLH First release for RELEASE-2.1.0 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM) ********************************************************************* */ /* #define AMD_DEBUG0 */ /* #define AMD_DEBUG_SCSI_PHASE */ #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PCI_DEVICE_ID_AMD53C974 0x20201022ul #define PCI_BASE_ADDR0 0x10 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int); typedef phase_handler_t *phase_handler_func_t; static void amd_intr(void *vamd); static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB); static phase_handler_t amd_NopPhase; static phase_handler_t amd_DataOutPhase0; static phase_handler_t amd_DataInPhase0; #define amd_CommandPhase0 amd_NopPhase static phase_handler_t amd_StatusPhase0; static phase_handler_t amd_MsgOutPhase0; static phase_handler_t amd_MsgInPhase0; static phase_handler_t amd_DataOutPhase1; static phase_handler_t amd_DataInPhase1; static phase_handler_t amd_CommandPhase1; static phase_handler_t amd_StatusPhase1; static phase_handler_t amd_MsgOutPhase1; static phase_handler_t amd_MsgInPhase1; static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb); static int amdparsemsg(struct amd_softc *amd); static int amdhandlemsgreject(struct amd_softc *amd); static void amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset); static u_int amdfindclockrate(struct amd_softc *amd, 
u_int *period); static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full); static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir); static void amd_Disconnect(struct amd_softc *amd); static void amd_Reselect(struct amd_softc *amd); static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB); static void amd_ScsiRstDetect(struct amd_softc *amd); static void amd_ResetSCSIBus(struct amd_softc *amd); static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB); static void amd_InvalidCmd(struct amd_softc *amd); static void amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); #if 0 static void amd_timeout(void *arg1); static void amd_reset(struct amd_softc *amd); #endif static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt); void amd_linkSRB(struct amd_softc *amd); static int amd_init(device_t); static void amd_load_defaults(struct amd_softc *amd); static void amd_load_eeprom_or_defaults(struct amd_softc *amd); static int amd_EEpromInDO(struct amd_softc *amd); static u_int16_t EEpromGetData1(struct amd_softc *amd); static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval); static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry); static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd); static void amd_ReadEEprom(struct amd_softc *amd); static int amd_probe(device_t); static int amd_attach(device_t); static void amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun, u_int tag, struct srb_queue *queue, cam_status status); static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate, u_int period, u_int offset, u_int type); static void amdsettags(struct amd_softc *amd, u_int target, int tagenb); static __inline void amd_clear_msg_state(struct amd_softc *amd); static __inline void amd_clear_msg_state(struct amd_softc *amd) { amd->msgout_len = 0; amd->msgout_index = 0; amd->msgin_index = 0; } static 
__inline uint32_t amd_get_sense_bufaddr(struct amd_softc *amd, struct amd_srb *pSRB) { int offset; offset = pSRB->TagNumber; return (amd->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } static __inline struct scsi_sense_data * amd_get_sense_buf(struct amd_softc *amd, struct amd_srb *pSRB) { int offset; offset = pSRB->TagNumber; return (&amd->sense_buffers[offset]); } static __inline uint32_t amd_get_sense_bufsize(struct amd_softc *amd, struct amd_srb *pSRB) { return (sizeof(struct scsi_sense_data)); } /* CAM SIM entry points */ #define ccb_srb_ptr spriv_ptr0 #define ccb_amd_ptr spriv_ptr1 static void amd_action(struct cam_sim *sim, union ccb *ccb); static void amd_poll(struct cam_sim *sim); /* * State engine function tables indexed by SCSI phase number */ phase_handler_func_t amd_SCSI_phase0[] = { amd_DataOutPhase0, amd_DataInPhase0, amd_CommandPhase0, amd_StatusPhase0, amd_NopPhase, amd_NopPhase, amd_MsgOutPhase0, amd_MsgInPhase0 }; phase_handler_func_t amd_SCSI_phase1[] = { amd_DataOutPhase1, amd_DataInPhase1, amd_CommandPhase1, amd_StatusPhase1, amd_NopPhase, amd_NopPhase, amd_MsgOutPhase1, amd_MsgInPhase1 }; /* * EEProm/BIOS negotiation periods */ u_int8_t eeprom_period[] = { 25, /* 10.0MHz */ 32, /* 8.0MHz */ 38, /* 6.6MHz */ 44, /* 5.7MHz */ 50, /* 5.0MHz */ 63, /* 4.0MHz */ 83, /* 3.0MHz */ 125 /* 2.0MHz */ }; /* * chip clock setting to SCSI specified sync parameter table. 
*/ u_int8_t tinfo_sync_period[] = { 25, /* 10.0 */ 32, /* 8.0 */ 38, /* 6.6 */ 44, /* 5.7 */ 50, /* 5.0 */ 57, /* 4.4 */ 63, /* 4.0 */ 70, /* 3.6 */ 76, /* 3.3 */ 83 /* 3.0 */ }; static __inline struct amd_srb * amdgetsrb(struct amd_softc * amd) { int intflag; struct amd_srb * pSRB; intflag = splcam(); pSRB = TAILQ_FIRST(&amd->free_srbs); if (pSRB) TAILQ_REMOVE(&amd->free_srbs, pSRB, links); splx(intflag); return (pSRB); } static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb) { struct scsi_request_sense sense_cmd; u_int8_t *cdb; u_int cdb_len; if (srb->SRBFlag & AUTO_REQSENSE) { sense_cmd.opcode = REQUEST_SENSE; sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5; sense_cmd.unused[0] = 0; sense_cmd.unused[1] = 0; sense_cmd.length = sizeof(struct scsi_sense_data); sense_cmd.control = 0; cdb = &sense_cmd.opcode; cdb_len = sizeof(sense_cmd); } else { cdb = &srb->CmdBlock[0]; cdb_len = srb->ScsiCmdLen; } amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len); } /* * Attempt to start a waiting transaction. Interrupts must be disabled * upon entry to this function. 
*/ static void amdrunwaiting(struct amd_softc *amd) { struct amd_srb *srb; if (amd->last_phase != SCSI_BUS_FREE) return; srb = TAILQ_FIRST(&amd->waiting_srbs); if (srb == NULL) return; if (amdstart(amd, srb) == 0) { TAILQ_REMOVE(&amd->waiting_srbs, srb, links); TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links); } } static void amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct amd_srb *srb; union ccb *ccb; struct amd_softc *amd; int s; srb = (struct amd_srb *)arg; ccb = srb->pccb; amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr; if (error != 0) { if (error != EFBIG) printf("amd%d: Unexepected error 0x%x returned from " "bus_dmamap_load\n", amd->unit, error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); xpt_done(ccb); return; } if (nseg != 0) { struct amd_sg *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ srb->pSGlist = &srb->SGsegment[0]; sg = srb->pSGlist; while (dm_segs < end_seg) { sg->SGXLen = dm_segs->ds_len; sg->SGXPtr = dm_segs->ds_addr; sg++; dm_segs++; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op); } srb->SGcount = nseg; srb->SGIndex = 0; srb->AdaptStatus = 0; srb->TargetStatus = 0; srb->MsgCnt = 0; srb->SRBStatus = 0; srb->SRBFlag = 0; srb->SRBState = 0; srb->TotalXferredLen = 0; srb->SGPhysAddr = 0; srb->SGToBeXferLen = 0; srb->EndMessage = 0; s = splcam(); /* * Last time we need to check if this CCB needs to * be aborted. 
*/ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(amd->buffer_dmat, srb->dmamap); TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links); xpt_done(ccb); splx(s); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; #if 0 /* XXX Need a timeout handler */ ccb->ccb_h.timeout_ch = timeout(amdtimeout, (caddr_t)srb, (ccb->ccb_h.timeout * hz) / 1000); #endif TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links); amdrunwaiting(amd); splx(s); } static void amd_action(struct cam_sim * psim, union ccb * pccb) { struct amd_softc * amd; u_int target_id; CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n")); amd = (struct amd_softc *) cam_sim_softc(psim); target_id = pccb->ccb_h.target_id; switch (pccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct amd_srb * pSRB; struct ccb_scsiio *pcsio; pcsio = &pccb->csio; /* * Assign an SRB and connect it with this ccb. */ pSRB = amdgetsrb(amd); if (!pSRB) { /* Freeze SIMQ */ pccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pSRB->pccb = pccb; pccb->ccb_h.ccb_srb_ptr = pSRB; pccb->ccb_h.ccb_amd_ptr = amd; pSRB->ScsiCmdLen = pcsio->cdb_len; bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len); if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { /* * We've been given a pointer * to a single buffer. */ if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load(amd->buffer_dmat, pSRB->dmamap, pcsio->data_ptr, pcsio->dxfer_len, amdexecutesrb, pSRB, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain * ordering, freeze the * controller queue * until our mapping is * returned. 
*/ xpt_freeze_simq(amd->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ seg.ds_addr = (bus_addr_t)pcsio->data_ptr; seg.ds_len = pcsio->dxfer_len; amdexecutesrb(pSRB, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); pccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(pccb); return; } /* Just use the segments provided */ segs = (struct bus_dma_segment *)pcsio->data_ptr; amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0); } } else amdexecutesrb(pSRB, NULL, 0, 0); break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = amd->max_lun; /* 7 or 0 */ cpi->initiator_id = amd->AdaptSCSIID; cpi->bus_id = cam_sim_bus(psim); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; case XPT_RESET_BUS: { int i; amd_ResetSCSIBus(amd); amd->ACBFlag = 0; for (i = 0; i < 500; i++) { DELAY(1000); /* Wait until our interrupt * handler sees it */ } pccb->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_RESET_DEV: pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; case XPT_TERM_IO: pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); /* XXX: intentional fall-through ?? 
*/ case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct amd_target_info *targ_info; struct amd_transinfo *tinfo; int intflag; cts = &pccb->cts; intflag = splcam(); targ_info = &amd->tinfo[target_id]; if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { /* current transfer settings */ if (targ_info->disc_tag & AMD_CUR_DISCENB) { cts->flags = CCB_TRANS_DISC_ENB; } else { cts->flags = 0; /* no tag & disconnect */ } if (targ_info->disc_tag & AMD_CUR_TAGENB) { cts->flags |= CCB_TRANS_TAG_ENB; } tinfo = &targ_info->current; } else { /* default(user) transfer settings */ if (targ_info->disc_tag & AMD_USR_DISCENB) { cts->flags = CCB_TRANS_DISC_ENB; } else { cts->flags = 0; } if (targ_info->disc_tag & AMD_USR_TAGENB) { cts->flags |= CCB_TRANS_TAG_ENB; } tinfo = &targ_info->user; } cts->sync_period = tinfo->period; cts->sync_offset = tinfo->offset; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; splx(intflag); cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; pccb->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct amd_target_info *targ_info; u_int update_type; int intflag; int last_entry; cts = &pccb->cts; update_type = 0; if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { update_type |= AMD_TRANS_GOAL; } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { update_type |= AMD_TRANS_USER; } if (update_type == 0 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) { cts->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); } intflag = splcam(); targ_info = &amd->tinfo[target_id]; if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { if (update_type & AMD_TRANS_GOAL) { if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { targ_info->disc_tag |= AMD_CUR_DISCENB; } else { targ_info->disc_tag &= ~AMD_CUR_DISCENB; } } if (update_type & AMD_TRANS_USER) { if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { targ_info->disc_tag |= 
AMD_USR_DISCENB; } else { targ_info->disc_tag &= ~AMD_USR_DISCENB; } } } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { if (update_type & AMD_TRANS_GOAL) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { targ_info->disc_tag |= AMD_CUR_TAGENB; } else { targ_info->disc_tag &= ~AMD_CUR_TAGENB; } } if (update_type & AMD_TRANS_USER) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { targ_info->disc_tag |= AMD_USR_TAGENB; } else { targ_info->disc_tag &= ~AMD_USR_TAGENB; } } } if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { if (update_type & AMD_TRANS_GOAL) cts->sync_offset = targ_info->goal.offset; else cts->sync_offset = targ_info->user.offset; } if (cts->sync_offset > AMD_MAX_SYNC_OFFSET) cts->sync_offset = AMD_MAX_SYNC_OFFSET; if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { if (update_type & AMD_TRANS_GOAL) cts->sync_period = targ_info->goal.period; else cts->sync_period = targ_info->user.period; } last_entry = sizeof(tinfo_sync_period) - 1; if ((cts->sync_period != 0) && (cts->sync_period < tinfo_sync_period[0])) cts->sync_period = tinfo_sync_period[0]; if (cts->sync_period > tinfo_sync_period[last_entry]) cts->sync_period = 0; if (cts->sync_offset == 0) cts->sync_period = 0; if ((update_type & AMD_TRANS_USER) != 0) { targ_info->user.period = cts->sync_period; targ_info->user.offset = cts->sync_offset; } if ((update_type & AMD_TRANS_GOAL) != 0) { targ_info->goal.period = cts->sync_period; targ_info->goal.offset = cts->sync_offset; } splx(intflag); pccb->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_CALC_GEOMETRY: { int extended; extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0; cam_calc_geometry(&pccb->ccg, extended); xpt_done(pccb); break; } default: pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } } static void amd_poll(struct cam_sim * psim) { amd_intr(cam_sim_softc(psim)); } static u_int8_t * phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt) { intptr_t dataPtr; struct ccb_scsiio *pcsio; u_int8_t i; struct amd_sg * pseg; 
	dataPtr = 0;
	pcsio = &pSRB->pccb->csio;
	dataPtr = (intptr_t) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	/* Skip past the S/G entries that have already been transferred. */
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	/* Then add the byte count consumed within the current segment. */
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}

/*
 * bus_dmamap_load() callback used for single-segment control
 * structures: store the segment's bus address through the
 * bus_addr_t pointer passed as the callback argument.
 */
static void
amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/*
 * Revert every target (other than ourselves) to asynchronous
 * transfers by clearing its current sync parameters.
 */
static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

/*
 * Complete, with the given CAM status, every SRB on "queue" that
 * matches the target/lun/tag triple.  CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD and AMD_TAG_WILDCARD each match any value.
 * Matching SRBs are returned to the free list and their CCBs are
 * passed back to CAM via xpt_done().
 */
static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		/* Fetch the successor first; we may unlink srb below. */
		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;
		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;
		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		/* Freeze the devq once if the caller requested it. */
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

/*
 * Record new synchronous-transfer parameters for a target.  "type"
 * selects which set(s) to update (AMD_TRANS_CUR/GOAL/USER, and
 * AMD_TRANS_ACTIVE to program the chip immediately).
 */
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		/* The current settings changed; cache the register values. */
		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
tinfo->CtrlR4 &= ~EATER_25NS; if (clockrate > 7) tinfo->CtrlR4 |= EATER_25NS; else tinfo->CtrlR3 |= FAST_SCSI; if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) { amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg); amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg); amd_write8(amd, CNTLREG3, tinfo->CtrlR3); amd_write8(amd, CNTLREG4, tinfo->CtrlR4); } /* If possible, update the XPT's notion of our transfer rate */ if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(amd->psim), target, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { struct ccb_trans_settings neg; xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); neg.sync_period = period; neg.sync_offset = offset; neg.valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID; xpt_async(AC_TRANSFER_NEG, path, &neg); xpt_free_path(path); } } if ((type & AMD_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; } if ((type & AMD_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; } } static void amdsettags(struct amd_softc *amd, u_int target, int tagenb) { panic("Implement me!\n"); } #if 0 /* ********************************************************************** * Function : amd_reset (struct amd_softc * amd) * Purpose : perform a hard reset on the SCSI bus( and AMD chip). 
* Inputs : cmd - command which caused the SCSI RESET ********************************************************************** */ static void amd_reset(struct amd_softc * amd) { int intflag; u_int8_t bval; u_int16_t i; #ifdef AMD_DEBUG0 printf("DC390: RESET"); #endif intflag = splcam(); bval = amd_read8(amd, CNTLREG1); bval |= DIS_INT_ON_SCSI_RST; amd_write8(amd, CNTLREG1, bval); /* disable interrupt */ amd_ResetSCSIBus(amd); for (i = 0; i < 500; i++) { DELAY(1000); } bval = amd_read8(amd, CNTLREG1); bval &= ~DIS_INT_ON_SCSI_RST; amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */ amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); ResetDevParam(amd); amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AMD_TAG_WILDCARD, &amd->running_srbs, CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AMD_TAG_WILDCARD, &amd->waiting_srbs, CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); amd->active_srb = NULL; amd->ACBFlag = 0; splx(intflag); return; } void amd_timeout(void *arg1) { struct amd_srb * pSRB; pSRB = (struct amd_srb *) arg1; } #endif static int amdstart(struct amd_softc *amd, struct amd_srb *pSRB) { union ccb *pccb; struct ccb_scsiio *pcsio; struct amd_target_info *targ_info; u_int identify_msg; u_int command; u_int target; u_int lun; pccb = pSRB->pccb; pcsio = &pccb->csio; target = pccb->ccb_h.target_id; lun = pccb->ccb_h.target_lun; targ_info = &amd->tinfo[target]; amd_clear_msg_state(amd); amd_write8(amd, SCSIDESTIDREG, target); amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg); amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg); amd_write8(amd, CNTLREG1, targ_info->CtrlR1); amd_write8(amd, CNTLREG3, targ_info->CtrlR3); amd_write8(amd, CNTLREG4, targ_info->CtrlR4); amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); identify_msg = MSG_IDENTIFYFLAG | lun; if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0 && (pSRB->CmdBlock[0] != 
REQUEST_SENSE) && (pSRB->SRBFlag & AUTO_REQSENSE) == 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; amd_write8(amd, SCSIFIFOREG, identify_msg); if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0) pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; if (targ_info->current.period != targ_info->goal.period || targ_info->current.offset != targ_info->goal.offset) { command = SEL_W_ATN_STOP; amdconstructsdtr(amd, targ_info->goal.period, targ_info->goal.offset); } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { command = SEL_W_ATN2; pSRB->SRBState = SRB_START; amd_write8(amd, SCSIFIFOREG, pcsio->tag_action); amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber); } else { command = SEL_W_ATN; pSRB->SRBState = SRB_START; } if (command != SEL_W_ATN_STOP) amdsetupcommand(amd, pSRB); if (amd_read8(amd, SCSISTATREG) & INTERRUPT) { pSRB->SRBState = SRB_READY; return (1); } else { amd->last_phase = SCSI_ARBITRATING; amd_write8(amd, SCSICMDREG, command); amd->active_srb = pSRB; amd->cur_target = target; amd->cur_lun = lun; return (0); } } /* * Catch an interrupt from the adapter. * Process pending device interrupts. 
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int internstat = 0;
	u_int scsistat;
	u_int intstat;

	amd = (struct amd_softc *)arg;
	if (amd == NULL) {
#ifdef AMD_DEBUG0
		printf("amd_intr: amd NULL return......");
#endif
		return;
	}

	/* If the chip is not asserting an interrupt, this one isn't ours. */
	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		printf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	printf("scsistat=%2x,", scsistat);
#endif

	/*
	 * NOTE(review): internstat is read but its value is never used in
	 * the visible code; presumably the read is kept for its side
	 * effect on the chip's interrupt state -- confirm against the
	 * Am53C974 data sheet before removing.
	 */
	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	printf("intstat=%2x,", intstat);
#endif

	/* Dispatch exceptional conditions first; each handler returns. */
	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
*/ scsistat = amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat); amd->last_phase = scsistat & SCSI_PHASE_MASK; (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat); } } static u_int amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { struct amd_sg *psgl; u_int32_t ResidCnt, xferCnt; if (!(pSRB->SRBState & SRB_XFERPAD)) { if (scsistat & PARITY_ERR) { pSRB->SRBStatus |= PARITY_ERROR; } if (scsistat & COUNT_2_ZERO) { while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0) ; pSRB->TotalXferredLen += pSRB->SGToBeXferLen; pSRB->SGIndex++; if (pSRB->SGIndex < pSRB->SGcount) { pSRB->pSGlist++; psgl = pSRB->pSGlist; pSRB->SGPhysAddr = psgl->SGXPtr; pSRB->SGToBeXferLen = psgl->SGXLen; } else { pSRB->SGToBeXferLen = 0; } } else { ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f; ResidCnt += amd_read8(amd, CTCREG_LOW) | (amd_read8(amd, CTCREG_MID) << 8) | (amd_read8(amd, CURTXTCNTREG) << 16); xferCnt = pSRB->SGToBeXferLen - ResidCnt; pSRB->SGPhysAddr += xferCnt; pSRB->TotalXferredLen += xferCnt; pSRB->SGToBeXferLen = ResidCnt; } } amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD); return (scsistat); } static u_int amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { u_int8_t bval; u_int16_t i, residual; struct amd_sg *psgl; u_int32_t ResidCnt, xferCnt; u_int8_t * ptr; if (!(pSRB->SRBState & SRB_XFERPAD)) { if (scsistat & PARITY_ERR) { pSRB->SRBStatus |= PARITY_ERROR; } if (scsistat & COUNT_2_ZERO) { while (1) { bval = amd_read8(amd, DMA_Status); if ((bval & DMA_XFER_DONE) != 0) break; } amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD); pSRB->TotalXferredLen += pSRB->SGToBeXferLen; pSRB->SGIndex++; if (pSRB->SGIndex < pSRB->SGcount) { pSRB->pSGlist++; psgl = pSRB->pSGlist; pSRB->SGPhysAddr = psgl->SGXPtr; pSRB->SGToBeXferLen = psgl->SGXLen; } else { pSRB->SGToBeXferLen = 0; } } else { /* phase changed */ residual = 0; bval = amd_read8(amd, CURRENTFIFOREG); while (bval & 0x1f) { if ((bval & 0x1f) == 
1) { for (i = 0; i < 0x100; i++) { bval = amd_read8(amd, CURRENTFIFOREG); if (!(bval & 0x1f)) { goto din_1; } else if (i == 0x0ff) { residual = 1; goto din_1; } } } else { bval = amd_read8(amd, CURRENTFIFOREG); } } din_1: amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD); for (i = 0; i < 0x8000; i++) { if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE)) break; } amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD); ResidCnt = amd_read8(amd, CTCREG_LOW) | (amd_read8(amd, CTCREG_MID) << 8) | (amd_read8(amd, CURTXTCNTREG) << 16); xferCnt = pSRB->SGToBeXferLen - ResidCnt; pSRB->SGPhysAddr += xferCnt; pSRB->TotalXferredLen += xferCnt; pSRB->SGToBeXferLen = ResidCnt; if (residual) { /* get residual byte */ bval = amd_read8(amd, SCSIFIFOREG); ptr = phystovirt(pSRB, xferCnt); *ptr = bval; pSRB->SGPhysAddr++; pSRB->TotalXferredLen++; pSRB->SGToBeXferLen--; } } } return (scsistat); } static u_int amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG); /* get message */ pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG); pSRB->SRBState = SRB_COMPLETED; amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); return (SCSI_NOP0); } static u_int amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) { scsistat = SCSI_NOP0; } return (scsistat); } static u_int amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { int done; amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG); done = amdparsemsg(amd); if (done) amd->msgin_index = 0; else amd->msgin_index++; return (SCSI_NOP0); } static int amdparsemsg(struct amd_softc *amd) { int reject; int done; int response; done = FALSE; response = FALSE; reject = FALSE; /* * Parse as much of the message as is availible, * rejecting it if we don't support it. 
When * the entire message is availible and has been * handled, return TRUE indicating that we have * parsed an entire message. */ switch (amd->msgin_buf[0]) { case MSG_DISCONNECT: amd->active_srb->SRBState = SRB_DISCONNECT; amd->disc_count[amd->cur_target][amd->cur_lun]++; done = TRUE; break; case MSG_SIMPLE_Q_TAG: { struct amd_srb *disc_srb; if (amd->msgin_index < 1) break; disc_srb = &amd->SRB_array[amd->msgin_buf[1]]; if (amd->active_srb != NULL || disc_srb->SRBState != SRB_DISCONNECT || disc_srb->pccb->ccb_h.target_id != amd->cur_target || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) { printf("amd%d: Unexpected tagged reselection " "for target %d, Issuing Abort\n", amd->unit, amd->cur_target); amd->msgout_buf[0] = MSG_ABORT; amd->msgout_len = 1; response = TRUE; break; } amd->active_srb = disc_srb; amd->disc_count[amd->cur_target][amd->cur_lun]--; done = TRUE; break; } case MSG_MESSAGE_REJECT: response = amdhandlemsgreject(amd); if (response == FALSE) amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); /* FALLTHROUGH */ case MSG_NOOP: done = TRUE; break; case MSG_EXTENDED: { u_int clockrate; u_int period; u_int offset; u_int saved_offset; /* Wait for enough of the message to begin validation */ if (amd->msgin_index < 1) break; if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* Wait for opcode */ if (amd->msgin_index < 2) break; if (amd->msgin_buf[2] != MSG_EXT_SDTR) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. 
*/ if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = amd->msgin_buf[3]; saved_offset = offset = amd->msgin_buf[4]; clockrate = amdfindclockrate(amd, &period); if (offset > AMD_MAX_SYNC_OFFSET) offset = AMD_MAX_SYNC_OFFSET; if (period == 0 || offset == 0) { offset = 0; period = 0; clockrate = 0; } amdsetsync(amd, amd->cur_target, clockrate, period, offset, AMD_TRANS_ACTIVE|AMD_TRANS_GOAL); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose) printf("Sending SDTR!\n"); amd->msgout_index = 0; amd->msgout_len = 0; amdconstructsdtr(amd, period, offset); amd->msgout_index = 0; response = TRUE; } done = TRUE; break; } case MSG_SAVEDATAPOINTER: case MSG_RESTOREPOINTERS: /* XXX Implement!!! */ done = TRUE; break; default: reject = TRUE; break; } if (reject) { amd->msgout_index = 0; amd->msgout_len = 1; amd->msgout_buf[0] = MSG_MESSAGE_REJECT; done = TRUE; response = TRUE; } if (response) amd_write8(amd, SCSICMDREG, SET_ATN_CMD); if (done && !response) /* Clear the outgoing message buffer */ amd->msgout_len = 0; /* Drop Ack */ amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); return (done); } static u_int amdfindclockrate(struct amd_softc *amd, u_int *period) { u_int i; u_int clockrate; for (i = 0; i < sizeof(tinfo_sync_period); i++) { u_int8_t *table_entry; table_entry = &tinfo_sync_period[i]; if (*period <= *table_entry) { /* * When responding to a target that requests * sync, the requested rate may fall between * two rates that we can output, but still be * a rate that we can receive. Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. 
*/ if (i == 0) { *period = *table_entry; } break; } } if (i == sizeof(tinfo_sync_period)) { /* Too slow for us. Use asnyc transfers. */ *period = 0; clockrate = 0; } else clockrate = i + 4; return (clockrate); } /* * See if we sent a particular extended message to the target. * If "full" is true, the target saw the full message. * If "full" is false, the target saw at least the first * byte of the message. */ static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full) { int found; int index; found = FALSE; index = 0; while (index < amd->msgout_len) { if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT) index++; else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) { /* Skip tag type and tag id */ index += 2; } else if (amd->msgout_buf[index] == MSG_EXTENDED) { /* Found a candidate */ if (amd->msgout_buf[index+2] == msgtype) { u_int end_index; end_index = index + 1 + amd->msgout_buf[index + 1]; if (full) { if (amd->msgout_index > end_index) found = TRUE; } else if (amd->msgout_index > index) found = TRUE; } break; } else { panic("amdsentmsg: Inconsistent msg buffer"); } } return (found); } static void amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset) { amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED; amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN; amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR; amd->msgout_buf[amd->msgout_index++] = period; amd->msgout_buf[amd->msgout_index++] = offset; amd->msgout_len += 5; } static int amdhandlemsgreject(struct amd_softc *amd) { /* * If we had an outstanding SDTR for this * target, this is a signal that the target * is refusing negotiation. Also watch out * for rejected tag messages. 
*/ struct amd_srb *srb; struct amd_target_info *targ_info; int response = FALSE; srb = amd->active_srb; targ_info = &amd->tinfo[amd->cur_target]; if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ amdsetsync(amd, amd->cur_target, /*clockrate*/0, /*period*/0, /*offset*/0, AMD_TRANS_ACTIVE|AMD_TRANS_GOAL); printf("amd%d:%d: refuses synchronous negotiation. " "Using asynchronous transfers\n", amd->unit, amd->cur_target); } else if ((srb != NULL) && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { struct ccb_trans_settings neg; printf("amd%d:%d: refuses tagged commands. Performing " "non-tagged I/O\n", amd->unit, amd->cur_target); amdsettags(amd, amd->cur_target, FALSE); neg.flags = 0; neg.valid = CCB_TRANS_TQ_VALID; xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg); /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ if (amd->msgout_len != 0) bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1], amd->msgout_len); amd->msgout_buf[0] = MSG_IDENTIFYFLAG | srb->pccb->ccb_h.target_lun; amd->msgout_len++; if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG; srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; /* * Requeue all tagged commands for this target * currently in our posession so they can be * converted to untagged commands. */ amdcompletematch(amd, amd->cur_target, amd->cur_lun, AMD_TAG_WILDCARD, &amd->waiting_srbs, CAM_DEV_QFRZN|CAM_REQUEUE_REQ); } else { /* * Otherwise, we ignore it. 
*/ printf("amd%d:%d: Message reject received -- ignored\n", amd->unit, amd->cur_target); } return (response); } #if 0 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) { if (bval == MSG_DISCONNECT) { pSRB->SRBState = SRB_DISCONNECT; } else if (bval == MSG_SAVEDATAPOINTER) { goto min6; } else if ((bval == MSG_EXTENDED) || ((bval >= MSG_SIMPLE_Q_TAG) && (bval <= MSG_ORDERED_Q_TAG))) { pSRB->SRBState |= SRB_MSGIN_MULTI; pSRB->MsgInBuf[0] = bval; pSRB->MsgCnt = 1; pSRB->pMsgPtr = &pSRB->MsgInBuf[1]; } else if (bval == MSG_MESSAGE_REJECT) { amd_write8(amd, SCSICMDREG, RESET_ATN_CMD); if (pSRB->SRBState & DO_SYNC_NEGO) { goto set_async; } } else if (bval == MSG_RESTOREPOINTERS) { goto min6; } else { goto min6; } } else { /* minx: */ *pSRB->pMsgPtr = bval; pSRB->MsgCnt++; pSRB->pMsgPtr++; if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG) && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) { if (pSRB->MsgCnt == 2) { pSRB->SRBState = 0; pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]]; if (pSRB->SRBState & SRB_DISCONNECT) == 0) { pSRB = amd->pTmpSRB; pSRB->SRBState = SRB_UNEXPECT_RESEL; pDCB->pActiveSRB = pSRB; pSRB->MsgOutBuf[0] = MSG_ABORT_TAG; EnableMsgOut2(amd, pSRB); } else { if (pDCB->DCBFlag & ABORT_DEV_) { pSRB->SRBState = SRB_ABORT_SENT; EnableMsgOut1(amd, pSRB); } pDCB->pActiveSRB = pSRB; pSRB->SRBState = SRB_DATA_XFER; } } } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) && (pSRB->MsgCnt == 5)) { pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO); if ((pSRB->MsgInBuf[1] != 3) || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */ pSRB->MsgCnt = 1; pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT; amd_write8(amd, SCSICMDREG, SET_ATN_CMD); } else if (!(pSRB->MsgInBuf[3]) || !(pSRB->MsgInBuf[4])) { set_async: /* set async */ pDCB = pSRB->pSRBDCB; /* disable sync & sync nego */ pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE); pDCB->SyncPeriod = 0; pDCB->SyncOffset = 0; pDCB->tinfo.goal.period = 0; pDCB->tinfo.goal.offset = 0; pDCB->tinfo.current.period = 0; pDCB->tinfo.current.offset = 0; 
pDCB->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT; pDCB->CtrlR3 = FAST_CLK; /* non_fast */ pDCB->CtrlR4 &= 0x3f; pDCB->CtrlR4 |= EATER_25NS; goto re_prog; } else {/* set sync */ pDCB = pSRB->pSRBDCB; /* enable sync & sync nego */ pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE; /* set sync offset */ pDCB->SyncOffset &= 0x0f0; pDCB->SyncOffset |= pSRB->MsgInBuf[4]; /* set sync period */ pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3]; wval = (u_int16_t) pSRB->MsgInBuf[3]; wval = wval << 2; wval--; wval1 = wval / 25; if ((wval1 * 25) != wval) { wval1++; } bval = FAST_CLK|FAST_SCSI; pDCB->CtrlR4 &= 0x3f; if (wval1 >= 8) { /* Fast SCSI */ wval1--; bval = FAST_CLK; pDCB->CtrlR4 |= EATER_25NS; } pDCB->CtrlR3 = bval; pDCB->SyncPeriod = (u_int8_t) wval1; pDCB->tinfo.goal.period = tinfo_sync_period[pDCB->SyncPeriod - 4]; pDCB->tinfo.goal.offset = pDCB->SyncOffset; pDCB->tinfo.current.period = tinfo_sync_period[pDCB->SyncPeriod - 4];; pDCB->tinfo.current.offset = pDCB->SyncOffset; /* * program SCSI control register */ re_prog: amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod); amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset); amd_write8(amd, CNTLREG3, pDCB->CtrlR3); amd_write8(amd, CNTLREG4, pDCB->CtrlR4); } } } min6: amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD); return (SCSI_NOP0); } #endif static u_int amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { DataIO_Comm(amd, pSRB, WRITE_DIRECTION); return (scsistat); } static u_int amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat) { DataIO_Comm(amd, pSRB, READ_DIRECTION); return (scsistat); } static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir) { struct amd_sg * psgl; u_int32_t lval; if (pSRB->SGIndex < pSRB->SGcount) { amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */ if (!pSRB->SGToBeXferLen) { psgl = pSRB->pSGlist; pSRB->SGPhysAddr = psgl->SGXPtr; pSRB->SGToBeXferLen = psgl->SGXLen; } lval = pSRB->SGToBeXferLen; amd_write8(amd, CTCREG_LOW, 
lval);
	/*
	 * Continuation of DataIO_Comm(): program the middle/high bytes of
	 * the chip transfer counter and the PLX DMA engine for the current
	 * scatter/gather segment, then start the DMA information transfer.
	 */
	amd_write8(amd, CTCREG_MID, lval >> 8);
	amd_write8(amd, CURTXTCNTREG, lval >> 16);
	amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
	amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
	pSRB->SRBState = SRB_DATA_XFER;
	amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
	amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		/*
		 * No S/G data left: the target wants more (or less) data
		 * than we have.  Record the over/under-run and let the chip
		 * pad the transfer.
		 */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);
		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}

/*
 * Command phase: drop ATN, clear the FIFO, load the CDB into the chip
 * (via amdsetupcommand()) and start an information transfer to send it.
 * Returns scsistat unchanged, as all phase handlers do.
 */
static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

/*
 * Status phase: clear the FIFO and ask the chip to run the
 * initiator-command-complete sequence (status byte + message byte).
 */
static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

/*
 * Message-out phase: send any queued outgoing message bytes.  If there
 * is nothing queued, a NOP message is substituted (a message-out phase
 * must transfer at least one byte).
 */
static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

/*
 * Message-in phase: clear the FIFO and start the transfer; the received
 * message bytes are picked up by the phase-0 handler on the next
 * interrupt.
 */
static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

/* Placeholder phase handler: nothing to do for this bus phase. */
static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}

static void
amd_Disconnect(struct amd_softc * amd)
{
	struct amd_srb *srb;
	int
target; int lun; srb = amd->active_srb; amd->active_srb = NULL; amd->last_phase = SCSI_BUS_FREE; amd_write8(amd, SCSICMDREG, EN_SEL_RESEL); target = amd->cur_target; lun = amd->cur_lun; if (srb == NULL) { /* Invalid reselection */ amdrunwaiting(amd); } else if (srb->SRBState & SRB_ABORT_SENT) { /* Clean up and done this srb */ #if 0 while (( = TAILQ_FIRST(&amd->running_srbs)) != NULL) { /* XXX What about "done'ing" these srbs??? */ if (pSRB->pSRBDCB == pDCB) { TAILQ_REMOVE(&amd->running_srbs, pSRB, links); TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); } } amdrunwaiting(amd); #endif } else { if ((srb->SRBState & (SRB_START | SRB_MSGOUT)) || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) { srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT; goto disc1; } else if (srb->SRBState & SRB_DISCONNECT) { if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID)) amd->untagged_srbs[target][lun] = srb; amdrunwaiting(amd); } else if (srb->SRBState & SRB_COMPLETED) { disc1: srb->SRBState = SRB_FREE; SRBdone(amd, srb); } } return; } static void amd_Reselect(struct amd_softc *amd) { struct amd_target_info *tinfo; u_int16_t disc_count; amd_clear_msg_state(amd); if (amd->active_srb != NULL) { /* Requeue the SRB for our attempted Selection */ TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links); TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links); amd->active_srb = NULL; } /* get ID */ amd->cur_target = amd_read8(amd, SCSIFIFOREG); amd->cur_target ^= amd->HostID_Bit; amd->cur_target = ffs(amd->cur_target) - 1; amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7; tinfo = &amd->tinfo[amd->cur_target]; amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun]; disc_count = amd->disc_count[amd->cur_target][amd->cur_lun]; if (disc_count == 0) { printf("amd%d: Unexpected reselection for target %d, " "Issuing Abort\n", amd->unit, amd->cur_target); amd->msgout_buf[0] = MSG_ABORT; amd->msgout_len = 1; amd_write8(amd, SCSICMDREG, SET_ATN_CMD); } if (amd->active_srb != NULL) { 
amd->disc_count[amd->cur_target][amd->cur_lun]--; amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL; } amd_write8(amd, SCSIDESTIDREG, amd->cur_target); amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg); amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg); amd_write8(amd, CNTLREG1, tinfo->CtrlR1); amd_write8(amd, CNTLREG3, tinfo->CtrlR3); amd_write8(amd, CNTLREG4, tinfo->CtrlR4); amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */ amd->last_phase = SCSI_NOP0; } static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB) { u_int8_t bval, i, status; union ccb *pccb; struct ccb_scsiio *pcsio; int intflag; struct amd_sg *ptr2; u_int32_t swlval; pccb = pSRB->pccb; pcsio = &pccb->csio; CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("SRBdone - TagNumber %d\n", pSRB->TagNumber)); if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op); bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap); } status = pSRB->TargetStatus; pccb->ccb_h.status = CAM_REQ_CMP; if (pSRB->SRBFlag & AUTO_REQSENSE) { pSRB->SRBFlag &= ~AUTO_REQSENSE; pSRB->AdaptStatus = 0; pSRB->TargetStatus = SCSI_STATUS_CHECK_COND; if (status == SCSI_STATUS_CHECK_COND) { pccb->ccb_h.status = CAM_AUTOSENSE_FAIL; goto ckc_e; } *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0]; pcsio->sense_resid = pcsio->sense_len - pSRB->TotalXferredLen; pSRB->TotalXferredLen = pSRB->Segment1[1]; if (pSRB->TotalXferredLen) { /* ???? 
*/ pcsio->resid = pcsio->dxfer_len - pSRB->TotalXferredLen; /* The resid field contains valid data */ /* Flush resid bytes on complete */ } else { pcsio->scsi_status = SCSI_STATUS_CHECK_COND; } bzero(&pcsio->sense_data, pcsio->sense_len); bcopy(amd_get_sense_buf(amd, pSRB), &pcsio->sense_data, pcsio->sense_len); pccb->ccb_h.status = CAM_AUTOSNS_VALID; goto ckc_e; } if (status) { if (status == SCSI_STATUS_CHECK_COND) { if ((pSRB->SGIndex < pSRB->SGcount) && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) { bval = pSRB->SGcount; swlval = pSRB->SGToBeXferLen; ptr2 = pSRB->pSGlist; ptr2++; for (i = pSRB->SGIndex + 1; i < bval; i++) { swlval += ptr2->SGXLen; ptr2++; } /* ??????? */ pcsio->resid = (u_int32_t) swlval; #ifdef AMD_DEBUG0 printf("XferredLen=%8x,NotYetXferLen=%8x,", pSRB->TotalXferredLen, swlval); #endif } if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { #ifdef AMD_DEBUG0 printf("RequestSense..................\n"); #endif RequestSense(amd, pSRB); return; } pcsio->scsi_status = SCSI_STATUS_CHECK_COND; pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; goto ckc_e; } else if (status == SCSI_STATUS_QUEUE_FULL) { pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL; pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; goto ckc_e; } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) { pSRB->AdaptStatus = H_SEL_TIMEOUT; pSRB->TargetStatus = 0; pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT; pccb->ccb_h.status = CAM_SEL_TIMEOUT; } else if (status == SCSI_STATUS_BUSY) { #ifdef AMD_DEBUG0 printf("DC390: target busy at %s %d\n", __FILE__, __LINE__); #endif pcsio->scsi_status = SCSI_STATUS_BUSY; pccb->ccb_h.status = CAM_SCSI_BUSY; } else if (status == SCSI_STATUS_RESERV_CONFLICT) { #ifdef AMD_DEBUG0 printf("DC390: target reserved at %s %d\n", __FILE__, __LINE__); #endif pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */ } else { pSRB->AdaptStatus = 0; #ifdef AMD_DEBUG0 printf("DC390: driver stuffup at %s 
%d\n", __FILE__, __LINE__); #endif pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; } } else { status = pSRB->AdaptStatus; if (status & H_OVER_UNDER_RUN) { pSRB->TargetStatus = 0; pccb->ccb_h.status = CAM_DATA_RUN_ERR; } else if (pSRB->SRBStatus & PARITY_ERROR) { #ifdef AMD_DEBUG0 printf("DC390: driver stuffup %s %d\n", __FILE__, __LINE__); #endif /* Driver failed to perform operation */ pccb->ccb_h.status = CAM_UNCOR_PARITY; } else { /* No error */ pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; pcsio->resid = 0; /* there is no error, (sense is invalid) */ } } ckc_e: intflag = splcam(); if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* CAM request not yet complete =>device_Q frozen */ xpt_freeze_devq(pccb->ccb_h.path, 1); pccb->ccb_h.status |= CAM_DEV_QFRZN; } TAILQ_REMOVE(&amd->running_srbs, pSRB, links); TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links); amdrunwaiting(amd); splx(intflag); xpt_done(pccb); } static void amd_ResetSCSIBus(struct amd_softc * amd) { int intflag; intflag = splcam(); amd->ACBFlag |= RESET_DEV; amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD); splx(intflag); return; } static void amd_ScsiRstDetect(struct amd_softc * amd) { int intflag; u_int32_t wlval; #ifdef AMD_DEBUG0 printf("amd_ScsiRstDetect \n"); #endif wlval = 1000; while (--wlval) { /* delay 1 sec */ DELAY(1000); } intflag = splcam(); amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD); amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); if (amd->ACBFlag & RESET_DEV) { amd->ACBFlag |= RESET_DONE; } else { amd->ACBFlag |= RESET_DETECT; ResetDevParam(amd); amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AMD_TAG_WILDCARD, &amd->running_srbs, CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AMD_TAG_WILDCARD, &amd->waiting_srbs, CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET); amd->active_srb = NULL; amd->ACBFlag = 0; amdrunwaiting(amd); } splx(intflag); return; } static void RequestSense(struct amd_softc *amd, struct 
amd_srb *pSRB) { union ccb *pccb; struct ccb_scsiio *pcsio; pccb = pSRB->pccb; pcsio = &pccb->csio; pSRB->SRBFlag |= AUTO_REQSENSE; pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0])); pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4])); pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount; pSRB->Segment1[1] = pSRB->TotalXferredLen; pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; pSRB->Segmentx.SGXPtr = amd_get_sense_bufaddr(amd, pSRB); pSRB->Segmentx.SGXLen = amd_get_sense_bufsize(amd, pSRB); pSRB->pSGlist = &pSRB->Segmentx; pSRB->SGcount = 1; pSRB->SGIndex = 0; pSRB->CmdBlock[0] = REQUEST_SENSE; pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5; pSRB->CmdBlock[2] = 0; pSRB->CmdBlock[3] = 0; pSRB->CmdBlock[4] = pcsio->sense_len; pSRB->CmdBlock[5] = 0; pSRB->ScsiCmdLen = 6; pSRB->TotalXferredLen = 0; pSRB->SGToBeXferLen = 0; if (amdstart(amd, pSRB) != 0) { TAILQ_REMOVE(&amd->running_srbs, pSRB, links); TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links); } } static void amd_InvalidCmd(struct amd_softc * amd) { struct amd_srb *srb; srb = amd->active_srb; if (srb->SRBState & (SRB_START|SRB_MSGOUT)) amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD); } void amd_linkSRB(struct amd_softc *amd) { u_int16_t count, i; struct amd_srb *psrb; int error; count = amd->SRBCount; for (i = 0; i < count; i++) { psrb = (struct amd_srb *)&amd->SRB_array[i]; psrb->TagNumber = i; /* * Create the dmamap. This is no longer optional! * * XXX Since there is no detach method in this driver, * this does not get freed! 
*/
		if ((error = bus_dmamap_create(amd->buffer_dmat, 0,
		    &psrb->dmamap)) != 0) {
			device_printf(amd->dev, "Error %d creating buffer "
			    "dmamap!\n", error);
			return;
		}
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}

/*
 * Assert (ENABLE_CE) or deassert (DISABLE_CE) the serial EEPROM chip
 * enable.  The EEPROM is bit-banged through PCI configuration space;
 * *regval is set to the config register offset used for the following
 * amd_EEpromOutDI()/amd_Prepare() calls.
 */
static void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}

/*
 * Clock one data bit (Carry) out to the EEPROM's DI pin, toggling the
 * clock line via PCI config space.  The 160us delays satisfy the
 * EEPROM's timing requirements.
 */
static void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}

/*
 * Clock one data bit in from the EEPROM's DO pin.
 * Returns 1 or 0 for the sampled bit.
 */
static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
		return (1);
	return (0);
}

/* Shift in one 16-bit word from the EEPROM, MSB first. */
static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int	  i;
	u_int	  carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}

/*
 * Clock out a 9-bit EEPROM command (start bit + opcode + address),
 * MSB first.  Note the first bit sent is always 1 (the start bit);
 * EEpromCmd supplies the remaining bits.
 */
static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int i, j;
	int   carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ?
1 : 0; j >>= 1; } } static void amd_ReadEEprom(struct amd_softc *amd) { int regval; u_int i; u_int16_t *ptr; u_int8_t cmd; ptr = (u_int16_t *)&amd->eepromBuf[0]; cmd = EEPROM_READ; for (i = 0; i < 0x40; i++) { amd_EnDisableCE(amd, ENABLE_CE, ®val); amd_Prepare(amd, ®val, cmd); *ptr = EEpromGetData1(amd); ptr++; cmd++; amd_EnDisableCE(amd, DISABLE_CE, ®val); } } static void amd_load_defaults(struct amd_softc *amd) { int target; bzero(&amd->eepromBuf, sizeof amd->eepromBuf); for (target = 0; target < MAX_SCSI_ID; target++) amd->eepromBuf[target << 2] = (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK); amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7; amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G; amd->eepromBuf[EE_TAG_CMD_NUM] = 4; } static void amd_load_eeprom_or_defaults(struct amd_softc *amd) { u_int16_t wval, *ptr; u_int8_t i; amd_ReadEEprom(amd); wval = 0; ptr = (u_int16_t *) & amd->eepromBuf[0]; for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++) wval += *ptr; if (wval != EE_CHECKSUM) { if (bootverbose) printf("amd%d: SEEPROM data unavailable. " "Using default device parameters.\n", amd->unit); amd_load_defaults(amd); } } /* ********************************************************************** * Function : static int amd_init (struct Scsi_Host *host) * Purpose : initialize the internal structures for a given SCSI host * Inputs : host - pointer to this host adapter's structure/ ********************************************************************** */ static int amd_init(device_t dev) { struct amd_softc *amd = device_get_softc(dev); struct resource *iores; int i, rid; u_int bval; rid = PCI_BASE_ADDR0; iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (iores == NULL) { if (bootverbose) printf("amd_init: bus_alloc_resource failure!\n"); return ENXIO; } amd->tag = rman_get_bustag(iores); amd->bsh = rman_get_bushandle(iores); /* DMA tag for mapping buffers into device visible space. 
*/ if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG, /*maxsegsz*/AMD_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &amd->buffer_dmat) != 0) { if (bootverbose) printf("amd_init: bus_dma_tag_create failure!\n"); return ENXIO; } /* Create, allocate, and map DMA buffers for autosense data */ if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, sizeof(struct scsi_sense_data) * MAX_SRB_CNT, /*nsegments*/1, /*maxsegsz*/AMD_MAXTRANSFER_SIZE, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &amd->sense_dmat) != 0) { if (bootverbose) device_printf(dev, "cannot create sense buffer dmat\n"); return (ENXIO); } if (bus_dmamem_alloc(amd->sense_dmat, (void **)&amd->sense_buffers, BUS_DMA_NOWAIT, &amd->sense_dmamap) != 0) return (ENOMEM); bus_dmamap_load(amd->sense_dmat, amd->sense_dmamap, amd->sense_buffers, sizeof(struct scsi_sense_data) * MAX_SRB_CNT, amd_dmamap_cb, &amd->sense_busaddr, /*flags*/0); TAILQ_INIT(&amd->free_srbs); TAILQ_INIT(&amd->running_srbs); TAILQ_INIT(&amd->waiting_srbs); amd->last_phase = SCSI_BUS_FREE; amd->dev = dev; amd->unit = device_get_unit(dev); amd->SRBCount = MAX_SRB_CNT; amd->status = 0; amd_load_eeprom_or_defaults(amd); amd->max_id = 7; if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) { amd->max_lun = 7; } else { amd->max_lun = 0; } amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID]; amd->HostID_Bit = (1 << amd->AdaptSCSIID); amd->AdaptSCSILUN = 0; /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */ amd->ACBFlag = 0; amd->Gmode2 = amd->eepromBuf[EE_MODE2]; amd_linkSRB(amd); for (i = 0; i <= amd->max_id; i++) { if (amd->AdaptSCSIID != i) { struct amd_target_info *tinfo; PEEprom prom; tinfo = &amd->tinfo[i]; prom = 
(PEEprom)&amd->eepromBuf[i << 2]; if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) { tinfo->disc_tag |= AMD_USR_DISCENB; if ((prom->EE_MODE1 & TAG_QUEUING) != 0) tinfo->disc_tag |= AMD_USR_TAGENB; } if ((prom->EE_MODE1 & SYNC_NEGO) != 0) { tinfo->user.period = eeprom_period[prom->EE_SPEED]; tinfo->user.offset = AMD_MAX_SYNC_OFFSET; } tinfo->CtrlR1 = amd->AdaptSCSIID; if ((prom->EE_MODE1 & PARITY_CHK) != 0) tinfo->CtrlR1 |= PARITY_ERR_REPO; tinfo->CtrlR3 = FAST_CLK; tinfo->CtrlR4 = EATER_25NS; if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0) tinfo->CtrlR4 |= NEGATE_REQACKDATA; } } amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */ /* Conversion factor = 0 , 40MHz clock */ amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ); /* NOP cmd - clear command register */ amd_write8(amd, SCSICMDREG, NOP_CMD); amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD); amd_write8(amd, CNTLREG3, FAST_CLK); bval = EATER_25NS; if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) { bval |= NEGATE_REQACKDATA; } amd_write8(amd, CNTLREG4, bval); /* Disable SCSI bus reset interrupt */ amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST); return 0; } /* * attach and init a host adapter */ static int amd_attach(device_t dev) { struct cam_devq *devq; /* Device Queue to use for this SIM */ u_int8_t intstat; struct amd_softc *amd = device_get_softc(dev); int unit = device_get_unit(dev); int rid; void *ih; struct resource *irqres; if (amd_init(dev)) { if (bootverbose) printf("amd_attach: amd_init failure!\n"); return ENXIO; } /* Reset Pending INT */ intstat = amd_read8(amd, INTSTATREG); /* After setting up the adapter, map our interrupt */ rid = 0; irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (irqres == NULL || bus_setup_intr(dev, irqres, INTR_TYPE_CAM | INTR_ENTROPY, amd_intr, amd, &ih)) { if (bootverbose) printf("amd%d: unable to register interrupt handler!\n", unit); return ENXIO; } /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus 
* start queue to reset to the idle loop. * * Create device queue of SIM(s) * (MAX_START_JOB - 1) : * max_sim_transactions */ devq = cam_simq_alloc(MAX_START_JOB); if (devq == NULL) { if (bootverbose) printf("amd_attach: cam_simq_alloc failure!\n"); return ENXIO; } amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd", amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE, devq); if (amd->psim == NULL) { cam_simq_free(devq); if (bootverbose) printf("amd_attach: cam_sim_alloc failure!\n"); return ENXIO; } if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) { cam_sim_free(amd->psim, /*free_devq*/TRUE); if (bootverbose) printf("amd_attach: xpt_bus_register failure!\n"); return ENXIO; } if (xpt_create_path(&amd->ppath, /* periph */ NULL, cam_sim_path(amd->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(amd->psim)); cam_sim_free(amd->psim, /* free_simq */ TRUE); if (bootverbose) printf("amd_attach: xpt_create_path failure!\n"); return ENXIO; } return 0; } static int amd_probe(device_t dev) { if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) { device_set_desc(dev, "Tekram DC390(T)/AMD53c974 SCSI Host Adapter"); return 0; } return ENXIO; } static device_method_t amd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, amd_probe), DEVMETHOD(device_attach, amd_attach), { 0, 0 } }; static driver_t amd_driver = { "amd", amd_methods, sizeof(struct amd_softc) }; static devclass_t amd_devclass; DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0); MODULE_DEPEND(amd, cam, 1, 1, 1); Index: head/sys/dev/amr/amr_disk.c =================================================================== --- head/sys/dev/amr/amr_disk.c (revision 129878) +++ head/sys/dev/amr/amr_disk.c (revision 129879) @@ -1,285 +1,286 @@ /*- * Copyright (c) 1999 Jonathan Lemon * Copyright (c) 1999, 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 2002 Eric Moore * Copyright (c) 2002 LSI Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The party using or redistributing the source code and binary forms * agrees to the disclaimer below and the terms and conditions set forth * herein. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Disk driver for AMI MegaRaid controllers */ #include #include #include +#include #include #include #include #include #include #include #include #include #include /* prototypes */ static int amrd_probe(device_t dev); static int amrd_attach(device_t dev); static int amrd_detach(device_t dev); static disk_open_t amrd_open; static disk_strategy_t amrd_strategy; static devclass_t amrd_devclass; #ifdef FREEBSD_4 static int disks_registered = 0; #endif static device_method_t amrd_methods[] = { DEVMETHOD(device_probe, amrd_probe), DEVMETHOD(device_attach, amrd_attach), DEVMETHOD(device_detach, amrd_detach), { 0, 0 } }; static driver_t amrd_driver = { "amrd", amrd_methods, sizeof(struct amrd_softc) }; DRIVER_MODULE(amrd, amr, amrd_driver, amrd_devclass, 0, 0); static int amrd_open(struct disk *dp) { struct amrd_softc *sc = (struct amrd_softc *)dp->d_drv1; #if __FreeBSD_version < 500000 /* old buf style */ struct disklabel *label; #endif debug_called(1); if (sc == NULL) return (ENXIO); /* controller not active? 
*/
    if (sc->amrd_controller->amr_state & AMR_STATE_SHUTDOWN)
	return(ENXIO);

#if __FreeBSD_version < 500000		/* old buf style */
    /* Synthesize a disklabel from the logical drive's geometry. */
    label = &sc->amrd_disk.d_label;
    bzero(label, sizeof(*label));
    label->d_type = DTYPE_SCSI;
    label->d_secsize = AMR_BLKSIZE;
    label->d_nsectors = sc->amrd_drive->al_sectors;
    label->d_ntracks = sc->amrd_drive->al_heads;
    label->d_ncylinders = sc->amrd_drive->al_cylinders;
    label->d_secpercyl = sc->amrd_drive->al_sectors * sc->amrd_drive->al_heads;
    label->d_secperunit = sc->amrd_drive->al_size;
#else
    /* Publish geometry through the GEOM disk structure. */
    sc->amrd_disk->d_sectorsize = AMR_BLKSIZE;
    sc->amrd_disk->d_mediasize = (off_t)sc->amrd_drive->al_size * AMR_BLKSIZE;
    sc->amrd_disk->d_fwsectors = sc->amrd_drive->al_sectors;
    sc->amrd_disk->d_fwheads = sc->amrd_drive->al_heads;
#endif

    return (0);
}

/********************************************************************************
 * System crashdump support
 *
 * dumper_t callback: write `length' bytes from `virtual' to the logical
 * drive starting at byte `offset'.  Returns 0 on success or an errno.
 */
static int
amrd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
    struct amrd_softc	*amrd_sc;
    struct amr_softc	*amr_sc;
    int			error;
    struct disk		*dp;

    dp = arg;
    amrd_sc = (struct amrd_softc *)dp->d_drv1;
    if (amrd_sc == NULL)
	return(ENXIO);
    amr_sc = (struct amr_softc *)amrd_sc->amrd_controller;

    if (length > 0) {
	/* Drive number is the index of this drive in the controller array. */
	int	driveno = amrd_sc->amrd_drive - amr_sc->amr_drive;

	if ((error = amr_dump_blocks(amr_sc,driveno,offset / AMR_BLKSIZE ,(void *)virtual,(int) length / AMR_BLKSIZE )) != 0)
	    return(error);
    }
    return(0);
}

/*
 * Read/write routine for a buffer. Finds the proper unit, range checks
 * arguments, and schedules the transfer. Does not wait for the transfer
 * to complete. Multi-page transfers are supported. All I/O requests must
 * be a multiple of a sector in length.
 */
static void
amrd_strategy(struct bio *bio)
{
    struct amrd_softc	*sc = (struct amrd_softc *)bio->bio_disk->d_drv1;

    /* bogus disk?
*/
    if (sc == NULL) {
	bio->bio_error = EINVAL;
	goto bad;
    }

    /* Hand the bio to the controller; completion arrives via amrd_intr(). */
    amr_submit_bio(sc->amrd_controller, bio);
    return;

 bad:
    bio->bio_flags |= BIO_ERROR;

    /*
     * Correctly set the buf to indicate a completed transfer
     */
    bio->bio_resid = bio->bio_bcount;
    biodone(bio);
    return;
}

/*
 * Completion handler invoked by the controller for a finished bio:
 * translate the error flag into bio fields and finish the request.
 */
void
amrd_intr(void *data)
{
    struct bio *bio = (struct bio *)data;

    debug_called(2);

    if (bio->bio_flags & BIO_ERROR) {
	bio->bio_error = EIO;
	debug(1, "i/o error\n");
    } else {
	bio->bio_resid = 0;
    }

    AMR_BIO_FINISH(bio);
}

/* newbus probe: always matches; just sets the description. */
static int
amrd_probe(device_t dev)
{

    debug_called(1);

    device_set_desc(dev, "LSILogic MegaRAID logical drive");
    return (0);
}

/*
 * newbus attach: bind to the parent amr(4) controller, report the
 * logical drive's size/RAID level/state, and publish a GEOM disk.
 */
static int
amrd_attach(device_t dev)
{
    struct amrd_softc	*sc = (struct amrd_softc *)device_get_softc(dev);
    device_t		parent;

    debug_called(1);

    parent = device_get_parent(dev);
    sc->amrd_controller = (struct amr_softc *)device_get_softc(parent);
    sc->amrd_unit = device_get_unit(dev);
    sc->amrd_drive = device_get_ivars(dev);
    sc->amrd_dev = dev;

    device_printf(dev, "%uMB (%u sectors) RAID %d (%s)\n",
		  sc->amrd_drive->al_size / ((1024 * 1024) / AMR_BLKSIZE),
		  sc->amrd_drive->al_size,
		  sc->amrd_drive->al_properties & AMR_DRV_RAID_MASK,
		  amr_describe_code(amr_table_drvstate, AMR_DRV_CURSTATE(sc->amrd_drive->al_state)));

    sc->amrd_disk = disk_alloc();
    sc->amrd_disk->d_drv1 = sc;
    sc->amrd_disk->d_maxsize = (AMR_NSEG - 1) * PAGE_SIZE;
    sc->amrd_disk->d_open = amrd_open;
    sc->amrd_disk->d_strategy = amrd_strategy;
    sc->amrd_disk->d_name = "amrd";
    sc->amrd_disk->d_dump = (dumper_t *)amrd_dump;
    sc->amrd_disk->d_unit = sc->amrd_unit;
    sc->amrd_disk->d_flags = DISKFLAG_NEEDSGIANT;
    disk_create(sc->amrd_disk, DISK_VERSION);
#ifdef FREEBSD_4
    disks_registered++;
#endif

    return (0);
}

/*
 * newbus detach: refuse while the disk is open, otherwise tear down
 * the GEOM disk.
 */
static int
amrd_detach(device_t dev)
{
    struct amrd_softc *sc = (struct amrd_softc *)device_get_softc(dev);

    debug_called(1);

    if (sc->amrd_disk->d_flags & DISKFLAG_OPEN)
	return(EBUSY);

#ifdef FREEBSD_4
    if (--disks_registered == 0)
	cdevsw_remove(&amrddisk_cdevsw);
#else
    disk_destroy(sc->amrd_disk);
#endif
    return(0);
}

Index:
head/sys/dev/amr/amr_pci.c =================================================================== --- head/sys/dev/amr/amr_pci.c (revision 129878) +++ head/sys/dev/amr/amr_pci.c (revision 129879) @@ -1,612 +1,613 @@ /*- * Copyright (c) 1999,2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 2002 Eric Moore * Copyright (c) 2002 LSI Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The party using or redistributing the source code and binary forms * agrees to the disclaimer below and the terms and conditions set forth * herein. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include static int amr_pci_probe(device_t dev); static int amr_pci_attach(device_t dev); static int amr_pci_detach(device_t dev); static int amr_pci_shutdown(device_t dev); static int amr_pci_suspend(device_t dev); static int amr_pci_resume(device_t dev); static void amr_pci_intr(void *arg); static void amr_pci_free(struct amr_softc *sc); static void amr_sglist_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int amr_sglist_map(struct amr_softc *sc); static void amr_setup_mbox_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int amr_setup_mbox(struct amr_softc *sc); static device_method_t amr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, amr_pci_probe), DEVMETHOD(device_attach, amr_pci_attach), DEVMETHOD(device_detach, amr_pci_detach), DEVMETHOD(device_shutdown, amr_pci_shutdown), DEVMETHOD(device_suspend, amr_pci_suspend), DEVMETHOD(device_resume, amr_pci_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t amr_pci_driver = { "amr", amr_methods, sizeof(struct amr_softc) }; static devclass_t amr_devclass; DRIVER_MODULE(amr, pci, amr_pci_driver, amr_devclass, 0, 0); static struct { int vendor; int device; int flag; #define PROBE_SIGNATURE (1<<0) } amr_device_ids[] = { {0x101e, 0x9010, 0}, {0x101e, 0x9060, 0}, {0x8086, 0x1960, PROBE_SIGNATURE},/* generic i960RD, check for signature */ {0x101e, 0x1960, 0}, {0x1000, 0x1960, PROBE_SIGNATURE}, {0x1000, 0x0407, 0}, {0x1028, 0x000e, PROBE_SIGNATURE}, /* perc4/di i960 */ {0x1028, 0x000f, 0}, /* perc4/di Verde*/ {0, 0, 0} }; static int amr_pci_probe(device_t dev) { int i, sig; debug_called(1); for (i = 0; amr_device_ids[i].vendor != 0; i++) { if ((pci_get_vendor(dev) == 
amr_device_ids[i].vendor) && (pci_get_device(dev) == amr_device_ids[i].device)) { /* do we need to test for a signature? */ if (amr_device_ids[i].flag & PROBE_SIGNATURE) { sig = pci_read_config(dev, AMR_CFG_SIG, 2); if ((sig != AMR_SIGNATURE_1) && (sig != AMR_SIGNATURE_2)) continue; } device_set_desc(dev, "LSILogic MegaRAID"); return(-10); /* allow room to be overridden */ } } return(ENXIO); } static int amr_pci_attach(device_t dev) { struct amr_softc *sc; int rid, rtype, error; u_int32_t command; debug_called(1); /* * Initialise softc. */ sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->amr_dev = dev; /* assume failure is 'not configured' */ error = ENXIO; /* * Determine board type. */ command = pci_read_config(dev, PCIR_COMMAND, 1); if ((pci_get_device(dev) == 0x1960) || (pci_get_device(dev) == 0x0407) || (pci_get_device(dev) == 0x000e) || (pci_get_device(dev) == 0x000f)) { /* * Make sure we are going to be able to talk to this board. */ if ((command & PCIM_CMD_MEMEN) == 0) { device_printf(dev, "memory window not available\n"); goto out; } sc->amr_type |= AMR_TYPE_QUARTZ; } else { /* * Make sure we are going to be able to talk to this board. */ if ((command & PCIM_CMD_PORTEN) == 0) { device_printf(dev, "I/O window not available\n"); goto out; } } /* force the busmaster enable bit on */ if (!(command & PCIM_CMD_BUSMASTEREN)) { device_printf(dev, "busmaster bit not set, enabling\n"); command |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, command, 2); } /* * Allocate the PCI register window. */ rid = PCIR_BAR(0); rtype = AMR_IS_QUARTZ(sc) ? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->amr_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE); if (sc->amr_reg == NULL) { device_printf(sc->amr_dev, "can't allocate register window\n"); goto out; } sc->amr_btag = rman_get_bustag(sc->amr_reg); sc->amr_bhandle = rman_get_bushandle(sc->amr_reg); /* * Allocate and connect our interrupt. 
*/ rid = 0; sc->amr_irq = bus_alloc_resource_any(sc->amr_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->amr_irq == NULL) { device_printf(sc->amr_dev, "can't allocate interrupt\n"); goto out; } if (bus_setup_intr(sc->amr_dev, sc->amr_irq, INTR_TYPE_BIO | INTR_ENTROPY, amr_pci_intr, sc, &sc->amr_intr)) { device_printf(sc->amr_dev, "can't set up interrupt\n"); goto out; } debug(2, "interrupt attached"); /* assume failure is 'out of memory' */ error = ENOMEM; /* * Allocate the parent bus DMA tag appropriate for PCI. */ if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, AMR_NSEG, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->amr_parent_dmat)) { device_printf(dev, "can't allocate parent DMA tag\n"); goto out; } /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, AMR_NSEG, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, &Giant, /* lockfunc, lockarg */ &sc->amr_buffer_dmat)) { device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n"); goto out; } debug(2, "dma tag done"); /* * Allocate and set up mailbox in a bus-visible fashion. */ if ((error = amr_setup_mbox(sc)) != 0) goto out; debug(2, "mailbox setup"); /* * Build the scatter/gather buffers. */ if (amr_sglist_map(sc)) goto out; debug(2, "s/g list mapped"); /* * Do bus-independant initialisation, bring controller online. 
*/ error = amr_attach(sc); out: if (error) amr_pci_free(sc); return(error); } /******************************************************************************** * Disconnect from the controller completely, in preparation for unload. */ static int amr_pci_detach(device_t dev) { struct amr_softc *sc = device_get_softc(dev); int error; debug_called(1); if (sc->amr_state & AMR_STATE_OPEN) return(EBUSY); if ((error = amr_pci_shutdown(dev))) return(error); amr_pci_free(sc); return(0); } /******************************************************************************** * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach, system shutdown, or before performing * an operation which may add or delete system disks. (Call amr_startup to * resume normal operation.) * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ static int amr_pci_shutdown(device_t dev) { struct amr_softc *sc = device_get_softc(dev); int i,error,s; debug_called(1); /* mark ourselves as in-shutdown */ sc->amr_state |= AMR_STATE_SHUTDOWN; /* flush controller */ device_printf(sc->amr_dev, "flushing cache..."); printf("%s\n", amr_flush(sc) ? "failed" : "done"); s = splbio(); error = 0; /* delete all our child devices */ for(i = 0 ; i < AMR_MAXLD; i++) { if( sc->amr_drive[i].al_disk != 0) { if((error = device_delete_child(sc->amr_dev,sc->amr_drive[i].al_disk)) != 0) goto shutdown_out; sc->amr_drive[i].al_disk = 0; } } /* XXX disable interrupts? */ shutdown_out: splx(s); return(error); } /******************************************************************************** * Bring the controller to a quiescent state, ready for system suspend. 
*/ static int amr_pci_suspend(device_t dev) { struct amr_softc *sc = device_get_softc(dev); debug_called(1); sc->amr_state |= AMR_STATE_SUSPEND; /* flush controller */ device_printf(sc->amr_dev, "flushing cache..."); printf("%s\n", amr_flush(sc) ? "failed" : "done"); /* XXX disable interrupts? */ return(0); } /******************************************************************************** * Bring the controller back to a state ready for operation. */ static int amr_pci_resume(device_t dev) { struct amr_softc *sc = device_get_softc(dev); debug_called(1); sc->amr_state &= ~AMR_STATE_SUSPEND; /* XXX enable interrupts? */ return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. */ static void amr_pci_intr(void *arg) { struct amr_softc *sc = (struct amr_softc *)arg; debug_called(2); /* collect finished commands, queue anything waiting */ amr_done(sc); } /******************************************************************************** * Free all of the resources associated with (sc) * * Should not be called if the controller is active. 
*/ static void amr_pci_free(struct amr_softc *sc) { u_int8_t *p; debug_called(1); amr_free(sc); /* destroy data-transfer DMA tag */ if (sc->amr_buffer_dmat) bus_dma_tag_destroy(sc->amr_buffer_dmat); /* free and destroy DMA memory and tag for s/g lists */ if (sc->amr_sgtable) bus_dmamem_free(sc->amr_sg_dmat, sc->amr_sgtable, sc->amr_sg_dmamap); if (sc->amr_sg_dmat) bus_dma_tag_destroy(sc->amr_sg_dmat); /* free and destroy DMA memory and tag for mailbox */ if (sc->amr_mailbox) { p = (u_int8_t *)(uintptr_t)(volatile void *)sc->amr_mailbox; bus_dmamem_free(sc->amr_mailbox_dmat, p - 16, sc->amr_mailbox_dmamap); } if (sc->amr_mailbox_dmat) bus_dma_tag_destroy(sc->amr_mailbox_dmat); /* disconnect the interrupt handler */ if (sc->amr_intr) bus_teardown_intr(sc->amr_dev, sc->amr_irq, sc->amr_intr); if (sc->amr_irq != NULL) bus_release_resource(sc->amr_dev, SYS_RES_IRQ, 0, sc->amr_irq); /* destroy the parent DMA tag */ if (sc->amr_parent_dmat) bus_dma_tag_destroy(sc->amr_parent_dmat); /* release the register window mapping */ if (sc->amr_reg != NULL) bus_release_resource(sc->amr_dev, AMR_IS_QUARTZ(sc) ? SYS_RES_MEMORY : SYS_RES_IOPORT, PCIR_BAR(0), sc->amr_reg); } /******************************************************************************** * Allocate and map the scatter/gather table in bus space. */ static void amr_sglist_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct amr_softc *sc = (struct amr_softc *)arg; debug_called(1); /* save base of s/g table's address in bus space */ sc->amr_sgbusaddr = segs->ds_addr; } static int amr_sglist_map(struct amr_softc *sc) { size_t segsize; int error; debug_called(1); /* * Create a single tag describing a region large enough to hold all of * the s/g lists we will need. * * Note that we could probably use AMR_LIMITCMD here, but that may become tunable. 
*/ segsize = sizeof(struct amr_sgentry) * AMR_NSEG * AMR_MAXCMD; error = bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->amr_sg_dmat); if (error != 0) { device_printf(sc->amr_dev, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. We may need to switch to a more complex arrangement where * we allocate in smaller chunks and keep a lookup table from slot to bus address. * * XXX HACK ALERT: at least some controllers don't like the s/g memory being * allocated below 0x2000. We leak some memory if we get some * below this mark and allocate again. We should be able to * avoid this with the tag setup, but that does't seem to work. */ retry: error = bus_dmamem_alloc(sc->amr_sg_dmat, (void **)&sc->amr_sgtable, BUS_DMA_NOWAIT, &sc->amr_sg_dmamap); if (error) { device_printf(sc->amr_dev, "can't allocate s/g table\n"); return(ENOMEM); } bus_dmamap_load(sc->amr_sg_dmat, sc->amr_sg_dmamap, sc->amr_sgtable, segsize, amr_sglist_map_helper, sc, 0); if (sc->amr_sgbusaddr < 0x2000) { debug(1, "s/g table too low (0x%x), reallocating\n", sc->amr_sgbusaddr); goto retry; } return(0); } /******************************************************************************** * Allocate and set up mailbox areas for the controller (sc) * * The basic mailbox structure should be 16-byte aligned. This means that the * mailbox64 structure has 4 bytes hanging off the bottom. 
*/ static void amr_setup_mbox_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct amr_softc *sc = (struct amr_softc *)arg; debug_called(1); /* save phsyical base of the basic mailbox structure */ sc->amr_mailboxphys = segs->ds_addr + 16; } static int amr_setup_mbox(struct amr_softc *sc) { int error; u_int8_t *p; debug_called(1); /* * Create a single tag describing a region large enough to hold the entire * mailbox. */ error = bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 16, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(struct amr_mailbox) + 16, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->amr_mailbox_dmat); if (error != 0) { device_printf(sc->amr_dev, "can't allocate mailbox tag\n"); return(ENOMEM); } /* * Allocate the mailbox structure and permanently map it into * controller-visible space. */ error = bus_dmamem_alloc(sc->amr_mailbox_dmat, (void **)&p, BUS_DMA_NOWAIT, &sc->amr_mailbox_dmamap); if (error) { device_printf(sc->amr_dev, "can't allocate mailbox memory\n"); return(ENOMEM); } bus_dmamap_load(sc->amr_mailbox_dmat, sc->amr_mailbox_dmamap, p, sizeof(struct amr_mailbox64), amr_setup_mbox_helper, sc, 0); /* * Conventional mailbox is inside the mailbox64 region. */ bzero(p, sizeof(struct amr_mailbox64)); sc->amr_mailbox64 = (struct amr_mailbox64 *)(p + 12); sc->amr_mailbox = (struct amr_mailbox *)(p + 16); return(0); } Index: head/sys/dev/ar/if_ar_pci.c =================================================================== --- head/sys/dev/ar/if_ar_pci.c (revision 129878) +++ head/sys/dev/ar/if_ar_pci.c (revision 129879) @@ -1,169 +1,170 @@ /*- * Copyright (c) 1999 - 2001 John Hay. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #ifdef TRACE #define TRC(x) x #else #define TRC(x) #endif #define TRCL(x) x static int ar_pci_probe(device_t); static int ar_pci_attach(device_t); static device_method_t ar_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar_pci_probe), DEVMETHOD(device_attach, ar_pci_attach), DEVMETHOD(device_detach, ar_detach), { 0, 0 } }; static driver_t ar_pci_driver = { "ar", ar_pci_methods, sizeof(struct ar_hardc), }; DRIVER_MODULE(ar, pci, ar_pci_driver, ar_devclass, 0, 0); MODULE_DEPEND(ar, pci, 1, 1, 1); static int ar_pci_probe(device_t device) { u_int32_t type = pci_get_devid(device); switch(type) { case 0x5012114f: device_set_desc(device, "Digi SYNC/570i-PCI 2 port"); return (0); break; case 0x5010114f: printf("Digi SYNC/570i-PCI 2 port (mapped below 1M)\n"); printf("Please change the jumper to select linear mode.\n"); break; case 0x5013114f: device_set_desc(device, "Digi SYNC/570i-PCI 4 port"); return (0); break; case 0x5011114f: printf("Digi SYNC/570i-PCI 4 port (mapped below 1M)\n"); printf("Please change the jumper to select linear mode.\n"); break; default: break; } return (ENXIO); } static int ar_pci_attach(device_t device) { int error; u_int i, tmp; struct ar_hardc *hc; hc = (struct ar_hardc *)device_get_softc(device); bzero(hc, sizeof(struct ar_hardc)); error = ar_allocate_plx_memory(device, 0x10, 1); if(error) goto errexit; error = ar_allocate_memory(device, 0x18, 1); if(error) goto errexit; error = ar_allocate_irq(device, 0, 1); if(error) goto errexit; hc->mem_start = rman_get_virtual(hc->res_memory); hc->cunit = device_get_unit(device); hc->sca[0] = (sca_regs *)(hc->mem_start + AR_PCI_SCA_1_OFFSET); hc->sca[1] = (sca_regs *)(hc->mem_start + AR_PCI_SCA_2_OFFSET); hc->orbase = (u_char *)(hc->mem_start + AR_PCI_ORBASE_OFFSET); tmp = hc->orbase[AR_BMI * 4]; hc->bustype = tmp & 
AR_BUS_MSK; hc->memsize = (tmp & AR_MEM_MSK) >> AR_MEM_SHFT; hc->memsize = 1 << hc->memsize; hc->memsize <<= 16; hc->interface[0] = (tmp & AR_IFACE_MSK); tmp = hc->orbase[AR_REV * 4]; hc->revision = tmp & AR_REV_MSK; hc->winsize = (1 << ((tmp & AR_WSIZ_MSK) >> AR_WSIZ_SHFT)) * 16 * 1024; hc->mem_end = (caddr_t)(hc->mem_start + hc->winsize); hc->winmsk = hc->winsize - 1; hc->numports = hc->orbase[AR_PNUM * 4]; hc->handshake = hc->orbase[AR_HNDSH * 4]; for(i = 1; i < hc->numports; i++) hc->interface[i] = hc->interface[0]; TRC(printf("arp%d: bus %x, rev %d, memstart %p, winsize %d, " "winmsk %x, interface %x\n", unit, hc->bustype, hc->revision, hc->mem_start, hc->winsize, hc->winmsk, hc->interface[0])); ar_attach(device); /* Magic to enable the card to generate interrupts. */ bus_space_write_1(rman_get_bustag(hc->res_plx_memory), rman_get_bushandle(hc->res_plx_memory), 0x69, 0x09); return (0); errexit: ar_deallocate_resources(device); return (ENXIO); } Index: head/sys/dev/asr/asr.c =================================================================== --- head/sys/dev/asr/asr.c (revision 129878) +++ head/sys/dev/asr/asr.c (revision 129879) @@ -1,3601 +1,3602 @@ /*- * Copyright (c) 1996-2000 Distributed Processing Technology Corporation * Copyright (c) 2000-2001 Adaptec Corporation * All rights reserved. * * TERMS AND CONDITIONS OF USE * * Redistribution and use in source form, with or without modification, are * permitted provided that redistributions of source code must retain the * above copyright notice, this list of conditions and the following disclaimer. * * This software is provided `as is' by Adaptec and any express or implied * warranties, including, but not limited to, the implied warranties of * merchantability and fitness for a particular purpose, are disclaimed. 
In no * event shall Adaptec be liable for any direct, indirect, incidental, special, * exemplary or consequential damages (including, but not limited to, * procurement of substitute goods or services; loss of use, data, or profits; * or business interruptions) however caused and on any theory of liability, * whether in contract, strict liability, or tort (including negligence or * otherwise) arising in any way out of the use of this driver software, even * if advised of the possibility of such damage. * * SCSI I2O host adapter driver * * V1.10 2004/05/05 scottl@freebsd.org * - Massive cleanup of the driver to remove dead code and * non-conformant style. * - Removed most i386-specific code to make it more portable. * - Converted to the bus_space API. * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com * - The 2000S and 2005S do not initialize on some machines, * increased timeout to 255ms from 50ms for the StatusGet * command. * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com * - I knew this one was too good to be true. The error return * on ioctl commands needs to be compared to CAM_REQ_CMP, not * to the bit masked status. * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com * - The 2005S that was supported is affectionately called the * Conjoined BAR Firmware. In order to support RAID-5 in a * 16MB low-cost configuration, Firmware was forced to go * to a Split BAR Firmware. This requires a separate IOP and * Messaging base address. * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com * - Handle support for 2005S Zero Channel RAID solution. * - System locked up if the Adapter locked up. Do not try * to send other commands if the resetIOP command fails. The * fail outstanding command discovery loop was flawed as the * removal of the command from the list prevented discovering * all the commands. * - Comment changes to clarify driver. * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM. * - We do not use the AC_FOUND_DEV event because of I2O. * Removed asr_async. 
* V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org, * lampa@fee.vutbr.cz and Scott_Long@adaptec.com. * - Removed support for PM1554, PM2554 and PM2654 in Mode-0 * mode as this is confused with competitor adapters in run * mode. * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove * to prevent operating system panic. * - moved default major number to 154 from 97. * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com * - The controller is not actually an ASR (Adaptec SCSI RAID) * series that is visible, it's more of an internal code name. * remove any visible references within reason for now. * - bus_ptr->LUN was not correctly zeroed when initially * allocated causing a possible panic of the operating system * during boot. * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com * - Code always fails for ASR_getTid affecting performance. * - initiated a set of changes that resulted from a formal * code inspection by Mark_Salyzyn@adaptec.com, * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. * Their findings were focussed on the LCT & TID handler, and * all resulting changes were to improve code readability, * consistency or have a positive effect on performance. * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com * - Passthrough returned an incorrect error. * - Passthrough did not migrate the intrinsic scsi layer wakeup * on command completion. * - generate control device nodes using make_dev and delete_dev. * - Performance affected by TID caching reallocing. * - Made suggested changes by Justin_Gibbs@adaptec.com * - use splcam instead of splbio. * - use cam_imask instead of bio_imask. * - use u_int8_t instead of u_char. * - use u_int16_t instead of u_short. * - use u_int32_t instead of u_long where appropriate. * - use 64 bit context handler instead of 32 bit. * - create_ccb should only allocate the worst case * requirements for the driver since CAM may evolve * making union ccb much larger than needed here. 
* renamed create_ccb to asr_alloc_ccb. * - go nutz justifying all debug prints as macros * defined at the top and remove unsightly ifdefs. * - INLINE STATIC viewed as confusing. Historically * utilized to affect code performance and debug * issues in OS, Compiler or OEM specific situations. * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com * - Ported from FreeBSD 2.2.X DPT I2O driver. * changed struct scsi_xfer to union ccb/struct ccb_hdr * changed variable name xs to ccb * changed struct scsi_link to struct cam_path * changed struct scsibus_data to struct cam_sim * stopped using fordriver for holding on to the TID * use proprietary packet creation instead of scsi_inquire * CAM layer sends synchronize commands. */ #include #include /* TRUE=1 and FALSE=0 defined here */ #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) #include #elif defined(__alpha__) #include #endif #include #include #include #define osdSwap4(x) ((u_long)ntohl((u_long)(x))) #define KVTOPHYS(x) vtophys(x) #include "dev/asr/dptalign.h" #include "dev/asr/i2oexec.h" #include "dev/asr/i2obscsi.h" #include "dev/asr/i2odpt.h" #include "dev/asr/i2oadptr.h" #include "dev/asr/sys_info.h" __FBSDID("$FreeBSD$"); #define ASR_VERSION 1 #define ASR_REVISION '1' #define ASR_SUBREVISION '0' #define ASR_MONTH 5 #define ASR_DAY 5 #define ASR_YEAR (2004 - 1980) /* * Debug macros to reduce the unsightly ifdefs */ #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD)) static __inline void debug_asr_message(PI2O_MESSAGE_FRAME message) { u_int32_t * pointer = (u_int32_t *)message; u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message); u_int32_t counter = 0; while (length--) { printf("%08lx%c", (u_long)*(pointer++), (((++counter & 7) == 0) || (length == 0)) ? 
'\n' : ' '); } } #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */ #ifdef DEBUG_ASR /* Breaks on none STDC based compilers :-( */ #define debug_asr_printf(fmt,args...) printf(fmt, ##args) #define debug_asr_dump_message(message) debug_asr_message(message) #define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path); #else /* DEBUG_ASR */ #define debug_asr_printf(fmt,args...) #define debug_asr_dump_message(message) #define debug_asr_print_path(ccb) #endif /* DEBUG_ASR */ /* * If DEBUG_ASR_CMD is defined: * 0 - Display incoming SCSI commands * 1 - add in a quick character before queueing. * 2 - add in outgoing message frames. */ #if (defined(DEBUG_ASR_CMD)) #define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args) static __inline void debug_asr_dump_ccb(union ccb *ccb) { u_int8_t *cp = (unsigned char *)&(ccb->csio.cdb_io); int len = ccb->csio.cdb_len; while (len) { debug_asr_cmd_printf (" %02x", *(cp++)); --len; } } #if (DEBUG_ASR_CMD > 0) #define debug_asr_cmd1_printf debug_asr_cmd_printf #else #define debug_asr_cmd1_printf(fmt,args...) #endif #if (DEBUG_ASR_CMD > 1) #define debug_asr_cmd2_printf debug_asr_cmd_printf #define debug_asr_cmd2_dump_message(message) debug_asr_message(message) #else #define debug_asr_cmd2_printf(fmt,args...) #define debug_asr_cmd2_dump_message(message) #endif #else /* DEBUG_ASR_CMD */ #define debug_asr_cmd_printf(fmt,args...) #define debug_asr_dump_ccb(ccb) #define debug_asr_cmd1_printf(fmt,args...) #define debug_asr_cmd2_printf(fmt,args...) #define debug_asr_cmd2_dump_message(message) #endif /* DEBUG_ASR_CMD */ #if (defined(DEBUG_ASR_USR_CMD)) #define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args) #define debug_usr_cmd_dump_message(message) debug_usr_message(message) #else /* DEBUG_ASR_USR_CMD */ #define debug_usr_cmd_printf(fmt,args...) 
#define debug_usr_cmd_dump_message(message)
#endif /* DEBUG_ASR_USR_CMD */

#include "dev/asr/dptsig.h"

/*
 * DPT driver signature block, embedded in the binary so DPT/Adaptec
 * management tools can identify this driver and its version.
 * asr_attach patches the OS-version portion of the description string.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
	/* 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};

/* Configuration Definitions */
#define	SG_SIZE		58	/* Scatter Gather list Size */
#define	MAX_TARGET_ID	126	/* Maximum Target ID supported */
#define	MAX_LUN		255	/* Maximum LUN Supported */
#define	MAX_CHANNEL	7	/* Maximum Channel # Supported by driver */
#define	MAX_INBOUND	2000	/* Max CCBs, Also Max Queue Size */
#define	MAX_OUTBOUND	256	/* Maximum outbound frames/adapter */
#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size */
#define	MAX_MAP		4194304L /* Maximum mapping size of IOP */
				/* Also serves as the minimum map for */
				/* the 2005S zero channel RAID product */

/* I2O register set (byte offsets into the adapter's register window) */
#define	I2O_REG_STATUS		0x30
#define	I2O_REG_MASK		0x34
#define	I2O_REG_TOFIFO		0x40
#define	I2O_REG_FROMFIFO	0x44

/* Bit in I2O_REG_MASK that masks off adapter interrupts when set */
#define	Mask_InterruptsDisabled	0x08

/*
 * A MIX of performance and space considerations for TID lookups.
 * A sparse two-level table: bus -> target -> lun -> TID.
 */
typedef u_int16_t tid_t;

typedef struct {
	u_int32_t size;		/* up to MAX_LUN */
	tid_t	  TID[1];	/* grows past 1 entry; see ASR_getTidAddress */
} lun2tid_t;

typedef struct {
	u_int32_t size;		/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* grows past 1 entry; see ASR_getTidAddress */
} target2lun_t;

/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr	  ccb_h;	/* For convenience */
	struct ccb_scsiio csio;
	struct ccb_setasync csa;
};

/**************************************************************************
** ASR Host Adapter structure - One Structure For Each Host Adapter That **
** Is Configured Into The System. The Structure Supplies Configuration   **
** Information, Status Info, Queue Info And An Active CCB List Pointer.  **
***************************************************************************/
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	u_long			ha_Base;	/* base port for each board */
	bus_size_t		ha_blinkLED;	/* offset of blink-LED status */
	bus_space_handle_t	ha_i2o_bhandle;	/* I2O register window */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message frame window */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
/* Accessor macros for the IdentityTag bytes of an LCT entry */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1]; /* TID cache */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;	/* reset state machine */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus */
	u_int8_t		ha_MaxId;	/* Maximum target ID */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN */
	u_int8_t		ha_SgSize;	/* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;	/* HBA list */
	dev_t			ha_devt;
} Asr_softc_t;

/* Head of the singly-linked list of all attached adapters */
static Asr_softc_t * Asr_softc;

/*
 * Prototypes of the routines we have in this object.
 */

/* I2O HDM interface */
static int	asr_probe(device_t tag);
static int	asr_attach(device_t tag);

static int	asr_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag,
			  struct thread *td);
static int	asr_open(dev_t dev, int32_t flags, int32_t ifmt,
			 struct thread *td);
static int	asr_close(dev_t dev, int flags, int ifmt, struct thread *td);
static int	asr_intr(Asr_softc_t *sc);
static void	asr_timeout(void *arg);
static int	ASR_init(Asr_softc_t *sc);
static int	ASR_acquireLct(Asr_softc_t *sc);
static int	ASR_acquireHrt(Asr_softc_t *sc);
static void	asr_action(struct cam_sim *sim, union ccb *ccb);
static void	asr_poll(struct cam_sim *sim);
static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);

/*
 * Here is the auto-probe structure used to nest our tests appropriately
 * during the startup phase of the operating system.
 */
static device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

static driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

static devclass_t asr_devclass;
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
static struct cdevsw asr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,
	.d_name =	"asr",
};

/*
 * I2O support routines: thin wrappers over bus_space for the four I2O
 * doorbell/FIFO registers and the message frame window.
 */

/* Read the outbound (adapter-to-host) FIFO */
static __inline u_int32_t
asr_get_FromFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_FROMFIFO));
}

/* Read the inbound (host-to-adapter) FIFO; yields a free frame offset */
static __inline u_int32_t
asr_get_ToFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_TOFIFO));
}

/* Read the interrupt mask register */
static __inline u_int32_t
asr_get_intr(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_MASK));
}

/* Read the adapter status register */
static __inline u_int32_t
asr_get_status(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_STATUS));
}

/* Return a frame offset to the outbound FIFO (frees the reply frame) */
static __inline void
asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
			  I2O_REG_FROMFIFO, val);
}

/* Post a frame offset to the inbound FIFO (submits the command) */
static __inline void
asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
			  I2O_REG_TOFIFO, val);
}

/* Write the interrupt mask register */
static __inline void
asr_set_intr(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
			  I2O_REG_MASK, val);
}

/* Copy a message frame into the adapter's frame window at `offset' */
static __inline void
asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
{
	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 offset, (u_int32_t *)frame, len);
}

/*
 * Fill message with default.
 */
/*
 * Zero `Message' and initialize the standard I2O frame header fields
 * (version/offset, size in 32-bit words, initiator address).  Returns
 * the same pointer cast to a message frame for caller convenience.
 */
static PI2O_MESSAGE_FRAME
ASR_fillMessage(void *Message, u_int16_t size)
{
	PI2O_MESSAGE_FRAME Message_Ptr;

	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, size);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	    (size + sizeof(U32) - 1) >> 2);
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	/*
	 * NOTE(review): this KASSERT can never fire — Message_Ptr was
	 * already dereferenced above and is just a cast of `Message'.
	 */
	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
	return (Message_Ptr);
} /* ASR_fillMessage */

#define	EMPTY_QUEUE (-1L)

/*
 * Pop a free inbound frame offset from the ToFIFO; retry once on an
 * empty queue before giving up and returning EMPTY_QUEUE.
 */
static __inline U32
ASR_getMessage(Asr_softc_t *sc)
{
	U32	MessageOffset;

	MessageOffset = asr_get_ToFIFO(sc);
	if (MessageOffset == EMPTY_QUEUE)
		MessageOffset = asr_get_ToFIFO(sc);

	return (MessageOffset);
} /* ASR_getMessage */

/* Issue a polled command */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = -1L;
	U32	MessageOffset;
	u_int	Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 * Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	/* Returns the pre-disable interrupt mask, or -1L on timeout */
	return (Mask);
} /* ASR_initiateCp */

/*
 * Reset the adapter.
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32		           R;	/* status word lands here */
	}				   Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	   Message_Ptr;
	U32			* volatile Reply_Ptr;
	U32				   Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status (the R word directly after the message;
	 * the adapter DMAs the reset status word there, so it is polled
	 * through a volatile pointer).
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	    + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    -1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	KASSERT(Old != -1L, ("Old == -1"));
	return (0);
} /* ASR_resetIOP */

/*
 * Get the current state of the adapter
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc, PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	U32				Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    -1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 50ms.
		 */
		u_int8_t Delay = 255;

		/* SyncByte is DMA'd by the adapter; poll it as volatile */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;	/* timed out */
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */

/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller.  If we find it, we will use it.
 * virtual adapters.
 */
static int
asr_probe(device_t tag)
{
	u_int32_t id;

	/* Match on PCI (device << 16) | vendor: Adaptec I2O RAID parts */
	id = (pci_get_device(tag) << 16) | pci_get_vendor(tag);
	if ((id == 0xA5011044) || (id == 0xA5111044)) {
		device_set_desc(tag, "Adaptec Caching SCSI RAID");
		return (-10);	/* moderate-priority match */
	}
	return (ENXIO);
} /* asr_probe */

/*
 * Allocate a zeroed local asr_ccb, priming priority, index and the
 * back-pointer to the softc.  M_WAITOK means this cannot fail in
 * practice, but the NULL check is retained.
 */
static __inline union asr_ccb *
asr_alloc_ccb(Asr_softc_t *sc)
{
	union asr_ccb *new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;
	}
	return (new_ccb);
} /* asr_alloc_ccb */

/* Release a ccb obtained from asr_alloc_ccb */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */

/*
 * Print inquiry data `carefully' — stops at NUL, space or '-'.
 */
static void
ASR_prstring(u_int8_t *s, int len)
{
	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
		printf ("%c", *(s++));
	}
} /* ASR_prstring */

/*
 * Send a message synchronously and without Interrupt to a ccb.
 * Interrupts are masked and completion is detected by calling the
 * interrupt handler by hand until the ccb leaves CAM_REQ_INPROG.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	int		s;
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */

/*
 * Send a message synchronously to an Asr_softc_t.
 * Wraps ASR_queue_s with a throw-away ccb.
 */
static int
ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	union asr_ccb	*ccb;
	int		status;

	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		return (CAM_REQUEUE_REQ);
	}

	status = ASR_queue_s (ccb, Message);

	asr_free_ccb(ccb);

	return (status);
} /* ASR_queue_c */

/*
 * Add the specified ccb to the active queue
 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flushes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;
		}
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */

/*
 * Remove the specified ccb from the active queue.
 */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */

/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.
 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;
	int		s;

	s = splcam();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		/* ccbs with no path are internal/synchronous waiters */
		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */

/*
 * The following command causes the HBA to reset the specific bus
 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	    sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	    I2O_HBA_BUS_RESET);
	/* Find the port entry in the LCT that fronts the requested bus */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	    ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			    &Message_Ptr->StdMessageFrame,
			    I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */

/*
 * Read the adapter's blink-LED diagnostic code from the frame window.
 * Returns 0 unless the 0xBC flag byte indicates a code is present.
 */
static __inline int
ASR_getBlinkLedCode(Asr_softc_t *sc)
{
	U8	blink;

	if (sc == NULL)
		return (0);

	blink = bus_space_read_1(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 sc->ha_blinkLED + 1);
	if (blink != 0xBC)
		return (0);

	blink = bus_space_read_1(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 sc->ha_blinkLED);
	return (blink);
} /* ASR_getBlinkCode */

/*
 * Determine the address of an TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;
	lun2tid_t	*target_ptr;
	unsigned	new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		    (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 * Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 * Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */

/*
 * Get a pre-existing TID relationship.
 *
 * If the TID was never set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
static __inline tid_t
ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
{
	tid_t	*tid_ptr;
	int	s;
	tid_t	retval;

	s = splcam();
	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
	 /* (tid_t)0 or (tid_t)-1 indicate no TID */
	 || (*tid_ptr == (tid_t)0)) {
		splx(s);
		return ((tid_t)-1);
	}
	retval = *tid_ptr;
	splx(s);
	return (retval);
} /* ASR_getTid */

/*
 * Set a TID relationship.
 *
 * If the TID was not set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
static __inline tid_t
ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
{
	tid_t	*tid_ptr;
	int	s;

	if (TID != (tid_t)-1) {
		if (TID == 0) {
			return ((tid_t)-1);
		}
		s = splcam();
		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
		  == NULL) {
			splx(s);
			return ((tid_t)-1);
		}
		*tid_ptr = TID;
		splx(s);
	}
	return (TID);
} /* ASR_setTid */

/*-------------------------------------------------------------------------*/
/*		      Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/
static int
ASR_rescan(Asr_softc_t *sc)
{
	int bus;
	int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If we cannot build a path, fall
					 * back to a bus-wide async event
					 * accumulated in `event'.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path, &ccb);
						} else {
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */

/*-------------------------------------------------------------------------*/
/*		      Function ASR_reset				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will reset the adapter and resynchronize any data	   */
/*									   */
/* Return : None						           */
/*-------------------------------------------------------------------------*/
static int
ASR_reset(Asr_softc_t *sc)
{
	int s, retVal;

	s = splcam();
	if ((sc->ha_in_reset == HA_IN_RESET)
	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
		splx (s);
		return (EBUSY);
	}
	/*
	 * Promotes HA_OPERATIONAL to HA_IN_RESET,
	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
	 */
	++(sc->ha_in_reset);
	if (ASR_resetIOP(sc) == 0) {
		debug_asr_printf ("ASR_resetIOP failed\n");
		/*
		 * We really need to take this card off-line, easier said
		 * than make sense. Better to keep retrying for now since if a
		 * UART cable is connected the blinkLEDs the adapter is now in
		 * a hard state requiring action from the monitor commands to
		 * the HBA to continue. For debugging waiting forever is a
		 * good thing. In a production system, however, one may wish
		 * to instead take the card off-line ...
		 */
		/* Wait Forever */
		while (ASR_resetIOP(sc) == 0);
	}
	retVal = ASR_init (sc);
	splx (s);
	if (retVal != 0) {
		debug_asr_printf ("ASR_init failed\n");
		sc->ha_in_reset = HA_OFF_LINE;
		return (ENXIO);
	}
	if (ASR_rescan (sc) != 0) {
		debug_asr_printf ("ASR_rescan failed\n");
	}
	ASR_failActiveCommands (sc);
	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
		/* NOTE(review): message typo — "Brining" s.b. "Bringing" */
		printf ("asr%d: Brining adapter back on-line\n",
		    sc->ha_path[0] ?
		      cam_sim_unit(xpt_path_sim(sc->ha_path[0])) : 0);
	}
	sc->ha_in_reset = HA_OPERATIONAL;
	return (0);
} /* ASR_reset */

/*
 * Device timeout handler.
 */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */

/*
 * send a message asynchronously
 */
static int
ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32		MessageOffset;
	union asr_ccb	*ccb;

	debug_asr_printf("Host Command Dump:\n");
	debug_asr_dump_message(Message);

	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		asr_set_ToFIFO(sc, MessageOffset);
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset(sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */


/* Simple Scatter Gather elements */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))

/*
 * Retrieve Parameter Group.
 */
/*
 * Read one parameter group (all fields) from the device with the given
 * TID using an I2O UTIL_PARAMS_GET message.  The operations list and the
 * caller's result buffer are handed to the IOP as two SG elements.
 * Returns a pointer to the parameter data *inside* Buffer, or NULL if the
 * request failed or the IOP returned no results.
 */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
    unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		/* Pad for the two simple SG elements appended to the frame */
		char F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2
		    - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	} Message;
	struct Operations *Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE *Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER Header;
		I2O_PARAM_READ_OPERATION_RESULT Read;
		char Info[1];
	} *Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	    sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	/* The operations list lives immediately after the SG elements */
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	    + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation, all fields (0xFFFF), of this Group */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	    &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	    &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	    &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	    &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    I2O_VERSION_11
	    + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress(&(Message_Ptr->StdMessageFrame),
	    TID);
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_UTIL_PARAMS_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	    I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	    Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    Buffer_Ptr, BufferSize);

	/* A non-zero ResultCount means the IOP delivered parameter data */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */

/*
 * Acquire the LCT information.
 *
 * Issues an EXEC_LCT_NOTIFY to size the Logical Configuration Table, then
 * re-issues it with an SG list spanning a freshly allocated sc->ha_LCT and
 * classifies each entry (le_type/le_bus/le_target/le_lun), updating
 * sc->ha_MaxBus as bus numbers are discovered.  Returns 0 on success or an
 * errno value.
 */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int MessageSizeInBytes;
	caddr_t v;
	int len;
	I2O_LCT Table;
	PI2O_LCT_ENTRY Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	/* NOTE(review): with M_WAITOK malloc(9) does not return NULL, so
	 * these NULL checks are dead but harmless — confirm policy. */
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    &Table, sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	    (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free(sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	    (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		    &(Message_Ptr->StdMessageFrame),
		    I2O_MESSAGE_FRAME_getMessageSize(
		      &(Message_Ptr->StdMessageFrame))
		    + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				free(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				free(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			/* Copy the frame built so far, then continue the SG
			 * list inside the larger frame. */
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			free(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			    (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT,
		    I2O_LCT_getTableSize(&Table));
	}
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	    ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			    I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
			/* Port entries: record the initiator ID as target */
			{	struct ControllerInfo {
					I2O_PARAM_RESULTS_LIST_HEADER Header;
					I2O_PARAM_READ_OPERATION_RESULT Read;
					I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR
					    Info;
				} Buffer;
				PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

				Entry->le_bus = 0xff;
				Entry->le_target = 0xff;
				Entry->le_lun = 0xff;

				if ((Info =
				    (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
				    ASR_getParams(sc,
				      I2O_LCT_ENTRY_getLocalTID(Entry),
				      I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
				      &Buffer,
				      sizeof(struct ControllerInfo)))
				    == NULL) {
					continue;
				}
				Entry->le_target =
				    I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
				    Info);
				Entry->le_lun = 0;
			}	/* FALLTHRU */
		default:
			/* Port and unknown classes skip the device lookup */
			continue;
		}
		/* BSA/SCSI/FCA devices: fetch bus/target/lun coordinates */
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			    ASR_getParams(sc,
			      I2O_LCT_ENTRY_getLocalTID(Entry),
			      I2O_DPT_DEVICE_INFO_GROUP_NO,
			      &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			    |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			    = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			    = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			    = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */

/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 */
/*
 * Build a PRIVATE_SCSI_SCB_EXECUTE frame for the given CAM ccb: resolve
 * the I2O TID for (bus,target,lun) (searching the LCT and caching via
 * ASR_setTid on a miss), copy in the CDB, set transfer-direction SCB
 * flags, and construct the data + sense SG lists.  Returns Message (as a
 * frame pointer) or NULL when no TID exists for the addressed device.
 */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t size, len;
	caddr_t v;
	U32 MessageSize;
	int next, span, base, rw;
	int target = ccb->ccb_h.target_id;
	int lun = ccb->ccb_h.target_lun;
	int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)));

	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		/* No cached TID: search the LCT for an unclaimed match */
		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
				    Device->le_target, Device->le_lun, TID);
				break;
			}
		}
	}
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	      I2O_SCB_FLAG_ENABLE_DISCONNECT
	    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);

	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    (ccb->csio.dxfer_len)
	      ? ((rw)
		? (I2O_SCB_FLAG_XFER_TO_DEVICE
		  | I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		: (I2O_SCB_FLAG_XFER_FROM_DEVICE
		  | I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	      : (I2O_SCB_FLAG_ENABLE_DISCONNECT
		| I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		| I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* Walk the virtual buffer, emitting one SG element per physically
	 * contiguous run, up to SG_SIZE elements. */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	    Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		    I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	    MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */

/*
 * Reset the adapter.
 *
 * Sends EXEC_OUTBOUND_INIT, polls for completion, then (re)populates the
 * outbound reply-frame FIFO.  Returns the final reply status, or 0 when
 * ASR_initiateCp failed.
 */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32 R;	/* reply-status word appended to the frame */
	} Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
	U32 *volatile Reply_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(
	    &Message, sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr,
	    PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	    + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	    sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr))
	    != -1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * NOTE(review): unbounded busy-wait on the IOP's reply
		 * status; hangs forever if the IOP never responds.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {
			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			    * sc->ha_Msgs_Count;
			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs
			    = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			    contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			    != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr +=
				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */

/*
 * Set the system table
 *
 * Builds an EXEC_SYS_TAB_SET frame listing every attached asr controller
 * (walking the Asr_softc chain) and queues it.  Returns the ASR_queue_c
 * status.
 */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER SystemTable;
	Asr_softc_t *ha;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	    sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO))
	    == NULL) {
		return (ENOMEM);
	}
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* 3 extra SG elements: header + two NULL terminator pairs */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	    sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	    + ((3+SystemTable->NumberEntries)
	    * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	    M_TEMP, M_WAITOK)) == NULL) {
		free(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	    sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	    + ((3+SystemTable->NumberEntries)
	    * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_SYS_TAB_SET);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	    + ((I2O_MESSAGE_FRAME_getVersionOffset(
	    &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable,
	    sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		    ((ha->ha_next)
		      ? (I2O_SGL_FLAGS_DIR)
		      : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		    &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Empty private/DDM memory and I/O tables */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free(Message_Ptr, M_TEMP);
	free(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */

/*
 * Fetch the Hardware Resource Table and use its adapter IDs to learn the
 * bus number of each matching LCT entry, updating sc->ha_MaxBus.
 * Returns 0 on success, ENODEV if the HRT_GET request failed.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE Message;
	I2O_EXEC_HRT_GET_MESSAGE *Message_Ptr;
	struct {
		I2O_HRT	Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE)
	    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	    &Hrt, sizeof(Hrt));

	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}

	/* Clamp to the number of entries we reserved room for */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	    NumberOfEntries != 0;
	    ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device) ==
			    (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* High half of the adapter ID is the bus */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				    Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */

/*
 * Enable the adapter.
 */
static int
ASR_enableSys(Asr_softc_t *sc)
{
	I2O_EXEC_SYS_ENABLE_MESSAGE Message;
	PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;

	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_SYS_ENABLE);
	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
} /* ASR_enableSys */

/*
 * Perform the stages necessary to initialize the adapter
 *
 * Returns non-zero on any stage failure.
 * NOTE(review): ASR_enableSys returns a 0/1 boolean (queue status != 0)
 * but is compared here against CAM_REQ_CMP, as is ASR_setSysTab's
 * ASR_queue_c status — verify these comparisons against the CAM status
 * value definitions.
 */
static int
ASR_init(Asr_softc_t *sc)
{
	return ((ASR_initOutBound(sc) == 0)
	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
} /* ASR_init */

/*
 * Send a Synchronize Cache command to the target device.
 */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;

		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr,
		    sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)
		    + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		    (PI2O_MESSAGE_FRAME)Message_Ptr,
		    I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		    (PI2O_MESSAGE_FRAME)Message_Ptr,
		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		    I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		    DPT_ORGANIZATION_ID);

		/* Hand-rolled 6-byte SYNCHRONIZE CACHE CDB */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    (I2O_SCB_FLAG_XFER_FROM_DEVICE
		      | I2O_SCB_FLAG_ENABLE_DISCONNECT
		      | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		      | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	}
}

/*
 * Synchronize-cache every (bus,target,lun) the adapter may address.
 */
static void
ASR_synchronize(Asr_softc_t *sc)
{
	int bus, target, lun;

	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				ASR_sync(sc,bus,target,lun);
			}
		}
	}
}

/*
 * Reset the HBA, targets and BUS.
 *		Currently this resets *all* the SCSI busses.
 */
static __inline void
asr_hbareset(Asr_softc_t *sc)
{
	ASR_synchronize(sc);
	(void)ASR_reset(sc);
} /* asr_hbareset */

/*
 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
 * limit and a reduction in error checking (in the pre 4.0 case).
 */
static int
asr_pci_map_mem(device_t tag, Asr_softc_t *sc)
{
	int rid;
	u_int32_t p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(tag, PCIR_BAR(rid), sizeof(p));
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 * Give up?
	 */
	if (rid >= 4) {
		rid = 0;
	}
	rid = PCIR_BAR(rid);
	/* Standard BAR sizing: write all-ones, read back the mask */
	p = pci_read_config(tag, rid, sizeof(p));
	pci_write_config(tag, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
	pci_write_config(tag, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;
	sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		/* Size and map the messaging-register BAR separately */
		p = pci_read_config(tag, rid, sizeof(p));
		pci_write_config(tag, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
		pci_write_config(tag, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY,
		  &rid, p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */

/*
 * A simplified copy of the real pci_map_int with additional
 * registration requirements.
 */
static int
asr_pci_map_int(device_t tag, Asr_softc_t *sc)
{
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource_any(tag, SYS_RES_IRQ, &rid,
	  RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == NULL) {
		return (0);
	}
	if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
		return (0);
	}
	sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
	return (1);
} /* asr_pci_map_int */

/*
 * Attach the devices, and virtual devices to the driver list.
 */
/*
 * Device attach: allocate and link the softc, map PCI memory and the
 * interrupt, reset and interrogate the IOP, initialize the adapter
 * (ASR_init / ASR_acquireLct / ASR_acquireHrt), print the inquired model
 * string, register one CAM SIM per channel, and create /dev/asr%d.
 *
 * NOTE(review): the early error returns (ENXIO/ENODEV) leave `sc`
 * allocated and linked into the Asr_softc list, and the ENODEV path can
 * leak `status` when ASR_getStatus fails — verify attach-failure cleanup.
 */
static int
asr_attach(device_t tag)
{
	PI2O_EXEC_STATUS_GET_REPLY status;
	PI2O_LCT_ENTRY Device;
	Asr_softc_t *sc, **ha;
	struct scsi_inquiry_data *iq;
	union asr_ccb *ccb;
	int bus, size, unit = device_get_unit(tag);

	if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
		return(ENOMEM);
	}
	if (Asr_softc == NULL) {
		/*
		 * Fixup the OS revision as saved in the dptsig for the
		 * engine (dptioctl.h) to pick up.
		 */
		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
	}
	/*
	 * Initialize the software structure
	 */
	LIST_INIT(&(sc->ha_ccb));
	/* Link us into the HA list */
	for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
	*(ha) = sc;

	/*
	 * This is the real McCoy!
	 */
	if (!asr_pci_map_mem(tag, sc)) {
		printf ("asr%d: could not map memory\n", unit);
		return(ENXIO);
	}
	/* Enable if not formerly enabled */
	pci_write_config(tag, PCIR_COMMAND,
	    pci_read_config(tag, PCIR_COMMAND, sizeof(char))
	    | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
	/* Knowledge is power, responsibility is direct */
	{
		/* NOTE(review): local mirror of the bus code's pci_devinfo
		 * layout — fragile if the real structure changes. */
		struct pci_devinfo {
			STAILQ_ENTRY(pci_devinfo) pci_links;
			struct resource_list resources;
			pcicfgregs cfg;
		} * dinfo = device_get_ivars(tag);

		sc->ha_pciBusNum = dinfo->cfg.bus;
		sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) | dinfo->cfg.func;
	}
	/* Check if the device is there? */
	if ((ASR_resetIOP(sc) == 0)
	 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc(
	    sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) == NULL)
	 || (ASR_getStatus(sc, status) == NULL)) {
		printf ("asr%d: could not initialize hardware\n", unit);
		return(ENODEV); /* Get next, maybe better luck */
	}
	/* Mirror the IOP's identity into our SysTab entry */
	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
	sc->ha_SystemTable.IopState = status->IopState;
	sc->ha_SystemTable.MessengerType = status->MessengerType;
	sc->ha_SystemTable.InboundMessageFrameSize
	    = status->InboundMFrameSize;
	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
	    = (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */

	if (!asr_pci_map_int(tag, (void *)sc)) {
		printf ("asr%d: could not map interrupt\n", unit);
		return(ENXIO);
	}

	/* Adjust the maximum inbound count */
	if (((sc->ha_QueueSize
	    = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
	    > MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
		sc->ha_QueueSize = MAX_INBOUND;
	}

	/* Adjust the maximum outbound count */
	if (((sc->ha_Msgs_Count
	    = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
	    > MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
		sc->ha_Msgs_Count = MAX_OUTBOUND;
	}
	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
		sc->ha_Msgs_Count = sc->ha_QueueSize;
	}

	/* Adjust the maximum SG size to adapter */
	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
	    status) << 2)) > MAX_INBOUND_SIZE) {
		size = MAX_INBOUND_SIZE;
	}
	free(status, M_TEMP);
	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);

	/*
	 * Only do a bus/HBA reset on the first time through. On this
	 * first time through, we do not send a flush to the devices.
	 */
	if (ASR_init(sc) == 0) {
		struct BufferInfo {
			I2O_PARAM_RESULTS_LIST_HEADER Header;
			I2O_PARAM_READ_OPERATION_RESULT Read;
			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
		} Buffer;
		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
#define FW_DEBUG_BLED_OFFSET 8

		/* Locate the firmware "blink LED" debug word if exposed */
		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET
			    + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
		}
		if (ASR_acquireLct(sc) == 0) {
			(void)ASR_acquireHrt(sc);
		}
	} else {
		printf ("asr%d: failed to initialize\n", unit);
		return(ENXIO);
	}
	/*
	 * Add in additional probe responses for more channels. We
	 * are reusing the variable `target' for a channel loop counter.
	 * Done here because of we need both the acquireLct and
	 * acquireHrt data.
	 */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	    ++Device) {
		if (Device->le_type == I2O_UNKNOWN) {
			continue;
		}
		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
			if (Device->le_target > sc->ha_MaxId) {
				sc->ha_MaxId = Device->le_target;
			}
			if (Device->le_lun > sc->ha_MaxLun) {
				sc->ha_MaxLun = Device->le_lun;
			}
		}
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus <= MAX_CHANNEL)) {
			/* Do not increase MaxId for efficiency */
			sc->ha_adapter_target[Device->le_bus]
			    = Device->le_target;
		}
	}

	/*
	 * Print the HBA model number as inquired from the card.
	 */
	printf("asr%d:", unit);

	if ((iq = (struct scsi_inquiry_data *)malloc(
	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO))
	    != NULL) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
		int posted = 0;

		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr,
		    sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)
		    + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		    (PI2O_MESSAGE_FRAME)Message_Ptr,
		    I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		    (PI2O_MESSAGE_FRAME)Message_Ptr,
		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT)
		    + sizeof(I2O_SGE_SIMPLE_ELEMENT)) / sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		    I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		/* Interpret == 1: the IOP itself answers this INQUIRY */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		    DPT_ORGANIZATION_ID);

		/* Hand-rolled 6-byte INQUIRY CDB */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = INQUIRY;
		Message_Ptr->CDB[4]
		    = (unsigned char)sizeof(struct scsi_inquiry_data);
		if (Message_Ptr->CDB[4] == 0) {
			Message_Ptr->CDB[4] = 255;
		}

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
		    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
		    sizeof(struct scsi_inquiry_data));
		SG(&(Message_Ptr->SGL), 0,
		    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
		    iq, sizeof(struct scsi_inquiry_data));
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
			printf (" ");
			ASR_prstring (iq->vendor, 8);
			++posted;
		}
		if (iq->product[0] && (iq->product[0] != ' ')) {
			printf (" ");
			ASR_prstring (iq->product, 16);
			++posted;
		}
		if (iq->revision[0] && (iq->revision[0] != ' ')) {
			printf (" FW Rev. ");
			ASR_prstring (iq->revision, 4);
			++posted;
		}
		free(iq, M_TEMP);
		if (posted) {
			printf (",");
		}
	}
	printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);

	/*
	 * fill in the prototype cam_path.
	 */
	if ((ccb = asr_alloc_ccb(sc)) == NULL) {
		printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
		return(ENOMEM);
	}
	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
		struct cam_devq	 * devq;
		int QueueSize = sc->ha_QueueSize;

		if (QueueSize > MAX_INBOUND) {
			QueueSize = MAX_INBOUND;
		}

		/*
		 * Create the device queue for our SIM(s).
		 */
		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
			continue;
		}

		/*
		 * Construct our first channel SIM entry
		 */
		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr",
		    sc, unit, 1, QueueSize, devq);
		if (sc->ha_sim[bus] == NULL) {
			continue;
		}

		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS) {
			cam_sim_free(sc->ha_sim[bus],
			  /*free_devq*/TRUE);
			sc->ha_sim[bus] = NULL;
			continue;
		}

		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(
			  cam_sim_path(sc->ha_sim[bus]));
			cam_sim_free(sc->ha_sim[bus],
			  /*free_devq*/TRUE);
			sc->ha_sim[bus] = NULL;
			continue;
		}
	}
	asr_free_ccb(ccb);

	/*
	 * Generate the device node information
	 */
	sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "asr%d", unit);
	sc->ha_devt->si_drv1 = sc;
	return(0);
} /* asr_attach */

static void
asr_poll(struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */

/*
 * CAM action entry point: dispatch on ccb->ccb_h.func_code and complete
 * every ccb via xpt_done().  XPT_SCSI_IO builds an I2O frame with
 * ASR_init_message and queues it; most other functions are answered
 * locally.
 */
static void
asr_action(struct cam_sim *sim, union ccb  *ccb)
{
	struct Asr_softc *sc;

	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim,
	    (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		} Message;
		PI2O_MESSAGE_FRAME Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			    "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			    cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			    ccb->csio.cdb_io.cdb_bytes[0],
			    cam_sim_bus(sim),
			    ccb->ccb_h.target_id,
			    ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
		    cam_sim_bus(sim), ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun);
		debug_asr_dump_ccb(ccb);

		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			    PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				/* Adapter FIFO full: ask CAM to requeue */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf(" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

#if (defined(REPORT_LUNS))
	/* NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT function
	 * code — this case label looks misplaced; verify. */
	case REPORT_LUNS:
#endif
	case XPT_ABORT:		/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			/* Report fixed wide/sync capabilities */
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Heads/sectors chosen by capacity tier */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */

/*
 * Handle processing of current CCB as pointed to by the Status.
 */
static int
asr_intr(Asr_softc_t *sc)
{
	int processed;

	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb *ccb;
		U32 ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		    I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		    &(Reply->StdReplyFrame.StdMessageFrame))
		    & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE Message;
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			    I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry).
Straight copy means that we * need not concern ourselves with the (optional * byteswapping) method access. */ Reply->StdReplyFrame.TransactionContext = bus_space_read_4(sc->ha_frame_btag, sc->ha_frame_bhandle, MessageOffset + offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME, TransactionContext)); /* * For 64 bit machines, we need to reconstruct the * 64 bit context. */ ccb = (union asr_ccb *)(long) I2O_MESSAGE_FRAME_getInitiatorContext64( &(Reply->StdReplyFrame.StdMessageFrame)); /* * Unique error code for command failure. */ I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode( &(Reply->StdReplyFrame), (u_int16_t)-2); /* * Modify the message frame to contain a NOP and * re-issue it to the controller. */ Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage( &Message, sizeof(I2O_UTIL_NOP_MESSAGE)); #if (I2O_UTIL_NOP != 0) I2O_MESSAGE_FRAME_setFunction ( &(Message_Ptr->StdMessageFrame), I2O_UTIL_NOP); #endif /* * Copy the packet out to the Original Message */ asr_set_frame(sc, Message_Ptr, MessageOffset, sizeof(I2O_UTIL_NOP_MESSAGE)); /* * Issue the NOP */ asr_set_ToFIFO(sc, MessageOffset); } /* * Asynchronous command with no return requirements, * and a generic handler for immunity against odd error * returns from the adapter. */ if (ccb == NULL) { /* * Return Reply so that it can be used for the * next command */ asr_set_FromFIFO(sc, ReplyOffset); continue; } /* Welease Wadjah! 
(and stop timeouts) */ ASR_ccbRemove (sc, ccb); switch ( I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode( &(Reply->StdReplyFrame))) { case I2O_SCSI_DSC_SUCCESS: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; break; case I2O_SCSI_DSC_CHECK_CONDITION: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID; break; case I2O_SCSI_DSC_BUSY: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_ADAPTER_BUSY: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_BUS_BUSY: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_SCSI_BUSY; break; case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_SEL_TIMEOUT; break; case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_LUN_INVALID: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; case I2O_SCSI_HBA_DSC_DATA_OVERRUN: /* FALLTHRU */ case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DATA_RUN_ERR; break; default: ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; } if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) { ccb->csio.resid -= I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount( Reply); } /* Sense data in reply packet */ if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) { u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply); if (size) { if (size > sizeof(ccb->csio.sense_data)) { size = sizeof(ccb->csio.sense_data); } if (size > I2O_SCSI_SENSE_DATA_SZ) { size = I2O_SCSI_SENSE_DATA_SZ; } if ((ccb->csio.sense_len) && (size > ccb->csio.sense_len)) { size = ccb->csio.sense_len; } bcopy(Reply->SenseData, &(ccb->csio.sense_data), size); } } /* * Return Reply so that it can be used for the next command * 
since we have no more need for it now */ asr_set_FromFIFO(sc, ReplyOffset); if (ccb->ccb_h.path) { xpt_done ((union ccb *)ccb); } else { wakeup (ccb); } } return (processed); } /* asr_intr */ #undef QueueSize /* Grrrr */ #undef SG_Size /* Grrrr */ /* * Meant to be included at the bottom of asr.c !!! */ /* * Included here as hard coded. Done because other necessary include * files utilize C++ comment structures which make them a nuisance to * included here just to pick up these three typedefs. */ typedef U32 DPT_TAG_T; typedef U32 DPT_MSG_T; typedef U32 DPT_RTN_T; #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */ #include "dev/asr/osd_unix.h" #define asr_unit(dev) minor(dev) static u_int8_t ASR_ctlr_held; static int asr_open(dev_t dev, int32_t flags, int32_t ifmt, struct thread *td) { int s; int error; if (dev->si_drv1 == NULL) { return (ENODEV); } s = splcam (); if (ASR_ctlr_held) { error = EBUSY; } else if ((error = suser(td)) == 0) { ++ASR_ctlr_held; } splx(s); return (error); } /* asr_open */ static int asr_close(dev_t dev, int flags, int ifmt, struct thread *td) { ASR_ctlr_held = 0; return (0); } /* asr_close */ /*-------------------------------------------------------------------------*/ /* Function ASR_queue_i */ /*-------------------------------------------------------------------------*/ /* The Parameters Passed To This Function Are : */ /* Asr_softc_t * : HBA miniport driver's adapter data storage. */ /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */ /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */ /* */ /* This Function Will Take The User Request Packet And Convert It To An */ /* I2O MSG And Send It Off To The Adapter. 
*/
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Packet)
{
	union asr_ccb * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int MessageSizeInBytes;
	int ReplySizeInBytes;
	int error;
	int s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t UserSpace;	/* original user buffer address */
		I2O_FLAGS_COUNT FlagsCount;
		char KernelSpace[sizeof(long)];	/* flexible bounce buffer */
	} * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		    ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	/*
	 * NOTE(review): malloc(..., M_WAITOK) does not return NULL on
	 * FreeBSD; these NULL checks are defensive only.
	 */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	    sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		    "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	    sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n",
		    error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes =
	    (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	    + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free(Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		    ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus(sc, &status) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		    ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		    ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		    MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-copy the whole inbound frame now that its size is known. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	    M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		    MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	    MessageSizeInBytes)) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		    MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	    sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		    "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	    sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		    "Failed to copy in reply frame, errno=%d\n", error);
		return (error);
	}
	/*
	 * NOTE(review): ReplySizeInBytes comes from the userland reply
	 * header and is later used as a copyout length; only a lower bound
	 * is checked here — confirm the upper bound is enforced elsewhere.
	 */
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		    "Failed to copy in reply frame[%d], errno=%d\n",
		    ReplySizeInBytes, error);
		return (EINVAL);
	}

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	    ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	      ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	    M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		    ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext =
	    Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext =
	    ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	    I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	    | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		    Message_Ptr) & 0xF0) >> 2)) {
			free(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			    &(Reply_Ptr->StdReplyFrame),
			    (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			    &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			    sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			    ReplySizeInBytes);
			free(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 * since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			    sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			    + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			    I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
			    len);

			/* Bounce buffer for this (user-space) SG element. */
			if ((elm = (struct ioctlSgList_S *)malloc (
			    sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			    M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				    "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			    (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v,
			    len)) != 0) {
				break;
			}
			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0) && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			    (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				    base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				    span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					    &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
						flags &=
						    ~(I2O_SGL_FLAGS_END_OF_BUFFER
						    | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					    &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				    sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				    + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				    I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				    span);

				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				    I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				    + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					if ((NewMessage_Ptr
					    = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					    M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						    "Failed to acquire frame[%d] memory\n",
						    MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					    ((caddr_t)NewMessage_Ptr) + span,
					    MessageSizeInBytes - span);
					free(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					    (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind all bounce buffers on failure. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			free(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free(elm, M_TEMP);
		}
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	    (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			    cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			    ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			splx(s);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, PRIBIO, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Build the reply frame from the completed CCB's results. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	    &(Reply_Ptr->StdReplyFrame),
	    (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		    ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	  > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			    elm->UserSpace,
			    I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
		    ReplySizeInBytes);
	}
	free(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/*			    Function asr_ioctl				*/
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :				*/
/*     dev  : Device number.						*/
/*     cmd  : Ioctl Command						*/
/*     data : User Argument Passed In.					*/
/*     flag : Mode Parameter						*/
/*     proc : Process Parameter						*/
/*									*/
/* This function is the user interface into this adapter driver		*/
/*									*/
/* Return : zero if OK, error code if not				*/
/*----------------------------------------------------------------------*/
static int
asr_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	Asr_softc_t *sc = dev->si_drv1;
	int i, error = 0;

	if (sc != NULL) switch(cmd) {

	case DPT_SIGNATURE:
		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
		    sizeof(dpt_sig_S)));

	/* Traditional version of the ioctl interface */
	case DPT_CTRLINFO & 0x0000FFFF:
	case DPT_CTRLINFO:
	{
		/* Controller-information record returned to userland. */
		struct {
			u_int16_t length;
			u_int16_t drvrHBAnum;
			u_int32_t baseAddr;
			u_int16_t blinkState;
			u_int8_t  pciBusNum;
			u_int8_t  pciDeviceNum;
			u_int16_t hbaFlags;
			u_int16_t Interrupt;
			u_int32_t reserved1;
			u_int32_t reserved2;
			u_int32_t reserved3;
		} CtlrInfo;

		bzero(&CtlrInfo, sizeof(CtlrInfo));
		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
		CtlrInfo.drvrHBAnum = asr_unit(dev);
		CtlrInfo.baseAddr = sc->ha_Base;
		i = ASR_getBlinkLedCode (sc);
		if (i == -1)
			i = 0;
		CtlrInfo.blinkState = i;
		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
#define	FLG_OSD_PCI_VALID 0x0001
#define	FLG_OSD_DMA	  0x0002
#define	FLG_OSD_I2O	  0x0004
		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
		CtlrInfo.Interrupt = sc->ha_irq;
		error = copyout(&CtlrInfo, *(caddr_t *)data,
		    sizeof(CtlrInfo));
	}	return (error);

	/* Traditional version of the ioctl interface */
	case DPT_SYSINFO & 0x0000FFFF:
	case DPT_SYSINFO:
	{
		sysInfo_S Info;

		bzero(&Info, sizeof(Info));

		Info.processorFamily = ASR_sig.dsProcessorFamily;
#if defined(__i386__)
		switch (cpu) {
		case CPU_386SX: case CPU_386:
			Info.processorType = PROC_386; break;
		case CPU_486SX: case CPU_486:
			Info.processorType = PROC_486; break;
		case CPU_586:
			Info.processorType = PROC_PENTIUM; break;
		case CPU_686:
			Info.processorType = PROC_SEXIUM; break;
		}
#elif defined(__alpha__)
		Info.processorType = PROC_ALPHA;
#endif
		Info.osType = OS_BSDI_UNIX;
		/* Derive version digits from the osrelease string. */
		Info.osMajorVersion = osrelease[0] - '0';
		Info.osMinorVersion = osrelease[2] - '0';
		/* Info.osRevision = 0; */
		/* Info.osSubRevision = 0; */
		Info.busType = SI_PCI_BUS;
		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;

		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
		return (error);
	}

	/* Get The BlinkLED State */
	case DPT_BLINKLED:
		i = ASR_getBlinkLedCode (sc);
		if (i == -1) {
			i = 0;
		}
		error = copyout(&i, *(caddr_t *)data, sizeof(i));
		break;

	/* Send an I2O command */
	case I2OUSRCMD:
		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));

	/* Reset and re-initialize the adapter */
	case I2ORESETCMD:
		return (ASR_reset(sc));

	/* Rescan the LCT table and resynchronize the information */
	case I2ORESCANCMD:
		return (ASR_rescan(sc));
	}
	return (EINVAL);
} /* asr_ioctl */
Index: head/sys/dev/bfe/if_bfe.c
===================================================================
--- head/sys/dev/bfe/if_bfe.c	(revision 129878)
+++ head/sys/dev/bfe/if_bfe.c	(revision 129879)
@@ -1,1604 +1,1605 @@
/*
 * Copyright (c) 2003 Stuart Walsh
 * and Duncan Barclay
 */

/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the
following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include MODULE_DEPEND(bfe, pci, 1, 1, 1); MODULE_DEPEND(bfe, ether, 1, 1, 1); MODULE_DEPEND(bfe, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. 
*/
#include "miibus_if.h"

#define BFE_DEVDESC_MAX		64	/* Maximum device description length */

/* PCI vendor/device IDs this driver attaches to, NULL-terminated. */
static struct bfe_type bfe_devs[] = {
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401,
	    "Broadcom BCM4401 Fast Ethernet" },
	{ 0, 0, NULL }
};

/* Forward declarations for the driver entry points and helpers. */
static int  bfe_probe		(device_t);
static int  bfe_attach		(device_t);
static int  bfe_detach		(device_t);
static void bfe_release_resources	(struct bfe_softc *);
static void bfe_intr		(void *);
static void bfe_start		(struct ifnet *);
static int  bfe_ioctl		(struct ifnet *, u_long, caddr_t);
static void bfe_init		(void *);
static void bfe_stop		(struct bfe_softc *);
static void bfe_watchdog	(struct ifnet *);
static void bfe_shutdown	(device_t);
static void bfe_tick		(void *);
static void bfe_txeof		(struct bfe_softc *);
static void bfe_rxeof		(struct bfe_softc *);
static void bfe_set_rx_mode	(struct bfe_softc *);
static int  bfe_list_rx_init	(struct bfe_softc *);
static int  bfe_list_newbuf	(struct bfe_softc *, int, struct mbuf*);
static void bfe_rx_ring_free	(struct bfe_softc *);

static void bfe_pci_setup	(struct bfe_softc *, u_int32_t);
static int  bfe_ifmedia_upd	(struct ifnet *);
static void bfe_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
static int  bfe_miibus_readreg	(device_t, int, int);
static int  bfe_miibus_writereg	(device_t, int, int, int);
static void bfe_miibus_statchg	(device_t);
static int  bfe_wait_bit	(struct bfe_softc *, u_int32_t, u_int32_t,
		u_long, const int);
static void bfe_get_config	(struct bfe_softc *sc);
static void bfe_read_eeprom	(struct bfe_softc *, u_int8_t *);
static void bfe_stats_update	(struct bfe_softc *);
static void bfe_clear_stats	(struct bfe_softc *);
static int  bfe_readphy		(struct bfe_softc *, u_int32_t, u_int32_t*);
static int  bfe_writephy	(struct bfe_softc *, u_int32_t, u_int32_t);
static int  bfe_resetphy	(struct bfe_softc *);
static int  bfe_setupphy	(struct bfe_softc *);
static void bfe_chip_reset	(struct bfe_softc *);
static void bfe_chip_halt	(struct bfe_softc *);
static void bfe_core_reset	(struct bfe_softc *);
static void bfe_core_disable (struct bfe_softc *); static int bfe_dma_alloc (device_t); static void bfe_dma_map_desc (void *, bus_dma_segment_t *, int, int); static void bfe_dma_map (void *, bus_dma_segment_t *, int, int); static void bfe_cam_write (struct bfe_softc *, u_char *, int); static device_method_t bfe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bfe_probe), DEVMETHOD(device_attach, bfe_attach), DEVMETHOD(device_detach, bfe_detach), DEVMETHOD(device_shutdown, bfe_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, bfe_miibus_readreg), DEVMETHOD(miibus_writereg, bfe_miibus_writereg), DEVMETHOD(miibus_statchg, bfe_miibus_statchg), { 0, 0 } }; static driver_t bfe_driver = { "bfe", bfe_methods, sizeof(struct bfe_softc) }; static devclass_t bfe_devclass; DRIVER_MODULE(bfe, pci, bfe_driver, bfe_devclass, 0, 0); DRIVER_MODULE(miibus, bfe, miibus_driver, miibus_devclass, 0, 0); /* * Probe for a Broadcom 4401 chip. 
*/ static int bfe_probe(device_t dev) { struct bfe_type *t; struct bfe_softc *sc; t = bfe_devs; sc = device_get_softc(dev); bzero(sc, sizeof(struct bfe_softc)); sc->bfe_unit = device_get_unit(dev); sc->bfe_dev = dev; while(t->bfe_name != NULL) { if ((pci_get_vendor(dev) == t->bfe_vid) && (pci_get_device(dev) == t->bfe_did)) { device_set_desc_copy(dev, t->bfe_name); return(0); } t++; } return(ENXIO); } static int bfe_dma_alloc(device_t dev) { struct bfe_softc *sc; int error, i; sc = device_get_softc(dev); /* parent tag */ error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* num of segments */ BUS_SPACE_MAXSIZE_32BIT, /* max segment size */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_parent_tag); /* tag for TX ring */ error = bus_dma_tag_create(sc->bfe_parent_tag, BFE_TX_LIST_SIZE, BFE_TX_LIST_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BFE_TX_LIST_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bfe_tx_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return(ENOMEM); } /* tag for RX ring */ error = bus_dma_tag_create(sc->bfe_parent_tag, BFE_RX_LIST_SIZE, BFE_RX_LIST_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BFE_RX_LIST_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bfe_rx_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return(ENOMEM); } /* tag for mbufs */ error = bus_dma_tag_create(sc->bfe_parent_tag, ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bfe_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return(ENOMEM); } /* pre allocate dmamaps for RX list */ for (i = 0; i < BFE_RX_LIST_CNT; i++) { error = bus_dmamap_create(sc->bfe_tag, 0, &sc->bfe_rx_ring[i].bfe_map); if 
(error) { device_printf(dev, "cannot create DMA map for RX\n"); return(ENOMEM); } } /* pre allocate dmamaps for TX list */ for (i = 0; i < BFE_TX_LIST_CNT; i++) { error = bus_dmamap_create(sc->bfe_tag, 0, &sc->bfe_tx_ring[i].bfe_map); if (error) { device_printf(dev, "cannot create DMA map for TX\n"); return(ENOMEM); } } /* Alloc dma for rx ring */ error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list, BUS_DMA_NOWAIT, &sc->bfe_rx_map); if(error) return(ENOMEM); bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map, sc->bfe_rx_list, sizeof(struct bfe_desc), bfe_dma_map, &sc->bfe_rx_dma, 0); if(error) return(ENOMEM); bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD); error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list, BUS_DMA_NOWAIT, &sc->bfe_tx_map); if (error) return(ENOMEM); error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map, sc->bfe_tx_list, sizeof(struct bfe_desc), bfe_dma_map, &sc->bfe_tx_dma, 0); if(error) return(ENOMEM); bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD); return(0); } static int bfe_attach(device_t dev) { struct ifnet *ifp; struct bfe_softc *sc; int unit, error = 0, rid; sc = device_get_softc(dev); mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); unit = device_get_unit(dev); sc->bfe_dev = dev; sc->bfe_unit = unit; /* * Handle power management nonsense. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t membase, irq; /* Save important PCI config data. */ membase = pci_read_config(dev, BFE_PCI_MEMLO, 4); irq = pci_read_config(dev, BFE_PCI_INTLINE, 4); /* Reset the power state. */ printf("bfe%d: chip is is in D%d power mode -- setting to D0\n", sc->bfe_unit, pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. 
*/ pci_write_config(dev, BFE_PCI_MEMLO, membase, 4); pci_write_config(dev, BFE_PCI_INTLINE, irq, 4); } /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = BFE_PCI_MEMLO; sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bfe_res == NULL) { printf ("bfe%d: couldn't map memory\n", unit); error = ENXIO; goto fail; } sc->bfe_btag = rman_get_bustag(sc->bfe_res); sc->bfe_bhandle = rman_get_bushandle(sc->bfe_res); sc->bfe_vhandle = (vm_offset_t)rman_get_virtual(sc->bfe_res); /* Allocate interrupt */ rid = 0; sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->bfe_irq == NULL) { printf("bfe%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } if (bfe_dma_alloc(dev)) { printf("bfe%d: failed to allocate DMA resources\n", sc->bfe_unit); bfe_release_resources(sc); error = ENXIO; goto fail; } /* Set up ifnet structure */ ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = bfe_ioctl; ifp->if_start = bfe_start; ifp->if_watchdog = bfe_watchdog; ifp->if_init = bfe_init; ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 100000000; ifp->if_snd.ifq_maxlen = BFE_TX_QLEN; bfe_get_config(sc); /* Reset the chip and turn on the PHY */ bfe_chip_reset(sc); if (mii_phy_probe(dev, &sc->bfe_miibus, bfe_ifmedia_upd, bfe_ifmedia_sts)) { printf("bfe%d: MII without any PHY!\n", sc->bfe_unit); error = ENXIO; goto fail; } ether_ifattach(ifp, sc->arpcom.ac_enaddr); callout_handle_init(&sc->bfe_stat_ch); /* * Tell the upper layer(s) we support long frames. 
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; /* * Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET, bfe_intr, sc, &sc->bfe_intrhand); if (error) { bfe_release_resources(sc); printf("bfe%d: couldn't set up irq\n", unit); goto fail; } fail: if(error) bfe_release_resources(sc); return(error); } static int bfe_detach(device_t dev) { struct bfe_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->bfe_mtx), ("bfe mutex not initialized")); BFE_LOCK(scp); ifp = &sc->arpcom.ac_if; if (device_is_attached(dev)) { bfe_stop(sc); ether_ifdetach(ifp); } bfe_chip_reset(sc); bus_generic_detach(dev); if(sc->bfe_miibus != NULL) device_delete_child(dev, sc->bfe_miibus); bfe_release_resources(sc); BFE_UNLOCK(sc); mtx_destroy(&sc->bfe_mtx); return(0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static void bfe_shutdown(device_t dev) { struct bfe_softc *sc; sc = device_get_softc(dev); BFE_LOCK(sc); bfe_stop(sc); BFE_UNLOCK(sc); return; } static int bfe_miibus_readreg(device_t dev, int phy, int reg) { struct bfe_softc *sc; u_int32_t ret; sc = device_get_softc(dev); if(phy != sc->bfe_phyaddr) return(0); bfe_readphy(sc, reg, &ret); return(ret); } static int bfe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct bfe_softc *sc; sc = device_get_softc(dev); if(phy != sc->bfe_phyaddr) return(0); bfe_writephy(sc, reg, val); return(0); } static void bfe_miibus_statchg(device_t dev) { return; } static void bfe_tx_ring_free(struct bfe_softc *sc) { int i; for(i = 0; i < BFE_TX_LIST_CNT; i++) { if(sc->bfe_tx_ring[i].bfe_mbuf != NULL) { m_freem(sc->bfe_tx_ring[i].bfe_mbuf); sc->bfe_tx_ring[i].bfe_mbuf = NULL; bus_dmamap_unload(sc->bfe_tag, sc->bfe_tx_ring[i].bfe_map); bus_dmamap_destroy(sc->bfe_tag, sc->bfe_tx_ring[i].bfe_map); } } bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD); } static void bfe_rx_ring_free(struct bfe_softc *sc) { int i; for (i = 0; i < BFE_RX_LIST_CNT; i++) { if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) { m_freem(sc->bfe_rx_ring[i].bfe_mbuf); sc->bfe_rx_ring[i].bfe_mbuf = NULL; bus_dmamap_unload(sc->bfe_tag, sc->bfe_rx_ring[i].bfe_map); bus_dmamap_destroy(sc->bfe_tag, sc->bfe_rx_ring[i].bfe_map); } } bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD); } static int bfe_list_rx_init(struct bfe_softc *sc) { int i; for(i = 0; i < BFE_RX_LIST_CNT; i++) { if(bfe_list_newbuf(sc, i, NULL) == ENOBUFS) return ENOBUFS; } bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc))); sc->bfe_rx_cons = 0; return(0); } static int bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m) { struct bfe_rxheader *rx_header; struct bfe_desc *d; struct bfe_data 
*r; u_int32_t ctrl; if ((c < 0) || (c >= BFE_RX_LIST_CNT)) return(EINVAL); if(m == NULL) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if(m == NULL) return(ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; } else m->m_data = m->m_ext.ext_buf; rx_header = mtod(m, struct bfe_rxheader *); rx_header->len = 0; rx_header->flags = 0; /* Map the mbuf into DMA */ sc->bfe_rx_cnt = c; d = &sc->bfe_rx_list[c]; r = &sc->bfe_rx_ring[c]; bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *), MCLBYTES, bfe_dma_map_desc, d, 0); bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREWRITE); ctrl = ETHER_MAX_LEN + 32; if(c == BFE_RX_LIST_CNT - 1) ctrl |= BFE_DESC_EOT; d->bfe_ctrl = ctrl; r->bfe_mbuf = m; bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD); return(0); } static void bfe_get_config(struct bfe_softc *sc) { u_int8_t eeprom[128]; bfe_read_eeprom(sc, eeprom); sc->arpcom.ac_enaddr[0] = eeprom[79]; sc->arpcom.ac_enaddr[1] = eeprom[78]; sc->arpcom.ac_enaddr[2] = eeprom[81]; sc->arpcom.ac_enaddr[3] = eeprom[80]; sc->arpcom.ac_enaddr[4] = eeprom[83]; sc->arpcom.ac_enaddr[5] = eeprom[82]; sc->bfe_phyaddr = eeprom[90] & 0x1f; sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1; sc->bfe_core_unit = 0; sc->bfe_dma_offset = BFE_PCI_DMA; } static void bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores) { u_int32_t bar_orig, pci_rev, val; bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4); pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4); pci_rev = CSR_READ_4(sc, BFE_SBIDHIGH) & BFE_RC_MASK; val = CSR_READ_4(sc, BFE_SBINTVEC); val |= cores; CSR_WRITE_4(sc, BFE_SBINTVEC, val); val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2); val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST; CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val); pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4); } static void bfe_clear_stats(struct bfe_softc *sc) { u_long reg; BFE_LOCK(sc); CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ); for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) 
CSR_READ_4(sc, reg); for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) CSR_READ_4(sc, reg); BFE_UNLOCK(sc); } static int bfe_resetphy(struct bfe_softc *sc) { u_int32_t val; BFE_LOCK(sc); bfe_writephy(sc, 0, BMCR_RESET); DELAY(100); bfe_readphy(sc, 0, &val); if (val & BMCR_RESET) { printf("bfe%d: PHY Reset would not complete.\n", sc->bfe_unit); BFE_UNLOCK(sc); return ENXIO; } BFE_UNLOCK(sc); return 0; } static void bfe_chip_halt(struct bfe_softc *sc) { BFE_LOCK(sc); /* disable interrupts - not that it actually does..*/ CSR_WRITE_4(sc, BFE_IMASK, 0); CSR_READ_4(sc, BFE_IMASK); CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1); CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); DELAY(10); BFE_UNLOCK(sc); } static void bfe_chip_reset(struct bfe_softc *sc) { u_int32_t val; BFE_LOCK(sc); /* Set the interrupt vector for the enet core */ bfe_pci_setup(sc, BFE_INTVEC_ENET0); /* is core up? */ val = CSR_READ_4(sc, BFE_SBTMSLOW) & (BFE_RESET | BFE_REJECT | BFE_CLOCK); if (val == BFE_CLOCK) { /* It is, so shut it down */ CSR_WRITE_4(sc, BFE_RCV_LAZY, 0); CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1); CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0; if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK) bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 100, 0); CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); sc->bfe_rx_prod = sc->bfe_rx_cons = 0; } bfe_core_reset(sc); bfe_clear_stats(sc); /* * We want the phy registers to be accessible even when * the driver is "downed" so initialize MDC preamble, frequency, * and whether internal or external phy here. */ /* 4402 has 62.5Mhz SB clock and internal phy */ CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d); /* Internal or external PHY? 
*/ val = CSR_READ_4(sc, BFE_DEVCTRL); if(!(val & BFE_IPP)) CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL); else if(CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) { BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR); DELAY(100); } /* Enable CRC32 generation and set proper LED modes */ BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED); /* Reset or clear powerdown control bit */ BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN); CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) & BFE_LAZY_FC_MASK)); /* * We don't want lazy interrupts, so just send them at * the end of a frame, please */ BFE_OR(sc, BFE_RCV_LAZY, 0); /* Set max lengths, accounting for VLAN tags */ CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32); CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32); /* Set watermark XXX - magic */ CSR_WRITE_4(sc, BFE_TX_WMARK, 56); /* * Initialise DMA channels * - not forgetting dma addresses need to be added to BFE_PCI_DMA */ CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE); CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA); CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) | BFE_RX_CTRL_ENABLE); CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA); bfe_resetphy(sc); bfe_setupphy(sc); BFE_UNLOCK(sc); } static void bfe_core_disable(struct bfe_softc *sc) { if((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET) return; /* * Set reject, wait for it set, then wait for the core to stop * being busy, then set reset and reject and enable the clocks. 
*/ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK)); bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0); bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1); CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Leave reset and reject set */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET)); DELAY(10); } static void bfe_core_reset(struct bfe_softc *sc) { u_int32_t val; /* Disable the core */ bfe_core_disable(sc); /* and bring it back up */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Chip bug, clear SERR, IB and TO if they are set. */ if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR) CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0); val = CSR_READ_4(sc, BFE_SBIMSTATE); if (val & (BFE_IBE | BFE_TO)) CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO)); /* Clear reset and allow it to move through the core */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Leave the clock set */ CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); } static void bfe_cam_write(struct bfe_softc *sc, u_char *data, int index) { u_int32_t val; val = ((u_int32_t) data[2]) << 24; val |= ((u_int32_t) data[3]) << 16; val |= ((u_int32_t) data[4]) << 8; val |= ((u_int32_t) data[5]); CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val); val = (BFE_CAM_HI_VALID | (((u_int32_t) data[0]) << 8) | (((u_int32_t) data[1]))); CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val); CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE | ((u_int32_t) index << BFE_CAM_INDEX_SHIFT))); bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1); } static void bfe_set_rx_mode(struct bfe_softc *sc) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ifmultiaddr *ifma; u_int32_t val; int i = 0; val = CSR_READ_4(sc, BFE_RXCONF); if (ifp->if_flags & IFF_PROMISC) val |= BFE_RXCONF_PROMISC; else val &= ~BFE_RXCONF_PROMISC; if (ifp->if_flags & IFF_BROADCAST) 
val &= ~BFE_RXCONF_DBCAST; else val |= BFE_RXCONF_DBCAST; CSR_WRITE_4(sc, BFE_CAM_CTRL, 0); bfe_cam_write(sc, sc->arpcom.ac_enaddr, i++); if (ifp->if_flags & IFF_ALLMULTI) val |= BFE_RXCONF_ALLMULTI; else { val &= ~BFE_RXCONF_ALLMULTI; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bfe_cam_write(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i++); } } CSR_WRITE_4(sc, BFE_RXCONF, val); BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE); } static void bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *ptr; ptr = arg; *ptr = segs->ds_addr; } static void bfe_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bfe_desc *d; d = arg; /* The chip needs all addresses to be added to BFE_PCI_DMA */ d->bfe_addr = segs->ds_addr + BFE_PCI_DMA; } static void bfe_release_resources(struct bfe_softc *sc) { device_t dev; int i; dev = sc->bfe_dev; if (sc->bfe_vpd_prodname != NULL) free(sc->bfe_vpd_prodname, M_DEVBUF); if (sc->bfe_vpd_readonly != NULL) free(sc->bfe_vpd_readonly, M_DEVBUF); if (sc->bfe_intrhand != NULL) bus_teardown_intr(dev, sc->bfe_irq, sc->bfe_intrhand); if (sc->bfe_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bfe_irq); if (sc->bfe_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0x10, sc->bfe_res); if(sc->bfe_tx_tag != NULL) { bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map); bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list, sc->bfe_tx_map); bus_dma_tag_destroy(sc->bfe_tx_tag); sc->bfe_tx_tag = NULL; } if(sc->bfe_rx_tag != NULL) { bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map); bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list, sc->bfe_rx_map); bus_dma_tag_destroy(sc->bfe_rx_tag); sc->bfe_rx_tag = NULL; } if(sc->bfe_tag != NULL) { for(i = 0; i < BFE_TX_LIST_CNT; i++) { bus_dmamap_destroy(sc->bfe_tag, sc->bfe_tx_ring[i].bfe_map); } bus_dma_tag_destroy(sc->bfe_tag); sc->bfe_tag = NULL; } if(sc->bfe_parent_tag != NULL) 
bus_dma_tag_destroy(sc->bfe_parent_tag); return; } static void bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data) { long i; u_int16_t *ptr = (u_int16_t *)data; for(i = 0; i < 128; i += 2) ptr[i/2] = CSR_READ_4(sc, 4096 + i); } static int bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit, u_long timeout, const int clear) { u_long i; for (i = 0; i < timeout; i++) { u_int32_t val = CSR_READ_4(sc, reg); if (clear && !(val & bit)) break; if (!clear && (val & bit)) break; DELAY(10); } if (i == timeout) { printf("bfe%d: BUG! Timeout waiting for bit %08x of register " "%x to %s.\n", sc->bfe_unit, bit, reg, (clear ? "clear" : "set")); return -1; } return 0; } static int bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val) { int err; BFE_LOCK(sc); /* Clear MII ISR */ CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) | (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | (reg << BFE_MDIO_RA_SHIFT) | (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT))); err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); *val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA; BFE_UNLOCK(sc); return err; } static int bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val) { int status; BFE_LOCK(sc); CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) | (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | (reg << BFE_MDIO_RA_SHIFT) | (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) | (val & BFE_MDIO_DATA_DATA))); status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); BFE_UNLOCK(sc); return status; } /* * XXX - I think this is handled by the PHY driver, but it can't hurt to do it * twice */ static int bfe_setupphy(struct bfe_softc *sc) { u_int32_t val; BFE_LOCK(sc); /* Enable activity LED */ bfe_readphy(sc, 26, &val); bfe_writephy(sc, 26, val & 0x7fff); bfe_readphy(sc, 26, &val); /* 
Enable traffic meter LED mode */ bfe_readphy(sc, 27, &val); bfe_writephy(sc, 27, val | (1 << 6)); BFE_UNLOCK(sc); return 0; } static void bfe_stats_update(struct bfe_softc *sc) { u_long reg; u_int32_t *val; val = &sc->bfe_hwstats.tx_good_octets; for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) { *val++ += CSR_READ_4(sc, reg); } val = &sc->bfe_hwstats.rx_good_octets; for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) { *val++ += CSR_READ_4(sc, reg); } } static void bfe_txeof(struct bfe_softc *sc) { struct ifnet *ifp; int i, chipidx; BFE_LOCK(sc); ifp = &sc->arpcom.ac_if; chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK; chipidx /= sizeof(struct bfe_desc); i = sc->bfe_tx_cons; /* Go through the mbufs and free those that have been transmitted */ while(i != chipidx) { struct bfe_data *r = &sc->bfe_tx_ring[i]; if(r->bfe_mbuf != NULL) { ifp->if_opackets++; m_freem(r->bfe_mbuf); r->bfe_mbuf = NULL; bus_dmamap_unload(sc->bfe_tag, r->bfe_map); } sc->bfe_tx_cnt--; BFE_INC(i, BFE_TX_LIST_CNT); } if(i != sc->bfe_tx_cons) { /* we freed up some mbufs */ sc->bfe_tx_cons = i; ifp->if_flags &= ~IFF_OACTIVE; } if(sc->bfe_tx_cnt == 0) ifp->if_timer = 0; else ifp->if_timer = 5; BFE_UNLOCK(sc); } /* Pass a received packet up the stack */ static void bfe_rxeof(struct bfe_softc *sc) { struct mbuf *m; struct ifnet *ifp; struct bfe_rxheader *rxheader; struct bfe_data *r; int cons; u_int32_t status, current, len, flags; BFE_LOCK(sc); cons = sc->bfe_rx_cons; status = CSR_READ_4(sc, BFE_DMARX_STAT); current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc); ifp = &sc->arpcom.ac_if; while(current != cons) { r = &sc->bfe_rx_ring[cons]; m = r->bfe_mbuf; rxheader = mtod(m, struct bfe_rxheader*); bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_POSTWRITE); len = rxheader->len; r->bfe_mbuf = NULL; bus_dmamap_unload(sc->bfe_tag, r->bfe_map); flags = rxheader->flags; len -= ETHER_CRC_LEN; /* flag an error and try again */ if ((len > ETHER_MAX_LEN+32) || (flags & 
BFE_RX_FLAG_ERRORS)) { ifp->if_ierrors++; if (flags & BFE_RX_FLAG_SERR) ifp->if_collisions++; bfe_list_newbuf(sc, cons, m); BFE_INC(cons, BFE_RX_LIST_CNT); continue; } /* Go past the rx header */ if (bfe_list_newbuf(sc, cons, NULL) == 0) { m_adj(m, BFE_RX_OFFSET); m->m_len = m->m_pkthdr.len = len; } else { bfe_list_newbuf(sc, cons, m); ifp->if_ierrors++; BFE_INC(cons, BFE_RX_LIST_CNT); continue; } ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; BFE_UNLOCK(sc); (*ifp->if_input)(ifp, m); BFE_LOCK(sc); BFE_INC(cons, BFE_RX_LIST_CNT); } sc->bfe_rx_cons = cons; BFE_UNLOCK(sc); } static void bfe_intr(void *xsc) { struct bfe_softc *sc = xsc; struct ifnet *ifp; u_int32_t istat, imask, flag; ifp = &sc->arpcom.ac_if; BFE_LOCK(sc); istat = CSR_READ_4(sc, BFE_ISTAT); imask = CSR_READ_4(sc, BFE_IMASK); /* * Defer unsolicited interrupts - This is necessary because setting the * chips interrupt mask register to 0 doesn't actually stop the * interrupts */ istat &= imask; CSR_WRITE_4(sc, BFE_ISTAT, istat); CSR_READ_4(sc, BFE_ISTAT); /* not expecting this interrupt, disregard it */ if(istat == 0) { BFE_UNLOCK(sc); return; } if(istat & BFE_ISTAT_ERRORS) { flag = CSR_READ_4(sc, BFE_DMATX_STAT); if(flag & BFE_STAT_EMASK) ifp->if_oerrors++; flag = CSR_READ_4(sc, BFE_DMARX_STAT); if(flag & BFE_RX_FLAG_ERRORS) ifp->if_ierrors++; ifp->if_flags &= ~IFF_RUNNING; bfe_init(sc); } /* A packet was received */ if(istat & BFE_ISTAT_RX) bfe_rxeof(sc); /* A packet was sent */ if(istat & BFE_ISTAT_TX) bfe_txeof(sc); /* We have packets pending, fire them out */ if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) bfe_start(ifp); BFE_UNLOCK(sc); } static int bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx) { struct bfe_desc *d = NULL; struct bfe_data *r = NULL; struct mbuf *m; u_int32_t frag, cur, cnt = 0; int chainlen = 0; if(BFE_TX_LIST_CNT - sc->bfe_tx_cnt < 2) return(ENOBUFS); /* * Count the number of frags in this chain to see if * we need to m_defrag. 
Since the descriptor list is shared * by all packets, we'll m_defrag long chains so that they * do not use up the entire list, even if they would fit. */ for(m = m_head; m != NULL; m = m->m_next) chainlen++; if ((chainlen > BFE_TX_LIST_CNT / 4) || ((BFE_TX_LIST_CNT - (chainlen + sc->bfe_tx_cnt)) < 2)) { m = m_defrag(m_head, M_DONTWAIT); if (m == NULL) return(ENOBUFS); m_head = m; } /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; cur = frag = *txidx; cnt = 0; for(m = m_head; m != NULL; m = m->m_next) { if(m->m_len != 0) { if((BFE_TX_LIST_CNT - (sc->bfe_tx_cnt + cnt)) < 2) return(ENOBUFS); d = &sc->bfe_tx_list[cur]; r = &sc->bfe_tx_ring[cur]; d->bfe_ctrl = BFE_DESC_LEN & m->m_len; /* always intterupt on completion */ d->bfe_ctrl |= BFE_DESC_IOC; if(cnt == 0) /* Set start of frame */ d->bfe_ctrl |= BFE_DESC_SOF; if(cur == BFE_TX_LIST_CNT - 1) /* * Tell the chip to wrap to the start of * the descriptor list */ d->bfe_ctrl |= BFE_DESC_EOT; bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void*), m->m_len, bfe_dma_map_desc, d, 0); bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREREAD); frag = cur; BFE_INC(cur, BFE_TX_LIST_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc->bfe_tx_list[frag].bfe_ctrl |= BFE_DESC_EOF; sc->bfe_tx_ring[frag].bfe_mbuf = m_head; bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD); *txidx = cur; sc->bfe_tx_cnt += cnt; return (0); } /* * Set up to transmit a packet */ static void bfe_start(struct ifnet *ifp) { struct bfe_softc *sc; struct mbuf *m_head = NULL; int idx; sc = ifp->if_softc; idx = sc->bfe_tx_prod; BFE_LOCK(sc); /* * Not much point trying to send if the link is down * or we have nothing to send. 
*/ if (!sc->bfe_link && ifp->if_snd.ifq_len < 10) { BFE_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { BFE_UNLOCK(sc); return; } while(sc->bfe_tx_ring[idx].bfe_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if(m_head == NULL) break; /* * Pack the data into the tx ring. If we dont have * enough room, let the chip drain the ring. */ if(bfe_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } sc->bfe_tx_prod = idx; /* Transmit - twice due to apparent hardware bug */ CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc)); CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc)); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; BFE_UNLOCK(sc); } static void bfe_init(void *xsc) { struct bfe_softc *sc = (struct bfe_softc*)xsc; struct ifnet *ifp = &sc->arpcom.ac_if; BFE_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { BFE_UNLOCK(sc); return; } bfe_stop(sc); bfe_chip_reset(sc); if (bfe_list_rx_init(sc) == ENOBUFS) { printf("bfe%d: bfe_init: Not enough memory for list buffers\n", sc->bfe_unit); bfe_stop(sc); return; } bfe_set_rx_mode(sc); /* Enable the chip and core */ BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE); /* Enable interrupts */ CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF); bfe_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->bfe_stat_ch = timeout(bfe_tick, sc, hz); BFE_UNLOCK(sc); } /* * Set media options. */ static int bfe_ifmedia_upd(struct ifnet *ifp) { struct bfe_softc *sc; struct mii_data *mii; sc = ifp->if_softc; BFE_LOCK(sc); mii = device_get_softc(sc->bfe_miibus); sc->bfe_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); BFE_UNLOCK(sc); return(0); } /* * Report current media status. 
*/ static void bfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct bfe_softc *sc = ifp->if_softc; struct mii_data *mii; BFE_LOCK(sc); mii = device_get_softc(sc->bfe_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BFE_UNLOCK(sc); } static int bfe_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct bfe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; BFE_LOCK(sc); switch(command) { case SIOCSIFFLAGS: if(ifp->if_flags & IFF_UP) if(ifp->if_flags & IFF_RUNNING) bfe_set_rx_mode(sc); else bfe_init(sc); else if(ifp->if_flags & IFF_RUNNING) bfe_stop(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if(ifp->if_flags & IFF_RUNNING) bfe_set_rx_mode(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->bfe_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } BFE_UNLOCK(sc); return error; } static void bfe_watchdog(struct ifnet *ifp) { struct bfe_softc *sc; sc = ifp->if_softc; BFE_LOCK(sc); printf("bfe%d: watchdog timeout -- resetting\n", sc->bfe_unit); ifp->if_flags &= ~IFF_RUNNING; bfe_init(sc); ifp->if_oerrors++; BFE_UNLOCK(sc); } static void bfe_tick(void *xsc) { struct bfe_softc *sc = xsc; struct mii_data *mii; if (sc == NULL) return; BFE_LOCK(sc); mii = device_get_softc(sc->bfe_miibus); bfe_stats_update(sc); sc->bfe_stat_ch = timeout(bfe_tick, sc, hz); if(sc->bfe_link) { BFE_UNLOCK(sc); return; } mii_tick(mii); if (!sc->bfe_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->bfe_link++; BFE_UNLOCK(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
*/ static void bfe_stop(struct bfe_softc *sc) { struct ifnet *ifp; BFE_LOCK(sc); untimeout(bfe_tick, sc, sc->bfe_stat_ch); ifp = &sc->arpcom.ac_if; bfe_chip_halt(sc); bfe_tx_ring_free(sc); bfe_rx_ring_free(sc); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); BFE_UNLOCK(sc); } Index: head/sys/dev/bge/if_bge.c =================================================================== --- head/sys/dev/bge/if_bge.c (revision 129878) +++ head/sys/dev/bge/if_bge.c (revision 129879) @@ -1,3633 +1,3634 @@ /* * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. * * The Broadcom BCM5700 is based on technology originally developed by * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has * two on-board MIPS R4000 CPUs and can have as much as 16MB of external * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo * frames, highly configurable RX filtering, and 16 RX and TX queues * (which, along with RX filter rules, can be used for QOS applications). * Other features, such as TCP segmentation, may be available as part * of value-added firmware updates. Unlike the Tigon I and Tigon II, * firmware images can be stored in hardware and need not be compiled * into the driver. * * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. * * The BCM5701 is a single-chip solution incorporating both the BCM5700 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 * does not support external SSRAM. * * Broadcom also produces a variation of the BCM5700 under the "Altima" * brand name, which is functionally similar but lacks PCI-X support. * * Without external SSRAM, you can only have at most 4 TX rings, * and the use of the mini RX ring is disabled. 
This seems to imply * that these features are simply not available on the BCM5701. As a * result, this driver does not implement any support for the mini RX * ring. */ #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) MODULE_DEPEND(bge, pci, 1, 1, 1); MODULE_DEPEND(bge, ether, 1, 1, 1); MODULE_DEPEND(bge, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. Note: the * spec seems to indicate that the hardware still has Alteon's vendor * ID burned into it, though it will always be overriden by the vendor * ID in the EEPROM. Just to be safe, we cover all possibilities. 
*/ #define BGE_DEVDESC_MAX 64 /* Maximum device description length */ static struct bge_type bge_devs[] = { { ALT_VENDORID, ALT_DEVICEID_BCM5700, "Broadcom BCM5700 Gigabit Ethernet" }, { ALT_VENDORID, ALT_DEVICEID_BCM5701, "Broadcom BCM5701 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, "Broadcom BCM5700 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701, "Broadcom BCM5701 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702, "Broadcom BCM5702 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X, "Broadcom BCM5702X Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703, "Broadcom BCM5703 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X, "Broadcom BCM5703X Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C, "Broadcom BCM5704C Dual Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S, "Broadcom BCM5704S Dual Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705, "Broadcom BCM5705 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K, "Broadcom BCM5705K Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M, "Broadcom BCM5705M Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT, "Broadcom BCM5705M Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782, "Broadcom BCM5782 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788, "Broadcom BCM5788 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901, "Broadcom BCM5901 Fast Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2, "Broadcom BCM5901A2 Fast Ethernet" }, { SK_VENDORID, SK_DEVICEID_ALTIMA, "SysKonnect Gigabit Ethernet" }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000, "Altima AC1000 Gigabit Ethernet" }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002, "Altima AC1002 Gigabit Ethernet" }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100, "Altima AC9100 Gigabit Ethernet" }, { 0, 0, NULL } }; static int bge_probe (device_t); static int bge_attach (device_t); static int bge_detach (device_t); static void 
bge_release_resources (struct bge_softc *); static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int); static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static int bge_dma_alloc (device_t); static void bge_dma_free (struct bge_softc *); static void bge_txeof (struct bge_softc *); static void bge_rxeof (struct bge_softc *); static void bge_tick_locked (struct bge_softc *); static void bge_tick (void *); static void bge_stats_update (struct bge_softc *); static void bge_stats_update_regs (struct bge_softc *); static int bge_encap (struct bge_softc *, struct mbuf *, u_int32_t *); static void bge_intr (void *); static void bge_start_locked (struct ifnet *); static void bge_start (struct ifnet *); static int bge_ioctl (struct ifnet *, u_long, caddr_t); static void bge_init_locked (struct bge_softc *); static void bge_init (void *); static void bge_stop (struct bge_softc *); static void bge_watchdog (struct ifnet *); static void bge_shutdown (device_t); static int bge_ifmedia_upd (struct ifnet *); static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *); static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *); static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int); static uint32_t bge_mchash (const uint8_t *); static void bge_setmulti (struct bge_softc *); static void bge_handle_events (struct bge_softc *); static int bge_alloc_jumbo_mem (struct bge_softc *); static void bge_free_jumbo_mem (struct bge_softc *); static void *bge_jalloc (struct bge_softc *); static void bge_jfree (void *, void *); static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *); static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *); static int bge_init_rx_ring_std (struct bge_softc *); static void bge_free_rx_ring_std (struct bge_softc *); static int bge_init_rx_ring_jumbo (struct bge_softc *); static void bge_free_rx_ring_jumbo (struct bge_softc *); static void bge_free_tx_ring (struct 
bge_softc *); static int bge_init_tx_ring (struct bge_softc *); static int bge_chipinit (struct bge_softc *); static int bge_blockinit (struct bge_softc *); #ifdef notdef static u_int8_t bge_vpd_readbyte(struct bge_softc *, int); static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int); static void bge_vpd_read (struct bge_softc *); #endif static u_int32_t bge_readmem_ind (struct bge_softc *, int); static void bge_writemem_ind (struct bge_softc *, int, int); #ifdef notdef static u_int32_t bge_readreg_ind (struct bge_softc *, int); #endif static void bge_writereg_ind (struct bge_softc *, int, int); static int bge_miibus_readreg (device_t, int, int); static int bge_miibus_writereg (device_t, int, int, int); static void bge_miibus_statchg (device_t); static void bge_reset (struct bge_softc *); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), { 0, 0 } }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; static devclass_t bge_devclass; DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); static u_int32_t bge_readmem_ind(sc, off) struct bge_softc *sc; int off; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); } static void bge_writemem_ind(sc, off, val) struct bge_softc *sc; int off, val; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, 
BGE_PCI_MEMWIN_DATA, val, 4); return; } #ifdef notdef static u_int32_t bge_readreg_ind(sc, off) struct bge_softc *sc; int off; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(sc, off, val) struct bge_softc *sc; int off, val; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); return; } /* * Map a single buffer address. */ static void bge_dma_map_addr(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { struct bge_dmamap_arg *ctx; if (error) return; ctx = arg; if (nseg > ctx->bge_maxsegs) { ctx->bge_maxsegs = 0; return; } ctx->bge_busaddr = segs->ds_addr; return; } /* * Map an mbuf chain into an TX ring. */ static void bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct bge_dmamap_arg *ctx; struct bge_tx_bd *d = NULL; int i = 0, idx; if (error) return; ctx = arg; /* Signal error to caller if there's too many segments */ if (nseg > ctx->bge_maxsegs) { ctx->bge_maxsegs = 0; return; } idx = ctx->bge_idx; while(1) { d = &ctx->bge_ring[idx]; d->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(segs[i].ds_addr)); d->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(segs[i].ds_addr)); d->bge_len = htole16(segs[i].ds_len); d->bge_flags = htole16(ctx->bge_flags); i++; if (i == nseg) break; BGE_INC(idx, BGE_TX_RING_CNT); } d->bge_flags |= htole16(BGE_TXBDFLAG_END); ctx->bge_maxsegs = nseg; ctx->bge_idx = idx; return; } #ifdef notdef static u_int8_t bge_vpd_readbyte(sc, addr) struct bge_softc *sc; int addr; { int i; device_t dev; u_int32_t val; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); for (i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) break; } if (i == BGE_TIMEOUT) { 
printf("bge%d: VPD read timed out\n", sc->bge_unit); return(0); } val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); return((val >> ((addr % 4) * 8)) & 0xFF); } static void bge_vpd_read_res(sc, res, addr) struct bge_softc *sc; struct vpd_res *res; int addr; { int i; u_int8_t *ptr; ptr = (u_int8_t *)res; for (i = 0; i < sizeof(struct vpd_res); i++) ptr[i] = bge_vpd_readbyte(sc, i + addr); return; } static void bge_vpd_read(sc) struct bge_softc *sc; { int pos = 0, i; struct vpd_res res; if (sc->bge_vpd_prodname != NULL) free(sc->bge_vpd_prodname, M_DEVBUF); if (sc->bge_vpd_readonly != NULL) free(sc->bge_vpd_readonly, M_DEVBUF); sc->bge_vpd_prodname = NULL; sc->bge_vpd_readonly = NULL; bge_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_ID) { printf("bge%d: bad VPD resource id: expected %x got %x\n", sc->bge_unit, VPD_RES_ID, res.vr_id); return; } pos += sizeof(res); sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len; i++) sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); sc->bge_vpd_prodname[i] = '\0'; pos += i; bge_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_READ) { printf("bge%d: bad VPD resource id: expected %x got %x\n", sc->bge_unit, VPD_RES_READ, res.vr_id); return; } pos += sizeof(res); sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len + 1; i++) sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); return; } #endif /* * Read a byte of data stored in the EEPROM at address 'addr.' The * BCM570x supports both the traditional bitbang interface and an * auto access interface for reading the EEPROM. We use the auto * access method. */ static u_int8_t bge_eeprom_getbyte(sc, addr, dest) struct bge_softc *sc; int addr; u_int8_t *dest; { int i; u_int32_t byte = 0; /* * Enable use of auto EEPROM access so we can avoid * having to use the bitbang method. */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); /* Reset the EEPROM, load the clock period. 
*/ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); DELAY(20); /* Issue the read EEPROM command. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); /* Wait for completion */ for(i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) break; } if (i == BGE_TIMEOUT) { printf("bge%d: eeprom read timed out\n", sc->bge_unit); return(0); } /* Get result. */ byte = CSR_READ_4(sc, BGE_EE_DATA); *dest = (byte >> ((addr % 4) * 8)) & 0xFF; return(0); } /* * Read a sequence of bytes from the EEPROM. */ static int bge_read_eeprom(sc, dest, off, cnt) struct bge_softc *sc; caddr_t dest; int off; int cnt; { int err = 0, i; u_int8_t byte = 0; for (i = 0; i < cnt; i++) { err = bge_eeprom_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return(err ? 1 : 0); } static int bge_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct bge_softc *sc; u_int32_t val, autopoll; int i; sc = device_get_softc(dev); /* * Broadcom's own driver always assumes the internal * PHY is at GMII address 1. On some chips, the PHY responds * to accesses at all addresses, which could cause us to * bogusly attach the PHY 32 times at probe type. Always * restricting the lookup to address 1 is simpler than * trying to figure out which chips revisions should be * special-cased. 
*/ if (phy != 1) return(0); /* Reading with autopolling on may trigger PCI errors */ autopoll = CSR_READ_4(sc, BGE_MI_MODE); if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| BGE_MIPHY(phy)|BGE_MIREG(reg)); for (i = 0; i < BGE_TIMEOUT; i++) { val = CSR_READ_4(sc, BGE_MI_COMM); if (!(val & BGE_MICOMM_BUSY)) break; } if (i == BGE_TIMEOUT) { printf("bge%d: PHY read timed out\n", sc->bge_unit); val = 0; goto done; } val = CSR_READ_4(sc, BGE_MI_COMM); done: if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } if (val & BGE_MICOMM_READFAIL) return(0); return(val & 0xFFFF); } static int bge_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct bge_softc *sc; u_int32_t autopoll; int i; sc = device_get_softc(dev); /* Reading with autopolling on may trigger PCI errors */ autopoll = CSR_READ_4(sc, BGE_MI_MODE); if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| BGE_MIPHY(phy)|BGE_MIREG(reg)|val); for (i = 0; i < BGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) break; } if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } if (i == BGE_TIMEOUT) { printf("bge%d: PHY read timed out\n", sc->bge_unit); return(0); } return(0); } static void bge_miibus_statchg(dev) device_t dev; { struct bge_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->bge_miibus); BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { 
BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } return; } /* * Handle events that have triggered interrupts. */ static void bge_handle_events(sc) struct bge_softc *sc; { return; } /* * Memory management for jumbo frames. */ static int bge_alloc_jumbo_mem(sc) struct bge_softc *sc; { caddr_t ptr; register int i, error; struct bge_jpool_entry *entry; /* Create tag for jumbo buffer block */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL, &sc->bge_cdata.bge_jumbo_tag); if (error) { printf("bge%d: could not allocate jumbo dma tag\n", sc->bge_unit); return (ENOMEM); } /* Allocate DMA'able memory for jumbo buffer block */ error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag, (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_jumbo_map); if (error) return (ENOMEM); SLIST_INIT(&sc->bge_jfree_listhead); SLIST_INIT(&sc->bge_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. 
*/ ptr = sc->bge_ldata.bge_jumbo_buf; for (i = 0; i < BGE_JSLOTS; i++) { sc->bge_cdata.bge_jslots[i] = ptr; ptr += BGE_JLEN; entry = malloc(sizeof(struct bge_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { bge_free_jumbo_mem(sc); sc->bge_ldata.bge_jumbo_buf = NULL; printf("bge%d: no memory for jumbo " "buffer queue!\n", sc->bge_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); } return(0); } static void bge_free_jumbo_mem(sc) struct bge_softc *sc; { int i; struct bge_jpool_entry *entry; for (i = 0; i < BGE_JSLOTS; i++) { entry = SLIST_FIRST(&sc->bge_jfree_listhead); SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); free(entry, M_DEVBUF); } /* Destroy jumbo buffer block */ if (sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag, sc->bge_ldata.bge_jumbo_buf, sc->bge_cdata.bge_jumbo_map); if (sc->bge_cdata.bge_rx_jumbo_ring_map) bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag, sc->bge_cdata.bge_jumbo_map); if (sc->bge_cdata.bge_jumbo_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag); return; } /* * Allocate a jumbo buffer. */ static void * bge_jalloc(sc) struct bge_softc *sc; { struct bge_jpool_entry *entry; entry = SLIST_FIRST(&sc->bge_jfree_listhead); if (entry == NULL) { printf("bge%d: no free jumbo buffers\n", sc->bge_unit); return(NULL); } SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); return(sc->bge_cdata.bge_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void bge_jfree(buf, args) void *buf; void *args; { struct bge_jpool_entry *entry; struct bge_softc *sc; int i; /* Extract the softc struct pointer. 
*/ sc = (struct bge_softc *)args; if (sc == NULL) panic("bge_jfree: can't find softc pointer!"); /* calculate the slot this buffer belongs to */ i = ((vm_offset_t)buf - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN; if ((i < 0) || (i >= BGE_JSLOTS)) panic("bge_jfree: asked to free buffer that we don't manage!"); entry = SLIST_FIRST(&sc->bge_jinuse_listhead); if (entry == NULL) panic("bge_jfree: buffer not in use!"); entry->slot = i; SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); return; } /* * Intialize a standard receive ring descriptor. */ static int bge_newbuf_std(sc, i, m) struct bge_softc *sc; int i; struct mbuf *m; { struct mbuf *m_new = NULL; struct bge_rx_bd *r; struct bge_dmamap_arg ctx; int error; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } if (!sc->bge_rx_alignment_bug) m_adj(m_new, ETHER_ALIGN); sc->bge_cdata.bge_rx_std_chain[i] = m_new; r = &sc->bge_ldata.bge_rx_std_ring[i]; ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error || ctx.bge_maxsegs == 0) { if (m == NULL) m_freem(m_new); return(ENOMEM); } r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr)); r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr)); r->bge_flags = htole16(BGE_RXBDFLAG_END); r->bge_len = htole16(m_new->m_len); r->bge_idx = htole16(i); bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); return(0); } /* * Initialize a jumbo receive ring descriptor. 
This allocates * a jumbo buffer from the pool managed internally by the driver. */ static int bge_newbuf_jumbo(sc, i, m) struct bge_softc *sc; int i; struct mbuf *m; { struct mbuf *m_new = NULL; struct bge_rx_bd *r; struct bge_dmamap_arg ctx; int error; if (m == NULL) { caddr_t *buf = NULL; /* Allocate the mbuf. */ MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = bge_jalloc(sc); if (buf == NULL) { m_freem(m_new); printf("bge%d: jumbo allocation failed " "-- packet dropped!\n", sc->bge_unit); return(ENOBUFS); } /* Attach the buffer to the mbuf. */ m_new->m_data = (void *) buf; m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree, (struct bge_softc *)sc, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_data = m_new->m_ext.ext_buf; m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; } if (!sc->bge_rx_alignment_bug) m_adj(m_new, ETHER_ALIGN); /* Set up the descriptor. */ sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *), m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error || ctx.bge_maxsegs == 0) { if (m == NULL) m_freem(m_new); return(ENOMEM); } r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr)); r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr)); r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING); r->bge_len = htole16(m_new->m_len); r->bge_idx = htole16(i); bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); return(0); } /* * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, * that's 1MB or memory, which is a lot. For now, we fill only the first * 256 ring entries and hope that our CPU is fast enough to keep up with * the NIC. 
*/ static int bge_init_rx_ring_std(sc) struct bge_softc *sc; { int i; for (i = 0; i < BGE_SSLOTS; i++) { if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) return(ENOBUFS); }; bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); sc->bge_std = i - 1; CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); return(0); } static void bge_free_rx_ring_std(sc) struct bge_softc *sc; { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], sizeof(struct bge_rx_bd)); } return; } static int bge_init_rx_ring_jumbo(sc) struct bge_softc *sc; { int i; struct bge_rcb *rcb; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) return(ENOBUFS); }; bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); sc->bge_jumbo = i - 1; rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); return(0); } static void bge_free_rx_ring_jumbo(sc) struct bge_softc *sc; { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_rx_bd)); } return; } static void bge_free_tx_ring(sc) struct bge_softc *sc; { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if 
(sc->bge_cdata.bge_tx_chain[i] != NULL) { m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } return; } static int bge_init_tx_ring(sc) struct bge_softc *sc; { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return(0); } #define BGE_POLY 0xEDB88320 static uint32_t bge_mchash(addr) const uint8_t *addr; { uint32_t crc; int idx, bit; uint8_t data; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0); } return(crc & 0x7F); } static void bge_setmulti(sc) struct bge_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t hashes[4] = { 0, 0, 0, 0 }; int h, i; BGE_LOCK_ASSERT(sc); ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); return; } /* First, zot all the existing filters. */ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); /* Now program new ones. */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = bge_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); } for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); return; } /* * Do endian, PCI and DMA initialization. Also check the on-board ROM * self-test results. 
*/ static int bge_chipinit(sc) struct bge_softc *sc; { int i; u_int32_t dma_rw_ctl; /* Set endianness before we access any non-PCI registers. */ #if BYTE_ORDER == BIG_ENDIAN pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_BIGENDIAN_INIT, 4); #else pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_LITTLEENDIAN_INIT, 4); #endif /* * Check the 'ROM failed' bit on the RX CPU to see if * self-tests passed. */ if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { printf("bge%d: RX CPU self-diagnostics failed!\n", sc->bge_unit); return(ENODEV); } /* Clear the MAC control register */ CSR_WRITE_4(sc, BGE_MAC_MODE, 0); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) BGE_MEMWIN_WRITE(sc, i, 0); /* Set up the PCI DMA control register. */ if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & BGE_PCISTATE_PCI_BUSMODE) { /* Conventional PCI bus */ dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | (0x0F); } else { /* PCI-X bus */ /* * The 5704 uses a different encoding of read/write * watermarks. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); else dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | (0x0F); /* * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround * for hardware bugs. 
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { u_int32_t tmp; tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; if (tmp == 0x6 || tmp == 0x7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; } } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704 || sc->bge_asicrev == BGE_ASICREV_BCM5705) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); #ifdef __brokenalpha__ /* * Must insure that we do not cross an 8K (bytes) boundary * for DMA reads. Our highest limit is 1K bytes. This is a * restriction on some ALPHA platforms with early revision * 21174 PCI chipsets, such as the AlphaPC 164lx */ PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024BYTES, 4); #endif /* Set the timer prescaler (always 66Mhz) */ CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); return(0); } static int bge_blockinit(sc) struct bge_softc *sc; { struct bge_rcb *rcb; volatile struct bge_rcb *vrcb; int i; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips. 
*/ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) { /* Configure mbuf memory pool */ if (sc->bge_extram) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); } /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (sc->bge_asicrev == BGE_ASICREV_BCM5705) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); } CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) { CSR_WRITE_4(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; DELAY(10); } if (i == BGE_TIMEOUT) { printf("bge%d: buffer manager failed to start\n", sc->bge_unit); return(ENXIO); } } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; DELAY(10); } if (i == BGE_TIMEOUT) { printf("bge%d: flow-through queue init failed\n", sc->bge_unit); return(ENXIO); } /* Initialize the standard 
RX ring control block */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (sc->bge_asicrev == BGE_ASICREV_BCM5705) rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); else rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); if (sc->bge_extram) rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); /* * Initialize the jumbo RX ring control block * We set the 'ring disabled' bit in the flags * field until we're actually ready to start * using this ring (i.e. once we set the MTU * high enough to require it). 
*/ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) { rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD); rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED); if (sc->bge_extram) rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; else rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); /* Set up dummy disabled mini ring RCB */ rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); } /* * Set the BD ring replentish thresholds. The recommended * values are 1/8th the number of descriptors allocated to * each ring. */ CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); /* * Disable all unused send rings by setting the 'ring disabled' * bit in the flags field of all the TX send ring control blocks. * These are located in NIC memory. 
*/ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_SEND_RING_RCB); for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); vrcb->bge_nicaddr = 0; vrcb++; } /* Configure TX RCB 0 (we use only the first ring) */ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_SEND_RING_RCB); vrcb->bge_hostaddr.bge_addr_lo = htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr)); vrcb->bge_hostaddr.bge_addr_hi = htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr)); vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT); if (sc->bge_asicrev != BGE_ASICREV_BCM5705) vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0); /* Disable all unused RX return rings */ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB); for (i = 0; i < BGE_RX_RINGS_MAX; i++) { vrcb->bge_hostaddr.bge_addr_hi = 0; vrcb->bge_hostaddr.bge_addr_lo = 0; vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, BGE_RCB_FLAG_RING_DISABLED); vrcb->bge_nicaddr = 0; CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(u_int64_t))), 0); vrcb++; } /* Initialize RX ring indexes */ CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); /* * Set up RX return ring 0 * Note that the NIC address for RX return rings is 0x00000000. * The return rings live entirely within the host, so the * nicaddr field in the RCB isn't used. 
*/ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB); vrcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr); vrcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE); vrcb->bge_nicaddr = 0x00000000; vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Inialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. 
*/ for (i = 0; i < BGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; DELAY(10); } if (i == BGE_TIMEOUT) { printf("bge%d: host coalescing engine failed to idle\n", sc->bge_unit); return(ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (sc->bge_asicrev != BGE_ASICREV_BCM5705) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); /* Set up address of statistics block */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE); sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); /* Turn on RX list placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn 
on RX list selector state machine. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats */ CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); /* Set misc. local control, enable interrupts on attentions */ CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); /* Turn on read DMA state machine */ CSR_WRITE_4(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); /* Turn on send data initiator state machine */ CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, 
    BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	/* Enable all send-data-initiator statistics counters, fast mode. */
	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		/* The 5700 reports link changes via an MI interrupt. */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 *
 * Returns 0 on a match, ENOMEM if the description buffer cannot be
 * allocated, ENXIO if no table entry matches.
 */
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	t = bge_devs;

	sc = device_get_softc(dev);
	/*
	 * NOTE(review): the softc is zeroed and partially initialized
	 * already here in probe; attach re-sets bge_dev/bge_unit later.
	 */
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_unit = device_get_unit(dev);
	sc->bge_dev = dev;

	/* Walk the supported-device table (terminated by a NULL name). */
	while(t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
			if (descbuf == NULL)
				return(ENOMEM);
			/* Append the ASIC revision to the device name. */
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			/* Dell-branded cards have no third LED. */
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_no_3_led = 1;
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Release every DMA map, DMA'able memory region and DMA tag created
 * by bge_dma_alloc().  Safe to call with partially-created resources:
 * each teardown is guarded by a NULL/validity check.
 */
static void
bge_dma_free(sc)
	struct bge_softc *sc;
{
	int i;

	/* Destroy DMA maps for RX buffers */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Destroy DMA maps for jumbo RX buffers */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}

	/* Destroy DMA maps for TX buffers */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);

	/*
	 * Destroy standard RX ring.
	 * NOTE(review): bus_dmamem_free() is issued before
	 * bus_dmamap_unload() throughout this function; busdma(9)
	 * convention is unload-then-free -- confirm intent.
	 */
	if (sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_cdata.bge_rx_std_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	}

	if (sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring */
	if (sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
	}

	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/* Destroy RX return ring */
	if (sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);
	}

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring */
	if (sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);
	}

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block */
	if (sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);
	}

	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block */
	if (sc->bge_ldata.bge_stats)
		bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
		    sc->bge_ldata.bge_stats,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);
	}

	if (sc->bge_cdata.bge_stats_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);

	/* Destroy the parent tag */
	if (sc->bge_cdata.bge_parent_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);

	return;
}

/*
 * Create all DMA tags, maps and DMA'able memory for the RX/TX rings,
 * the status block and the statistics block, and record the bus
 * addresses of each region in the softc.  Returns 0 on success or
 * ENOMEM on any failure; bge_dma_free() is expected to clean up after
 * a partial failure.
 */
static int
bge_dma_alloc(dev)
	device_t dev;
{
	struct bge_softc *sc;
	int nseg, i, error;
	struct bge_dmamap_arg ctx;	/* carries the mapped bus address */

	sc = device_get_softc(dev);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define BGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			PAGE_SIZE, 0,		/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR_32BIT,/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, BGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->bge_cdata.bge_parent_tag);
	/*
	 * NOTE(review): 'error' from the parent tag create above is never
	 * checked before the tag is used -- confirm and add a check.
	 */

	/*
	 * Create tag for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, ETHER_ALIGN,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL,
	    &sc->bge_cdata.bge_mtag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
			    &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return(ENOMEM);
		}
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
			    &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			/*
			 * NOTE(review): message says "RX" but this is the
			 * TX map path.
			 */
			device_printf(dev, "can't create DMA map for RX\n");
			return(ENOMEM);
		}
	}

	/* Create tag for standard RX ring */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for standard RX ring */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_std_ring,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_std_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);

	/* Load the address of the standard RX ring */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
	    BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	/* bge_dma_map_addr() deposited the bus address into ctx. */
	sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;

	/* Jumbo resources exist only on non-5705 parts. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {

		/*
		 * Create tag for jumbo mbufs.
		 * This is really a bit of a kludge. We allocate a special
		 * jumbo buffer pool which (thanks to the way our DMA
		 * memory allocation works) will consist of contiguous
		 * pages. This means that even though a jumbo buffer might
		 * be larger than a page size, we don't really need to
		 * map it into more than one DMA segment. However, the
		 * default mbuf tag will result in multi-segment mappings,
		 * so we have to create a special jumbo mbuf tag that
		 * lets us get away with mapping the jumbo buffers as
		 * a single segment. I think eventually the driver should
		 * be changed so that it uses ordinary mbufs and cluster
		 * buffers, i.e. jumbo frames can span multiple DMA
		 * descriptors. But that's a project for another day.
		 */
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0,
		    NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);

		if (error) {
			device_printf(dev, "could not allocate dma tag\n");
			return (ENOMEM);
		}

		/* Create tag for jumbo RX ring */
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL, BGE_JUMBO_RX_RING_SZ, 1,
		    BGE_JUMBO_RX_RING_SZ, 0, NULL, NULL,
		    &sc->bge_cdata.bge_rx_jumbo_ring_tag);

		if (error) {
			device_printf(dev, "could not allocate dma tag\n");
			return (ENOMEM);
		}

		/* Allocate DMA'able memory for jumbo RX ring */
		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
		    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_ring_map);
		if (error)
			return (ENOMEM);

		bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
		    BGE_JUMBO_RX_RING_SZ);

		/* Load the address of the jumbo RX ring */
		ctx.bge_maxsegs = 1;
		ctx.sc = sc;

		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

		if (error)
			return (ENOMEM);

		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;

		/* Create DMA maps for jumbo RX buffers */
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(dev,
				    "can't create DMA map for RX\n");
				return(ENOMEM);
			}
		}

	}

	/* Create tag for RX return ring */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_rx_return_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
	    BGE_RX_RTN_RING_SZ(sc));

	/* Load the address of the RX return ring */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;

	/* Create tag for TX ring */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_ring_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_tx_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);

	/* Load the address of the TX ring */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;

	/* Create tag for status block */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_status_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for status block */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, (void
**)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_status_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); /* Load the address of the status block */ ctx.sc = sc; ctx.bge_maxsegs = 1; error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; /* Create tag for statistics block */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_stats_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for statistics block */ error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_stats_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); /* Load the address of the statstics block */ ctx.sc = sc; ctx.bge_maxsegs = 1; error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; return(0); } static int bge_attach(dev) device_t dev; { struct ifnet *ifp; struct bge_softc *sc; u_int32_t hwcfg = 0; u_int32_t mac_addr = 0; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->bge_dev = dev; sc->bge_unit = unit; /* * Map control/status registers. 
	 */
	pci_enable_busmaster(dev);

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE|PCI_RF_DENSE);

	if (sc->bge_res == NULL) {
		printf ("bge%d: couldn't map memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
	/* Kernel virtual address of BAR0, used for NIC-memory windows. */
	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);

	/* Allocate interrupt */
	rid = 0;

	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		printf("bge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->bge_unit = unit;

	BGE_LOCK_INIT(sc, device_get_nameunit(dev));

	/* Try to reset the chip. */
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		printf("bge%d: chip initialization failed\n", sc->bge_unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 * First try the 'HK' signature in NIC shared memory; fall back
	 * to reading the address out of the EEPROM directly.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("bge%d: failed to read station address\n", unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/* Save ASIC rev. */
	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * 5705 limits RX return ring to 512 entries.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	else
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;

	if (bge_dma_alloc(dev)) {
		printf ("bge%d: failed to allocate DMA resources\n",
		    sc->bge_unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Try to allocate memory for jumbo buffers.
	 * The 5705 does not appear to support jumbo frames.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		if (bge_alloc_jumbo_mem(sc)) {
			printf("bge%d: jumbo buffer allocation "
			    "failed\n", sc->bge_unit);
			bge_release_resources(sc);
			error = ENXIO;
			goto fail;
		}
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		bge_read_eeprom(sc, (caddr_t)&hwcfg,
		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
	    SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		/* Fiber (TBI): no PHY, manage media ourselves. */
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
			bge_release_resources(sc);
			/*
			 * NOTE(review): bge_free_jumbo_mem() is called
			 * unconditionally here, but jumbo memory is only
			 * allocated for non-5705 chips above -- confirm
			 * this is safe on 5705.
			 */
			bge_free_jumbo_mem(sc);
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_chipid) {
	case BGE_CHIPID_BCM5701_A0:
	case BGE_CHIPID_BCM5701_B0:
	case BGE_CHIPID_BCM5701_B2:
	case BGE_CHIPID_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, sc->arpcom.ac_enaddr);
	callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE);

	/*
	 * Hookup IRQ last.
*/ error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, bge_intr, sc, &sc->bge_intrhand); if (error) { bge_release_resources(sc); printf("bge%d: couldn't set up irq\n", unit); } fail: return(error); } static int bge_detach(dev) device_t dev; { struct bge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = &sc->arpcom.ac_if; BGE_LOCK(sc); bge_stop(sc); bge_reset(sc); BGE_UNLOCK(sc); ether_ifdetach(ifp); if (sc->bge_tbi) { ifmedia_removeall(&sc->bge_ifmedia); } else { bus_generic_detach(dev); device_delete_child(dev, sc->bge_miibus); } bge_release_resources(sc); if (sc->bge_asicrev != BGE_ASICREV_BCM5705) bge_free_jumbo_mem(sc); return(0); } static void bge_release_resources(sc) struct bge_softc *sc; { device_t dev; dev = sc->bge_dev; if (sc->bge_vpd_prodname != NULL) free(sc->bge_vpd_prodname, M_DEVBUF); if (sc->bge_vpd_readonly != NULL) free(sc->bge_vpd_readonly, M_DEVBUF); if (sc->bge_intrhand != NULL) bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); if (sc->bge_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); if (sc->bge_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, BGE_PCI_BAR0, sc->bge_res); bge_dma_free(sc); if (mtx_initialized(&sc->bge_mtx)) /* XXX */ BGE_LOCK_DESTROY(sc); return; } static void bge_reset(sc) struct bge_softc *sc; { device_t dev; u_int32_t cachesize, command, pcistate; int i, val = 0; dev = sc->bge_dev; /* Save some important PCI state. 
*/ cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); command = pci_read_config(dev, BGE_PCI_CMD, 4); pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); /* Issue global reset */ bge_writereg_ind(sc, BGE_MISC_CFG, BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); DELAY(1000); /* Reset some of the PCI state that got zapped by reset */ pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); pci_write_config(dev, BGE_PCI_CMD, command, 4); bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); /* Enable memory arbiter. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705) CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); /* * Prevent PXE restart: write a magic number to the * general communications memory at 0xB50. */ bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); /* * Poll the value location we just wrote until * we see the 1's complement of the magic number. * This indicates that the firmware initialization * is complete. */ for (i = 0; i < BGE_TIMEOUT; i++) { val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); if (val == ~BGE_MAGIC_NUMBER) break; DELAY(10); } if (i == BGE_TIMEOUT) { printf("bge%d: firmware handshake timed out\n", sc->bge_unit); return; } /* * XXX Wait for the value of the PCISTATE register to * return to its original pre-reset state. This is a * fairly good indicator of reset completion. If we don't * wait for the reset to fully complete, trying to read * from the device's non-PCI registers may yield garbage * results. 
 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	DELAY(10000);

	return;
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
 * 2) the frame is from the standard receive ring
 */
static void
bge_rxeof(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	int stdcnt = 0, jumbocnt = 0;

	BGE_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;

	/* Sync the descriptor rings before the chip's writes are read. */
	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_POSTREAD);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_POSTREAD);
	}

	/* Walk the return ring up to the chip's producer index. */
	while(sc->bge_rx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
		struct bge_rx_bd	*cur_rx;
		u_int32_t		rxidx;
		struct ether_header	*eh;
		struct mbuf		*m = NULL;
		u_int16_t		vlan_tag = 0;
		int			have_tag = 0;

		cur_rx =
	    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];

		rxidx = cur_rx->bge_idx;
		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->bge_vlan_tag;
		}

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			/* Frame came from the jumbo ring. */
			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			/* On error, recycle the old mbuf back into the ring. */
			if (cur_rx->bge_flags &
			    BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			/* Frame came from the standard ring. */
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __i386__
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if 0 /* currently broken for some packets, possibly related to TCP options */
		if (ifp->if_hwassist) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}
#endif

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
 */
		if (have_tag)
			VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);

		/* Drop the lock across the stack's input routine. */
		BGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BGE_LOCK(sc);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* Tell the chip how far we got, and replenish the rings we used. */
	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return;
}

/*
 * Transmit completion: reclaim mbufs and DMA maps for descriptors
 * the chip reports as sent, and clear OACTIVE if we made progress.
 */
static void
bge_txeof(sc)
	struct bge_softc *sc;
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		u_int32_t		idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Interrupt handler: ack the interrupt, track link state changes,
 * then service the RX return and TX completion rings.
 */
static void
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t statusword;
	u_int32_t status;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	BGE_LOCK(sc);

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE);

	statusword =
	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);

#ifdef notdef
	/* Avoid this for now --
	   checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
		return;
#endif
	/* Ack interrupt and stop others from occuring. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			sc->bge_link = 0;
			callout_stop(&sc->bge_stat_ch);
			bge_tick_locked(sc);
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
	} else {
		if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
			/*
			 * Sometimes PCS encoding errors are detected in
			 * TBI mode (on fiber NICs), and for some reason
			 * the chip will signal them as link changes.
			 * If we get a link change event, but the 'PCS
			 * encoding error' bit in the MAC status register
			 * is set, don't bother doing a link check.
			 * This avoids spurious "gigabit link up" messages
			 * that sometimes appear on fiber NICs during
			 * periods of heavy traffic. (There should be no
			 * effect on copper NICs.)
			 */
			status = CSR_READ_4(sc, BGE_MAC_STS);
			if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
			    BGE_MACSTAT_MI_COMPLETE))) {
				sc->bge_link = 0;
				callout_stop(&sc->bge_stat_ch);
				bge_tick_locked(sc);
			}
			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
			    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
			    BGE_MACSTAT_LINK_CHANGED);

			/* Force flush the status block cached by PCI bridge */
			CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
		}
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE);

	bge_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/* Restart transmit if packets queued up while we worked. */
	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
		bge_start_locked(ifp);

	BGE_UNLOCK(sc);

	return;
}

/*
 * Once-a-second timer, lock already held: update statistics and,
 * while the link is down, poll for link-up (TBI via the MAC status
 * register, copper via mii_tick()).  Reschedules itself.
 */
static void
bge_tick_locked(sc)
	struct bge_softc *sc;
{
	struct mii_data *mii = NULL;
	struct ifmedia *ifm = NULL;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	BGE_LOCK_ASSERT(sc);

	/* 5705 keeps its stats in registers, not shared memory. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
		bge_stats_update_regs(sc);
	else
		bge_stats_update(sc);
	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
	if (sc->bge_link)
		return;

	if (sc->bge_tbi) {
		ifm = &sc->bge_ifmedia;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			sc->bge_link++;
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
			printf("bge%d: gigabit link up\n", sc->bge_unit);
			if (ifp->if_snd.ifq_head != NULL)
				bge_start_locked(ifp);
		}
		return;
	}

	mii = device_get_softc(sc->bge_miibus);
	mii_tick(mii);

	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
			printf("bge%d: gigabit link up\n",
			   sc->bge_unit);
		if (ifp->if_snd.ifq_head != NULL)
			bge_start_locked(ifp);
	}

	return;
}

/* Callout entry point: take the lock and run bge_tick_locked(). */
static void
bge_tick(xsc)
	void *xsc;
{
	struct bge_softc *sc;

	sc
	    = xsc;

	BGE_LOCK(sc);
	bge_tick_locked(sc);
	BGE_UNLOCK(sc);
}

/*
 * Update the collision counter from the 5705's statistics registers.
 * The whole register block is copied into a local struct, then the
 * delta of the four collision counters is folded into if_collisions.
 */
static void
bge_stats_update_regs(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct bge_mac_stats_regs stats;
	u_int32_t *s;
	int i;

	ifp = &sc->arpcom.ac_if;

	s = (u_int32_t *)&stats;
	/* NOTE: 'i' (int) vs sizeof (size_t) compare is benign: i >= 0. */
	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
		s++;
	}

	ifp->if_collisions +=
	   (stats.dot3StatsSingleCollisionFrames +
	   stats.dot3StatsMultipleCollisionFrames +
	   stats.dot3StatsExcessiveCollisions +
	   stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;

	return;
}

/*
 * Update the collision counter from the statistics block that the
 * chip DMAs into shared memory (non-5705 parts), read through the
 * memory window.
 */
static void
bge_stats_update(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct bge_stats *stats;

	ifp = &sc->arpcom.ac_if;

	stats = (struct bge_stats *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_STATS_BLOCK);

	ifp->if_collisions +=
	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
	   ifp->if_collisions;

#ifdef notdef
	ifp->if_collisions +=
	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
	   ifp->if_collisions;
#endif

	return;
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(sc, m_head, txidx)
	struct bge_softc *sc;
	struct mbuf *m_head;
	u_int32_t *txidx;
{
	struct bge_tx_bd	*f = NULL;
	u_int16_t		csum_flags = 0;
	struct m_tag		*mtag;
	struct bge_dmamap_arg	ctx;
	bus_dmamap_t		map;
	int			error;

	/* Translate the stack's checksum-offload requests into BD flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
	}

	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);

	/* The DMA load callback fills descriptors via this context. */
	ctx.sc = sc;
	ctx.bge_idx = *txidx;
	ctx.bge_ring = sc->bge_ldata.bge_tx_ring;
	ctx.bge_flags = csum_flags;
	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16;

	map = sc->bge_cdata.bge_tx_dmamap[*txidx];
	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
	    m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT);

	if (error || ctx.bge_maxsegs == 0 /*||
	    ctx.bge_idx == sc->bge_tx_saved_considx*/)
		return (ENOBUFS);

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->bge_cdata.bge_tx_dmamap[*txidx] =
	    sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx];
	sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map;
	sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head;
	sc->bge_txcnt += ctx.bge_maxsegs;

	/* Stamp the VLAN tag (if any) into the last descriptor. */
	f = &sc->bge_ldata.bge_tx_ring[*txidx];
	if (mtag != NULL) {
		f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG);
		f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag));
	} else {
		f->bge_vlan_tag = 0;
	}

	BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT);
	*txidx = ctx.bge_idx;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
bge_start_locked(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;

	sc = ifp->if_softc;

	/* Without link, let a few packets queue up before bothering. */
	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
		return;

	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);

	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill.  If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (bge_encap(sc, m_head, &prodidx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * Unlocked wrapper around bge_start_locked(): the ifnet if_start
 * entry point.
 */
static void
bge_start(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;

	sc = ifp->if_softc;
	BGE_LOCK(sc);
	bge_start_locked(ifp);
	BGE_UNLOCK(sc);
}

/*
 * (Re)initialize the hardware and mark the interface running.
 * Caller holds the driver lock; a no-op if already IFF_RUNNING.
 */
static void
bge_init_locked(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	u_int16_t *m;

	BGE_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Cancel pending I/O and flush buffers.
 */
	bge_stop(sc);
	bge_reset(sc);
	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		printf("bge%d: initialization failure\n", sc->bge_unit);
		return;
	}

	ifp = &sc->arpcom.ac_if;

	/* Specify MTU. */
	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	}

	/* Program multicast filter. */
	bge_setmulti(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to insure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		u_int32_t		v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			printf ("bge%d: 5705 A0 chip failed to load RX ring\n",
			    sc->bge_unit);
	}

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Turn on transmitter */
	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/* Turn on receiver */
	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts.
	 */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);

	return;
}

/* Unlocked wrapper around bge_init_locked() (if_init entry point). */
static void
bge_init(xsc)
	void *xsc;
{
	struct bge_softc *sc = xsc;

	BGE_LOCK(sc);
	bge_init_locked(sc);
	BGE_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
bge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_tbi) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return(EINVAL);
		switch(IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return(EINVAL);
		}
		return(0);
	}

	/* Copper: reset all PHYs and let the MII layer change media. */
	mii = device_get_softc(sc->bge_miibus);
	sc->bge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	/* TBI (fiber): derive status straight from the MAC registers. */
	if (sc->bge_tbi) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	/* Copper: ask the MII layer. */
	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * ioctl entry point: MTU, flags, multicast, media and capability
 * requests; everything else is passed to ether_ioctl().
 */
static int
bge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int mask, error = 0;
	struct mii_data *mii;

	switch(command) {
	case SIOCSIFMTU:
		/* Disallow jumbo frames on 5705. */
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
		    ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Force a full re-init so ring sizes match the MTU. */
			ifp->if_flags &= ~IFF_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->bge_if_flags & IFF_PROMISC)) {
				BGE_SETBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->bge_if_flags & IFF_PROMISC) {
				BGE_CLRBIT(sc, BGE_RX_MODE,
				    BGE_RXMODE_RX_PROMISC);
			} else
				bge_init_locked(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				bge_stop(sc);
			}
		}
		/* Remember the flags so the next call can diff them. */
		sc->bge_if_flags = ifp->if_flags;
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* TBI NICs use our own media list; copper goes via MII. */
		if (sc->bge_tbi) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		/* Toggle hardware checksumming on request. */
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		error = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * TX watchdog: the chip stopped making progress within the 5 second
 * window armed in bge_start_locked(); reset and reinitialize it.
 */
static void
bge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(sc);

	ifp->if_oerrors++;

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmedia_entry *ifm;
	struct mii_data *mii = NULL;
	int mtmp, itmp;

	BGE_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;

	if (!sc->bge_tbi)
		mii = device_get_softc(sc->bge_miibus);

	callout_stop(&sc->bge_stat_ch);

	/*
	 * Disable all of the receiver blocks
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list.
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!sc->bge_tbi) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_link = 0;

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(dev)
	device_t dev;
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);

	return;
}
Index: head/sys/dev/bktr/bktr_mem.c
===================================================================
--- head/sys/dev/bktr/bktr_mem.c	(revision 129878)
+++ head/sys/dev/bktr/bktr_mem.c	(revision 129879)
@@ -1,176 +1,177 @@
#include
__FBSDID("$FreeBSD$");

/*
 * This is part of the Driver for Video Capture Cards (Frame grabbers)
 * and TV Tuner cards using the Brooktree Bt848, Bt848A, Bt849A, Bt878, Bt879
 * chipset.
 * Copyright Roger Hardiman.
 *
 * bktr_mem : This kernel module allows us to keep our allocated
 * contiguous memory for the video buffer, DMA programs and VBI data
 * while the main bktr driver is unloaded and reloaded.
 * This avoids the problem of trying to allocate contiguous each
 * time the bktr driver is loaded.
 */

/*
 * 1. Redistributions of source code must retain the
 * Copyright (c) 2000 Roger Hardiman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Roger Hardiman * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #include +#include #include #include struct memory_pointers { int addresses_stored; vm_offset_t dma_prog; vm_offset_t odd_dma_prog; vm_offset_t vbidata; vm_offset_t vbibuffer; vm_offset_t buf; } memory_pointers; static struct memory_pointers memory_list[BKTR_MEM_MAX_DEVICES]; /*************************************************************/ static int bktr_mem_modevent(module_t mod, int type, void *unused){ switch (type) { case MOD_LOAD: { printf("bktr_mem: memory holder loaded\n"); /* * bzero causes a panic. 
bzero((caddr_t)memory_list, sizeof(memory_list)); * So use a simple for loop for now. */ {int x; unsigned char *d = (unsigned char *)memory_list; for (x=0; x< sizeof(memory_list); x++) { d[x]=0; } } return 0; } case MOD_UNLOAD: { printf("bktr_mem: memory holder cannot be unloaded\n"); return EBUSY; } default: break; } return 0; }; /*************************************************************/ int bktr_has_stored_addresses(int unit) { if ((unit < 0) || (unit >= BKTR_MEM_MAX_DEVICES)) { printf("bktr_mem: Unit number %d invalid\n",unit); return 0; } return memory_list[unit].addresses_stored; } /*************************************************************/ void bktr_store_address(int unit, int type, vm_offset_t addr) { if ((unit < 0) || (unit >= BKTR_MEM_MAX_DEVICES)) { printf("bktr_mem: Unit number %d invalid for memory type %d, address 0x%x\n" ,unit,type,addr); return; } switch (type) { case BKTR_MEM_DMA_PROG: memory_list[unit].dma_prog = addr; memory_list[unit].addresses_stored = 1; break; case BKTR_MEM_ODD_DMA_PROG: memory_list[unit].odd_dma_prog = addr; memory_list[unit].addresses_stored = 1; break; case BKTR_MEM_VBIDATA: memory_list[unit].vbidata = addr; memory_list[unit].addresses_stored = 1; break; case BKTR_MEM_VBIBUFFER: memory_list[unit].vbibuffer = addr; memory_list[unit].addresses_stored = 1; break; case BKTR_MEM_BUF: memory_list[unit].buf = addr; memory_list[unit].addresses_stored = 1; break; default: printf("bktr_mem: Invalid memory type %d for bktr%d, address 0x%xn", type,unit,addr); break; } } /*************************************************************/ vm_offset_t bktr_retrieve_address(int unit, int type) { if ((unit < 0) || (unit >= BKTR_MEM_MAX_DEVICES)) { printf("bktr_mem: Unit number %d too large for memory type %d\n",unit,type); return NULL; } switch (type) { case BKTR_MEM_DMA_PROG: return memory_list[unit].dma_prog; case BKTR_MEM_ODD_DMA_PROG: return memory_list[unit].odd_dma_prog; case BKTR_MEM_VBIDATA: return memory_list[unit].vbidata; 
case BKTR_MEM_VBIBUFFER: return memory_list[unit].vbibuffer; case BKTR_MEM_BUF: return memory_list[unit].buf; default: printf("bktr_mem: Invalid memory type %d for bktr%d",type,unit); return NULL; } } /*************************************************************/ static moduledata_t bktr_mem_mod = { "bktr_mem", bktr_mem_modevent, 0 }; /* The load order is First and module type is Driver to make sure bktr_mem loads (and initialises) before bktr when both are loaded together */ DECLARE_MODULE(bktr_mem, bktr_mem_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(bktr_mem, 1); Index: head/sys/dev/bktr/bktr_os.c =================================================================== --- head/sys/dev/bktr/bktr_os.c (revision 129878) +++ head/sys/dev/bktr/bktr_os.c (revision 129879) @@ -1,1332 +1,1333 @@ /*- * 1. Redistributions of source code must retain the * Copyright (c) 1997 Amancio Hasty, 1999 Roger Hardiman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Amancio Hasty and * Roger Hardiman * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This is part of the Driver for Video Capture Cards (Frame grabbers) * and TV Tuner cards using the Brooktree Bt848, Bt848A, Bt849A, Bt878, Bt879 * chipset. * Copyright Roger Hardiman and Amancio Hasty. * * bktr_os : This has all the Operating System dependant code, * probe/attach and open/close/ioctl/read/mmap * memory allocation * PCI bus interfacing */ #include "opt_bktr.h" /* include any kernel config options */ #define FIFO_RISC_DISABLED 0 #define ALL_INTS_DISABLED 0 /*******************/ /* *** FreeBSD *** */ /*******************/ #ifdef __FreeBSD__ #include #include #include #include #include +#include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include #if (__FreeBSD_version >=400000) #include /* used by smbus and newbus */ #endif #if (__FreeBSD_version >=300000) #include /* used by bus space */ #include /* used by bus space and newbus */ #include #endif #if (__FreeBSD_version >=400000) #include /* used by newbus */ #include /* used by newbus */ #endif #if (__FreeBSD_version < 500000) #include /* for DELAY */ #include #include #else #include #include #endif #include int bt848_card = -1; int bt848_tuner = -1; int 
bt848_reverse_mute = -1; int bt848_format = -1; int bt848_slow_msp_audio = -1; #ifdef BKTR_NEW_MSP34XX_DRIVER int bt848_stereo_once = 0; /* no continuous stereo monitoring */ int bt848_amsound = 0; /* hard-wire AM sound at 6.5 Hz (france), the autoscan seems work well only with FM... */ int bt848_dolby = 0; #endif SYSCTL_NODE(_hw, OID_AUTO, bt848, CTLFLAG_RW, 0, "Bt848 Driver mgmt"); SYSCTL_INT(_hw_bt848, OID_AUTO, card, CTLFLAG_RW, &bt848_card, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, tuner, CTLFLAG_RW, &bt848_tuner, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, reverse_mute, CTLFLAG_RW, &bt848_reverse_mute, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, format, CTLFLAG_RW, &bt848_format, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, slow_msp_audio, CTLFLAG_RW, &bt848_slow_msp_audio, -1, ""); #ifdef BKTR_NEW_MSP34XX_DRIVER SYSCTL_INT(_hw_bt848, OID_AUTO, stereo_once, CTLFLAG_RW, &bt848_stereo_once, 0, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, amsound, CTLFLAG_RW, &bt848_amsound, 0, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, dolby, CTLFLAG_RW, &bt848_dolby, 0, ""); #endif #endif /* end freebsd section */ /****************/ /* *** BSDI *** */ /****************/ #ifdef __bsdi__ #endif /* __bsdi__ */ /**************************/ /* *** OpenBSD/NetBSD *** */ /**************************/ #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #include #include #include #include #include #include #include #include #include #ifndef __NetBSD__ #include #include #include #endif #include #include #include #include #define BKTR_DEBUG #ifdef BKTR_DEBUG int bktr_debug = 0; #define DPR(x) (bktr_debug ? 
printf x : 0) #else #define DPR(x) #endif #endif /* __NetBSD__ || __OpenBSD__ */ #ifdef __NetBSD__ #include /* NetBSD location for .h files */ #include #include #include #include #include #include #else /* Traditional location for .h files */ #include #include /* extensions to ioctl_meteor.h */ #include #include #include #include #include #include #if defined(BKTR_USE_FREEBSD_SMBUS) #include #include "iicbb_if.h" #include "smbus_if.h" #endif #endif /****************************/ /* *** FreeBSD 4.x code *** */ /****************************/ #if (__FreeBSD_version >= 400000) static int bktr_probe( device_t dev ); static int bktr_attach( device_t dev ); static int bktr_detach( device_t dev ); static int bktr_shutdown( device_t dev ); static void bktr_intr(void *arg) { common_bktr_intr(arg); } static device_method_t bktr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bktr_probe), DEVMETHOD(device_attach, bktr_attach), DEVMETHOD(device_detach, bktr_detach), DEVMETHOD(device_shutdown, bktr_shutdown), #if defined(BKTR_USE_FREEBSD_SMBUS) /* iicbb interface */ DEVMETHOD(iicbb_callback, bti2c_iic_callback), DEVMETHOD(iicbb_setsda, bti2c_iic_setsda), DEVMETHOD(iicbb_setscl, bti2c_iic_setscl), DEVMETHOD(iicbb_getsda, bti2c_iic_getsda), DEVMETHOD(iicbb_getscl, bti2c_iic_getscl), DEVMETHOD(iicbb_reset, bti2c_iic_reset), /* smbus interface */ DEVMETHOD(smbus_callback, bti2c_smb_callback), DEVMETHOD(smbus_writeb, bti2c_smb_writeb), DEVMETHOD(smbus_writew, bti2c_smb_writew), DEVMETHOD(smbus_readb, bti2c_smb_readb), #endif { 0, 0 } }; static driver_t bktr_driver = { "bktr", bktr_methods, sizeof(struct bktr_softc), }; static devclass_t bktr_devclass; static d_open_t bktr_open; static d_close_t bktr_close; static d_read_t bktr_read; static d_write_t bktr_write; static d_ioctl_t bktr_ioctl; static d_mmap_t bktr_mmap; static d_poll_t bktr_poll; static struct cdevsw bktr_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = bktr_open, .d_close = bktr_close, 
.d_read = bktr_read, .d_write = bktr_write, .d_ioctl = bktr_ioctl, .d_poll = bktr_poll, .d_mmap = bktr_mmap, .d_name = "bktr", }; DRIVER_MODULE(bktr, pci, bktr_driver, bktr_devclass, 0, 0); #if (__FreeBSD_version > 410000) MODULE_DEPEND(bktr, bktr_mem, 1,1,1); MODULE_VERSION(bktr, 1); #endif /* * the boot time probe routine. */ static int bktr_probe( device_t dev ) { unsigned int type = pci_get_devid(dev); unsigned int rev = pci_get_revid(dev); if (PCI_VENDOR(type) == PCI_VENDOR_BROOKTREE) { switch (PCI_PRODUCT(type)) { case PCI_PRODUCT_BROOKTREE_BT848: if (rev == 0x12) device_set_desc(dev, "BrookTree 848A"); else device_set_desc(dev, "BrookTree 848"); return 0; case PCI_PRODUCT_BROOKTREE_BT849: device_set_desc(dev, "BrookTree 849A"); return 0; case PCI_PRODUCT_BROOKTREE_BT878: device_set_desc(dev, "BrookTree 878"); return 0; case PCI_PRODUCT_BROOKTREE_BT879: device_set_desc(dev, "BrookTree 879"); return 0; } }; return ENXIO; } /* * the attach routine. */ static int bktr_attach( device_t dev ) { u_long latency; u_long fun; u_long val; unsigned int rev; unsigned int unit; int error = 0; #ifdef BROOKTREE_IRQ u_long old_irq, new_irq; #endif struct bktr_softc *bktr = device_get_softc(dev); unit = device_get_unit(dev); /* build the device name for bktr_name() */ snprintf(bktr->bktr_xname, sizeof(bktr->bktr_xname), "bktr%d",unit); /* * Enable bus mastering and Memory Mapped device */ val = pci_read_config(dev, PCIR_COMMAND, 4); val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, val, 4); /* * Map control/status registers. 
*/ bktr->mem_rid = PCIR_BAR(0); bktr->res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bktr->mem_rid, RF_ACTIVE); if (!bktr->res_mem) { device_printf(dev, "could not map memory\n"); error = ENXIO; goto fail; } bktr->memt = rman_get_bustag(bktr->res_mem); bktr->memh = rman_get_bushandle(bktr->res_mem); /* * Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); #ifdef BROOKTREE_IRQ /* from the configuration file */ old_irq = pci_conf_read(tag, PCI_INTERRUPT_REG); pci_conf_write(tag, PCI_INTERRUPT_REG, BROOKTREE_IRQ); new_irq = pci_conf_read(tag, PCI_INTERRUPT_REG); printf("bktr%d: attach: irq changed from %d to %d\n", unit, (old_irq & 0xff), (new_irq & 0xff)); #endif /* * Allocate our interrupt. */ bktr->irq_rid = 0; bktr->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &bktr->irq_rid, RF_SHAREABLE | RF_ACTIVE); if (bktr->res_irq == NULL) { device_printf(dev, "could not map interrupt\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, bktr->res_irq, INTR_TYPE_TTY, bktr_intr, bktr, &bktr->res_ih); if (error) { device_printf(dev, "could not setup irq\n"); goto fail; } /* Update the Device Control Register */ /* on Bt878 and Bt879 cards */ fun = pci_read_config( dev, 0x40, 2); fun = fun | 1; /* Enable writes to the sub-system vendor ID */ #if defined( BKTR_430_FX_MODE ) if (bootverbose) printf("Using 430 FX chipset compatibilty mode\n"); fun = fun | 2; /* Enable Intel 430 FX compatibility mode */ #endif #if defined( BKTR_SIS_VIA_MODE ) if (bootverbose) printf("Using SiS/VIA chipset compatibilty mode\n"); fun = fun | 4; /* Enable SiS/VIA compatibility mode (usefull for OPTi chipset motherboards too */ #endif pci_write_config(dev, 0x40, fun, 2); #if defined(BKTR_USE_FREEBSD_SMBUS) if (bt848_i2c_attach(dev)) printf("bktr%d: i2c_attach: can't attach\n", unit); #endif /* * PCI latency timer. 
32 is a good value for 4 bus mastering slots, if * you have more than four, then 16 would probably be a better value. */ #ifndef BROOKTREE_DEF_LATENCY_VALUE #define BROOKTREE_DEF_LATENCY_VALUE 10 #endif latency = pci_read_config(dev, PCI_LATENCY_TIMER, 4); latency = (latency >> 8) & 0xff; if ( bootverbose ) { if (latency) printf("brooktree%d: PCI bus latency is", unit); else printf("brooktree%d: PCI bus latency was 0 changing to", unit); } if ( !latency ) { latency = BROOKTREE_DEF_LATENCY_VALUE; pci_write_config(dev, PCI_LATENCY_TIMER, latency<<8, 4); } if ( bootverbose ) { printf(" %d.\n", (int) latency); } /* read the pci device id and revision id */ fun = pci_get_devid(dev); rev = pci_get_revid(dev); /* call the common attach code */ common_bktr_attach( bktr, unit, fun, rev ); /* make the device entries */ bktr->bktrdev = make_dev(&bktr_cdevsw, unit, 0, 0, 0444, "bktr%d", unit); bktr->tunerdev= make_dev(&bktr_cdevsw, unit+16, 0, 0, 0444, "tuner%d", unit); bktr->vbidev = make_dev(&bktr_cdevsw, unit+32, 0, 0, 0444, "vbi%d" , unit); /* if this is unit 0 (/dev/bktr0, /dev/tuner0, /dev/vbi0) then make */ /* alias entries to /dev/bktr /dev/tuner and /dev/vbi */ #if (__FreeBSD_version >=500000) if (unit == 0) { bktr->bktrdev_alias = make_dev_alias(bktr->bktrdev, "bktr"); bktr->tunerdev_alias= make_dev_alias(bktr->tunerdev, "tuner"); bktr->vbidev_alias = make_dev_alias(bktr->vbidev, "vbi"); } #endif return 0; fail: if (bktr->res_irq) bus_release_resource(dev, SYS_RES_IRQ, bktr->irq_rid, bktr->res_irq); if (bktr->res_mem) bus_release_resource(dev, SYS_RES_MEMORY, bktr->mem_rid, bktr->res_mem); return error; } /* * the detach routine. 
*/ static int bktr_detach( device_t dev ) { struct bktr_softc *bktr = device_get_softc(dev); #ifdef BKTR_NEW_MSP34XX_DRIVER /* Disable the soundchip and kernel thread */ if (bktr->msp3400c_info != NULL) msp_detach(bktr); #endif /* Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); #if defined(BKTR_USE_FREEBSD_SMBUS) if (bt848_i2c_detach(dev)) printf("bktr%d: i2c_detach: can't detach\n", device_get_unit(dev)); #endif #ifdef USE_VBIMUTEX mtx_destroy(&bktr->vbimutex); #endif /* Note: We do not free memory for RISC programs, grab buffer, vbi buffers */ /* The memory is retained by the bktr_mem module so we can unload and */ /* then reload the main bktr driver module */ /* Unregister the /dev/bktrN, tunerN and vbiN devices, * the aliases for unit 0 are automatically destroyed */ destroy_dev(bktr->vbidev); destroy_dev(bktr->tunerdev); destroy_dev(bktr->bktrdev); /* * Deallocate resources. */ bus_teardown_intr(dev, bktr->res_irq, bktr->res_ih); bus_release_resource(dev, SYS_RES_IRQ, bktr->irq_rid, bktr->res_irq); bus_release_resource(dev, SYS_RES_MEMORY, bktr->mem_rid, bktr->res_mem); return 0; } /* * the shutdown routine. 
*/ static int bktr_shutdown( device_t dev ) { struct bktr_softc *bktr = device_get_softc(dev); /* Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); return 0; } /* * Special Memory Allocation */ vm_offset_t get_bktr_mem( int unit, unsigned size ) { vm_offset_t addr = 0; addr = (vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, 1<<24, 0); if (addr == 0) addr = (vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (addr == 0) { printf("bktr%d: Unable to allocate %d bytes of memory.\n", unit, size); } return( addr ); } /*--------------------------------------------------------- ** ** BrookTree 848 character device driver routines ** **--------------------------------------------------------- */ #define VIDEO_DEV 0x00 #define TUNER_DEV 0x01 #define VBI_DEV 0x02 #define UNIT(x) ((x) & 0x0f) #define FUNCTION(x) (x >> 4) /* * */ static int bktr_open( dev_t dev, int flags, int fmt, struct thread *td ) { bktr_ptr_t bktr; int unit; int result; unit = UNIT( minor(dev) ); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } if (!(bktr->flags & METEOR_INITALIZED)) /* device not found */ return( ENXIO ); /* Record that the device is now busy */ device_busy(devclass_get_device(bktr_devclass, unit)); if (bt848_card != -1) { if ((bt848_card >> 8 == unit ) && ( (bt848_card & 0xff) < Bt848_MAX_CARD )) { if ( bktr->bt848_card != (bt848_card & 0xff) ) { bktr->bt848_card = (bt848_card & 0xff); probeCard(bktr, FALSE, unit); } } } if (bt848_tuner != -1) { if ((bt848_tuner >> 8 == unit ) && ( (bt848_tuner & 0xff) < Bt848_MAX_TUNER )) { if ( bktr->bt848_tuner != (bt848_tuner & 0xff) ) { bktr->bt848_tuner = (bt848_tuner & 0xff); probeCard(bktr, FALSE, unit); } } } if (bt848_reverse_mute != -1) { if ((bt848_reverse_mute >> 8) == unit ) { 
bktr->reverse_mute = bt848_reverse_mute & 0xff; } } if (bt848_slow_msp_audio != -1) { if ((bt848_slow_msp_audio >> 8) == unit ) { bktr->slow_msp_audio = (bt848_slow_msp_audio & 0xff); } } #ifdef BKTR_NEW_MSP34XX_DRIVER if (bt848_stereo_once != 0) { if ((bt848_stereo_once >> 8) == unit ) { bktr->stereo_once = (bt848_stereo_once & 0xff); } } if (bt848_amsound != -1) { if ((bt848_amsound >> 8) == unit ) { bktr->amsound = (bt848_amsound & 0xff); } } if (bt848_dolby != -1) { if ((bt848_dolby >> 8) == unit ) { bktr->dolby = (bt848_dolby & 0xff); } } #endif switch ( FUNCTION( minor(dev) ) ) { case VIDEO_DEV: result = video_open( bktr ); break; case TUNER_DEV: result = tuner_open( bktr ); break; case VBI_DEV: result = vbi_open( bktr ); break; default: result = ENXIO; break; } /* If there was an error opening the device, undo the busy status */ if (result != 0) device_unbusy(devclass_get_device(bktr_devclass, unit)); return( result ); } /* * */ static int bktr_close( dev_t dev, int flags, int fmt, struct thread *td ) { bktr_ptr_t bktr; int unit; int result; unit = UNIT( minor(dev) ); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } switch ( FUNCTION( minor(dev) ) ) { case VIDEO_DEV: result = video_close( bktr ); break; case TUNER_DEV: result = tuner_close( bktr ); break; case VBI_DEV: result = vbi_close( bktr ); break; default: return (ENXIO); break; } device_unbusy(devclass_get_device(bktr_devclass, unit)); return( result ); } /* * */ static int bktr_read( dev_t dev, struct uio *uio, int ioflag ) { bktr_ptr_t bktr; int unit; unit = UNIT(minor(dev)); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } switch ( FUNCTION( minor(dev) ) ) { case VIDEO_DEV: return( video_read( bktr, unit, dev, uio ) ); case VBI_DEV: return( 
vbi_read( bktr, uio, ioflag ) ); } return( ENXIO ); } /* * */ static int bktr_write( dev_t dev, struct uio *uio, int ioflag ) { return( EINVAL ); /* XXX or ENXIO ? */ } /* * */ static int bktr_ioctl( dev_t dev, ioctl_cmd_t cmd, caddr_t arg, int flag, struct thread *td ) { bktr_ptr_t bktr; int unit; unit = UNIT(minor(dev)); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } if (bktr->bigbuf == 0) /* no frame buffer allocated (ioctl failed) */ return( ENOMEM ); switch ( FUNCTION( minor(dev) ) ) { case VIDEO_DEV: return( video_ioctl( bktr, unit, cmd, arg, td ) ); case TUNER_DEV: return( tuner_ioctl( bktr, unit, cmd, arg, td ) ); } return( ENXIO ); } /* * */ static int bktr_mmap( dev_t dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot ) { int unit; bktr_ptr_t bktr; unit = UNIT(minor(dev)); if (FUNCTION(minor(dev)) > 0) /* only allow mmap on /dev/bktr[n] */ return( -1 ); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } if (nprot & PROT_EXEC) return( -1 ); if (offset < 0) return( -1 ); if (offset >= bktr->alloc_pages * PAGE_SIZE) return( -1 ); *paddr = vtophys(bktr->bigbuf) + offset; return( 0 ); } static int bktr_poll( dev_t dev, int events, struct thread *td) { int unit; bktr_ptr_t bktr; int revents = 0; DECLARE_INTR_MASK(s); unit = UNIT(minor(dev)); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } LOCK_VBI(bktr); DISABLE_INTR(s); if (events & (POLLIN | POLLRDNORM)) { switch ( FUNCTION( minor(dev) ) ) { case VBI_DEV: if(bktr->vbisize == 0) selrecord(td, &bktr->vbi_select); else revents |= events & (POLLIN | POLLRDNORM); break; } } ENABLE_INTR(s); UNLOCK_VBI(bktr); return (revents); 
} #endif /* FreeBSD 4.x specific kernel interface routines */ /*****************/ /* *** BSDI *** */ /*****************/ #if defined(__bsdi__) #endif /* __bsdi__ BSDI specific kernel interface routines */ /*****************************/ /* *** OpenBSD / NetBSD *** */ /*****************************/ #if defined(__NetBSD__) || defined(__OpenBSD__) #define IPL_VIDEO IPL_BIO /* XXX */ static int bktr_intr(void *arg) { return common_bktr_intr(arg); } #define bktr_open bktropen #define bktr_close bktrclose #define bktr_read bktrread #define bktr_write bktrwrite #define bktr_ioctl bktrioctl #define bktr_mmap bktrmmap vm_offset_t vm_page_alloc_contig(vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t); #if defined(__OpenBSD__) static int bktr_probe(struct device *, void *, void *); #else static int bktr_probe(struct device *, struct cfdata *, void *); #endif static void bktr_attach(struct device *, struct device *, void *); struct cfattach bktr_ca = { sizeof(struct bktr_softc), bktr_probe, bktr_attach }; #if defined(__NetBSD__) extern struct cfdriver bktr_cd; #else struct cfdriver bktr_cd = { NULL, "bktr", DV_DULL }; #endif int bktr_probe(parent, match, aux) struct device *parent; #if defined(__OpenBSD__) void *match; #else struct cfdata *match; #endif void *aux; { struct pci_attach_args *pa = aux; if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROOKTREE && (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT848 || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT849 || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT878 || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT879)) return 1; return 0; } /* * the attach routine. 
*/ static void bktr_attach(struct device *parent, struct device *self, void *aux) { bktr_ptr_t bktr; u_long latency; u_long fun; unsigned int rev; #if defined(__OpenBSD__) struct pci_attach_args *pa = aux; pci_chipset_tag_t pc = pa->pa_pc; pci_intr_handle_t ih; const char *intrstr; int retval; int unit; bktr = (bktr_ptr_t)self; unit = bktr->bktr_dev.dv_unit; bktr->pc = pa->pa_pc; bktr->tag = pa->pa_tag; bktr->dmat = pa->pa_dmat; /* * map memory */ bktr->memt = pa->pa_memt; retval = pci_mem_find(pc, pa->pa_tag, PCI_MAPREG_START, &bktr->phys_base, &bktr->obmemsz, NULL); if (!retval) retval = bus_space_map(pa->pa_memt, bktr->phys_base, bktr->obmemsz, 0, &bktr->memh); if (retval) { printf(": couldn't map memory\n"); return; } /* * map interrupt */ if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin, pa->pa_intrline, &ih)) { printf(": couldn't map interrupt\n"); return; } intrstr = pci_intr_string(pa->pa_pc, ih); bktr->ih = pci_intr_establish(pa->pa_pc, ih, IPL_VIDEO, bktr_intr, bktr, bktr->bktr_dev.dv_xname); if (bktr->ih == NULL) { printf(": couldn't establish interrupt"); if (intrstr != NULL) printf(" at %s", intrstr); printf("\n"); return; } if (intrstr != NULL) printf(": %s\n", intrstr); #endif /* __OpenBSD__ */ #if defined(__NetBSD__) struct pci_attach_args *pa = aux; pci_intr_handle_t ih; const char *intrstr; int retval; int unit; bktr = (bktr_ptr_t)self; unit = bktr->bktr_dev.dv_unit; bktr->dmat = pa->pa_dmat; printf("\n"); /* * map memory */ retval = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, &bktr->memt, &bktr->memh, NULL, &bktr->obmemsz); DPR(("pci_mapreg_map: memt %x, memh %x, size %x\n", bktr->memt, (u_int)bktr->memh, (u_int)bktr->obmemsz)); if (retval) { printf("%s: couldn't map memory\n", bktr_name(bktr)); return; } /* * Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); /* * map interrupt */ if (pci_intr_map(pa->pa_pc, 
pa->pa_intrtag, pa->pa_intrpin, pa->pa_intrline, &ih)) { printf("%s: couldn't map interrupt\n", bktr_name(bktr)); return; } intrstr = pci_intr_string(pa->pa_pc, ih); bktr->ih = pci_intr_establish(pa->pa_pc, ih, IPL_VIDEO, bktr_intr, bktr); if (bktr->ih == NULL) { printf("%s: couldn't establish interrupt", bktr_name(bktr)); if (intrstr != NULL) printf(" at %s", intrstr); printf("\n"); return; } if (intrstr != NULL) printf("%s: interrupting at %s\n", bktr_name(bktr), intrstr); #endif /* __NetBSD__ */ /* * PCI latency timer. 32 is a good value for 4 bus mastering slots, if * you have more than four, then 16 would probably be a better value. */ #ifndef BROOKTREE_DEF_LATENCY_VALUE #define BROOKTREE_DEF_LATENCY_VALUE 10 #endif latency = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_LATENCY_TIMER); latency = (latency >> 8) & 0xff; if (!latency) { if (bootverbose) { printf("%s: PCI bus latency was 0 changing to %d", bktr_name(bktr), BROOKTREE_DEF_LATENCY_VALUE); } latency = BROOKTREE_DEF_LATENCY_VALUE; pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_LATENCY_TIMER, latency<<8); } /* Enabled Bus Master XXX: check if all old DMA is stopped first (e.g. 
after warm boot) */ fun = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, fun | PCI_COMMAND_MASTER_ENABLE); /* read the pci id and determine the card type */ fun = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0x000000ff; common_bktr_attach(bktr, unit, fun, rev); } /* * Special Memory Allocation */ vm_offset_t get_bktr_mem(bktr, dmapp, size) bktr_ptr_t bktr; bus_dmamap_t *dmapp; unsigned int size; { bus_dma_tag_t dmat = bktr->dmat; bus_dma_segment_t seg; bus_size_t align; int rseg; caddr_t kva; /* * Allocate a DMA area */ align = 1 << 24; if (bus_dmamem_alloc(dmat, size, align, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { align = PAGE_SIZE; if (bus_dmamem_alloc(dmat, size, align, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { printf("%s: Unable to dmamem_alloc of %d bytes\n", bktr_name(bktr), size); return 0; } } if (bus_dmamem_map(dmat, &seg, rseg, size, &kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) { printf("%s: Unable to dmamem_map of %d bytes\n", bktr_name(bktr), size); bus_dmamem_free(dmat, &seg, rseg); return 0; } #ifdef __OpenBSD__ bktr->dm_mapsize = size; #endif /* * Create and load the DMA map for the DMA area */ if (bus_dmamap_create(dmat, size, 1, size, 0, BUS_DMA_NOWAIT, dmapp)) { printf("%s: Unable to dmamap_create of %d bytes\n", bktr_name(bktr), size); bus_dmamem_unmap(dmat, kva, size); bus_dmamem_free(dmat, &seg, rseg); return 0; } if (bus_dmamap_load(dmat, *dmapp, kva, size, NULL, BUS_DMA_NOWAIT)) { printf("%s: Unable to dmamap_load of %d bytes\n", bktr_name(bktr), size); bus_dmamem_unmap(dmat, kva, size); bus_dmamem_free(dmat, &seg, rseg); bus_dmamap_destroy(dmat, *dmapp); return 0; } return (vm_offset_t)kva; } void free_bktr_mem(bktr, dmap, kva) bktr_ptr_t bktr; bus_dmamap_t dmap; vm_offset_t kva; { bus_dma_tag_t dmat = bktr->dmat; #ifdef __NetBSD__ bus_dmamem_unmap(dmat, (caddr_t)kva, dmap->dm_mapsize); #else 
bus_dmamem_unmap(dmat, (caddr_t)kva, bktr->dm_mapsize); #endif bus_dmamem_free(dmat, dmap->dm_segs, 1); bus_dmamap_destroy(dmat, dmap); } /*--------------------------------------------------------- ** ** BrookTree 848 character device driver routines ** **--------------------------------------------------------- */ #define VIDEO_DEV 0x00 #define TUNER_DEV 0x01 #define VBI_DEV 0x02 #define UNIT(x) (minor((x) & 0x0f)) #define FUNCTION(x) (minor((x >> 4) & 0x0f)) /* * */ int bktr_open(dev_t dev, int flags, int fmt, struct thread *td) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); /* unit out of range */ if ((unit > bktr_cd.cd_ndevs) || (bktr_cd.cd_devs[unit] == NULL)) return(ENXIO); bktr = bktr_cd.cd_devs[unit]; if (!(bktr->flags & METEOR_INITALIZED)) /* device not found */ return(ENXIO); switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_open(bktr)); case TUNER_DEV: return(tuner_open(bktr)); case VBI_DEV: return(vbi_open(bktr)); } return(ENXIO); } /* * */ int bktr_close(dev_t dev, int flags, int fmt, struct thread *td) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); bktr = bktr_cd.cd_devs[unit]; switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_close(bktr)); case TUNER_DEV: return(tuner_close(bktr)); case VBI_DEV: return(vbi_close(bktr)); } return(ENXIO); } /* * */ int bktr_read(dev_t dev, struct uio *uio, int ioflag) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); bktr = bktr_cd.cd_devs[unit]; switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_read(bktr, unit, dev, uio)); case VBI_DEV: return(vbi_read(bktr, uio, ioflag)); } return(ENXIO); } /* * */ int bktr_write(dev_t dev, struct uio *uio, int ioflag) { /* operation not supported */ return(EOPNOTSUPP); } /* * */ int bktr_ioctl(dev_t dev, ioctl_cmd_t cmd, caddr_t arg, int flag, struct thread *td) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); bktr = bktr_cd.cd_devs[unit]; if (bktr->bigbuf == 0) /* no frame buffer allocated (ioctl failed) */ return(ENOMEM); switch (FUNCTION(dev)) { case VIDEO_DEV: 
return(video_ioctl(bktr, unit, cmd, arg, td)); case TUNER_DEV: return(tuner_ioctl(bktr, unit, cmd, arg, td)); } return(ENXIO); } /* * */ paddr_t bktr_mmap(dev_t dev, off_t offset, int nprot) { int unit; bktr_ptr_t bktr; unit = UNIT(dev); if (FUNCTION(dev) > 0) /* only allow mmap on /dev/bktr[n] */ return(-1); bktr = bktr_cd.cd_devs[unit]; if ((vaddr_t)offset < 0) return(-1); if ((vaddr_t)offset >= bktr->alloc_pages * PAGE_SIZE) return(-1); #ifdef __NetBSD__ return (bus_dmamem_mmap(bktr->dmat, bktr->dm_mem->dm_segs, 1, (vaddr_t)offset, nprot, BUS_DMA_WAITOK)); #else return(i386_btop(vtophys(bktr->bigbuf) + offset)); #endif } #endif /* __NetBSD__ || __OpenBSD__ */ Index: head/sys/dev/buslogic/bt_pci.c =================================================================== --- head/sys/dev/buslogic/bt_pci.c (revision 129878) +++ head/sys/dev/buslogic/bt_pci.c (revision 129879) @@ -1,238 +1,239 @@ /*- * Product specific probe and attach routines for: * Buslogic BT946, BT948, BT956, BT958 SCSI controllers * * Copyright (c) 1995, 1997, 1998 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define BT_PCI_IOADDR PCIR_BAR(0) #define BT_PCI_MEMADDR PCIR_BAR(1) #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040104Bul #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140104Bul #define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130104Bul static int bt_pci_alloc_resources(device_t dev) { int command, type = 0, rid, zero; struct resource *regs = 0; struct resource *irq = 0; command = pci_read_config(dev, PCIR_COMMAND, /*bytes*/1); #if 0 /* XXX Memory Mapped I/O seems to cause problems */ if (command & PCIM_CMD_MEMEN) { type = SYS_RES_MEMORY; rid = BT_PCI_MEMADDR; regs = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE); } #else if (!regs && (command & PCIM_CMD_PORTEN)) { type = SYS_RES_IOPORT; rid = BT_PCI_IOADDR; regs = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE); } #endif if (!regs) return (ENOMEM); zero = 0; irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &zero, RF_ACTIVE | RF_SHAREABLE); if (!irq) { bus_release_resource(dev, type, rid, regs); return (ENOMEM); } bt_init_softc(dev, regs, irq, 0); return (0); } static void bt_pci_release_resources(device_t dev) { struct bt_softc *bt = device_get_softc(dev); if (bt->port) /* XXX can't cope with memory registers anyway */ bus_release_resource(dev, SYS_RES_IOPORT, BT_PCI_IOADDR, bt->port); if (bt->irq) bus_release_resource(dev, 
SYS_RES_IRQ, 0, bt->irq); bt_free_softc(dev); } static int bt_pci_probe(device_t dev) { switch (pci_get_devid(dev)) { case PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER: case PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC: { struct bt_softc *bt = device_get_softc(dev); pci_info_data_t pci_info; int error; error = bt_pci_alloc_resources(dev); if (error) return (error); /* * Determine if an ISA compatible I/O port has been * enabled. If so, record the port so it will not * be probed by our ISA probe. If the PCI I/O port * was not set to the compatibility port, disable it. */ error = bt_cmd(bt, BOP_INQUIRE_PCI_INFO, /*param*/NULL, /*paramlen*/0, (u_int8_t*)&pci_info, sizeof(pci_info), DEFAULT_CMD_TIMEOUT); if (error == 0 && pci_info.io_port < BIO_DISABLED) { bt_mark_probed_bio(pci_info.io_port); if (rman_get_start(bt->port) != bt_iop_from_bio(pci_info.io_port)) { u_int8_t new_addr; new_addr = BIO_DISABLED; bt_cmd(bt, BOP_MODIFY_IO_ADDR, /*param*/&new_addr, /*paramlen*/1, /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); } } bt_pci_release_resources(dev); device_set_desc(dev, "Buslogic Multi-Master SCSI Host Adapter"); return (0); } default: break; } return (ENXIO); } static int bt_pci_attach(device_t dev) { struct bt_softc *bt = device_get_softc(dev); int opri; int error; /* Initialize softc */ error = bt_pci_alloc_resources(dev); if (error) { device_printf(dev, "can't allocate resources in bt_pci_attach\n"); return error; } /* Allocate a dmatag for our CCB DMA maps */ /* XXX Should be a child of the PCI bus dma tag */ if (bus_dma_tag_create( /* parent */ NULL, /* alignemnt */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ BUS_SPACE_MAXSIZE_32BIT, /* nsegments */ ~0, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &Giant, &bt->parent_dmat) != 0) { bt_pci_release_resources(dev); return (ENOMEM); } /* * Protect ourself from 
spurrious interrupts during * intialization and attach. We should really rely * on interrupts during attach, but we don't have * access to our interrupts during ISA probes, so until * that changes, we mask our interrupts during attach * too. */ opri = splcam(); if (bt_probe(dev) || bt_fetch_adapter_info(dev) || bt_init(dev)) { bt_pci_release_resources(dev); splx(opri); return (ENXIO); } error = bt_attach(dev); splx(opri); if (error) { bt_pci_release_resources(dev); return (error); } return (0); } static device_method_t bt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bt_pci_probe), DEVMETHOD(device_attach, bt_pci_attach), { 0, 0 } }; static driver_t bt_pci_driver = { "bt", bt_pci_methods, sizeof(struct bt_softc), }; static devclass_t bt_devclass; DRIVER_MODULE(bt, pci, bt_pci_driver, bt_devclass, 0, 0); Index: head/sys/dev/cp/if_cp.c =================================================================== --- head/sys/dev/cp/if_cp.c (revision 129878) +++ head/sys/dev/cp/if_cp.c (revision 129879) @@ -1,2770 +1,2771 @@ /* * Cronyx-Tau-PCI adapter driver for FreeBSD. * Supports PPP/HDLC, Cisco/HDLC and FrameRelay protocol in synchronous mode, * and asyncronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Kurakin Roman, * * Copyright (C) 1999-2002 Cronyx Engineering. * Author: Serge Vakulenko, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. 
* * $Cronyx: if_cp.c,v 1.1.2.32 2004/02/26 17:56:39 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #if __FreeBSD_version >= 500000 # define NPCI 1 #else # include "pci.h" #endif #if NPCI > 0 #include #include #include #include #include +#include #include #include #include #include #include #if __FreeBSD_version >= 400000 # include #endif #include #include #include #if __FreeBSD_version > 501000 # include # include #else # include # include #endif #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # if __FreeBSD_version >= 500000 # include # else # include # endif #else # include # define PP_CISCO IFF_LINK2 # if __FreeBSD_version < 400000 # include # if NBPFILTER > 0 # include # endif # else # if __FreeBSD_version < 500000 # include # endif # include # define NBPFILTER NBPF #endif #endif #if __FreeBSD_version >= 500000 #include #include #else #include #include #endif #include #include #include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CP_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CP_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CDEV_MAJOR 134 #if __FreeBSD_version >= 400000 static int cp_probe __P((device_t)); static int cp_attach __P((device_t)); static int cp_detach __P((device_t)); static device_method_t cp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cp_probe), DEVMETHOD(device_attach, cp_attach), DEVMETHOD(device_detach, cp_detach), {0, 0} }; typedef struct _bdrv_t { cp_board_t *board; struct resource *cp_res; struct resource *cp_irq; void *cp_intrhand; } bdrv_t; static driver_t cp_driver = { "cp", cp_methods, sizeof(bdrv_t), }; static devclass_t cp_devclass; #endif typedef struct _drv_t { char name [8]; cp_chan_t *chan; cp_board_t *board; 
cp_buf_t buf; int running; #ifdef NETGRAPH char nodename [NG_NODELEN+1]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; short timeout; struct callout_handle timeout_handle; #else struct sppp pp; #endif #if __FreeBSD_version >= 400000 dev_t devt; #endif } drv_t; static void cp_receive (cp_chan_t *c, unsigned char *data, int len); static void cp_transmit (cp_chan_t *c, void *attachment, int len); static void cp_error (cp_chan_t *c, int data); static void cp_up (drv_t *d); static void cp_start (drv_t *d); static void cp_down (drv_t *d); static void cp_watchdog (drv_t *d); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void cp_ifstart (struct ifnet *ifp); static void cp_tlf (struct sppp *sp); static void cp_tls (struct sppp *sp); static void cp_ifwatchdog (struct ifnet *ifp); static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void cp_initialize (void *softc); #endif static cp_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static cp_qbuf_t *queue [NBRD]; static struct callout_handle led_timo [NBRD]; static struct callout_handle timeout_handle; static int cp_destroy = 0; /* * Print the mbuf chain, for debug purposes only. */ static void printmbuf (struct mbuf *m) { printf ("mbuf:"); for (; m; m=m->m_next) { if (m->m_flags & M_PKTHDR) printf (" HDR %d:", m->m_pkthdr.len); if (m->m_flags & M_EXT) printf (" EXT:"); printf (" %d", m->m_len); } printf ("\n"); } /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_DONTWAIT, MT_DATA); if (! m) return 0; MCLGET (m, M_DONTWAIT); if (! 
(m->m_flags & M_EXT)) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } #if __FreeBSD_version < 400000 static const char *cp_probe (pcici_t tag, pcidi_t type) { if (tag->vendor == cp_vendor_id && tag->device == cp_device_id) return "Cronyx-Tau-PCI serial adapter"; return 0; } #else static int cp_probe (device_t dev) { if ((pci_get_vendor (dev) == cp_vendor_id) && (pci_get_device (dev) == cp_device_id)) { device_set_desc (dev, "Cronyx-Tau-PCI serial adapter"); return 0; } return ENXIO; } #endif static void cp_timeout (void *arg) { drv_t *d; int s, i; for (i=0; ichan->type) { case T_G703: cp_g703_timer (d->chan); break; case T_E1: cp_e1_timer (d->chan); break; case T_E3: case T_T3: case T_STS1: cp_e3_timer (d->chan); break; default: break; } splx (s); } s = splimp (); if (!cp_destroy) timeout_handle = timeout (cp_timeout, 0, hz); splx (s); } static void cp_led_off (void *arg) { cp_board_t *b = arg; int s = splimp (); if (cp_destroy) { splx (s); return; } cp_led (b, 0); led_timo[b->num].callout = 0; splx (s); } static void cp_intr (void *arg) { #if __FreeBSD_version < 400000 cp_board_t *b = arg; #else bdrv_t *bd = arg; cp_board_t *b = bd->board; #endif int s = splimp (); if (cp_destroy) { splx (s); return; } /* Turn LED on. */ cp_led (b, 1); cp_interrupt (b); /* Turn LED off 50 msec later. */ if (!led_timo[b->num].callout) led_timo[b->num] = timeout (cp_led_off, b, hz/20); splx (s); } extern struct cdevsw cp_cdevsw; /* * Called if the probe succeeded. 
*/ #if __FreeBSD_version < 400000 static void cp_attach (pcici_t tag, int unit) { vm_offset_t pbase; #else static int cp_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); int rid, error; #endif vm_offset_t vbase; cp_board_t *b; cp_chan_t *c; drv_t *d; unsigned short res; int s = splimp (); b = malloc (sizeof(cp_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cp%d: couldn't allocate memory\n", unit); #if __FreeBSD_version < 400000 splx (s); return; #else splx (s); return (ENXIO); #endif } adapter[unit] = b; bzero (b, sizeof(cp_board_t)); #if __FreeBSD_version < 400000 if (! pci_map_mem (tag, PCIR_MAPS, &vbase, &pbase)) { printf ("cp%d: cannot map memory\n", unit); free (b, M_DEVBUF); splx (s); return; } #else bd->board = b; b->sys = bd; rid = PCIR_BAR(0); bd->cp_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->cp_res) { printf ("cp%d: cannot map memory\n", unit); free (b, M_DEVBUF); splx (s); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->cp_res); #endif res = cp_init (b, unit, (u_char*) vbase); if (res) { printf ("cp%d: can't init, error code:%x\n", unit, res); #if __FreeBSD_version >= 400000 bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); #endif free (b, M_DEVBUF); splx (s); #if __FreeBSD_version >= 400000 return (ENXIO); #else return; #endif } queue[unit] = contigmalloc (sizeof(cp_qbuf_t), M_DEVBUF, M_WAITOK, 0x100000, 0xffffffff, 16, 0); if (queue[unit] == NULL) { printf ("cp%d: allocate memory for qbuf_t\n", unit); free (b, M_DEVBUF); splx (s); #if __FreeBSD_version >= 400000 return (ENXIO); #else return; #endif } cp_reset (b, queue[unit], vtophys (queue[unit])); #if __FreeBSD_version < 400000 if (! pci_map_int (tag, cp_intr, b, &net_imask)) printf ("cp%d: cannot map interrupt\n", unit); #else rid = 0; bd->cp_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! 
bd->cp_irq) { printf ("cp%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); free (b, M_DEVBUF); splx (s); return (ENXIO); } error = bus_setup_intr (dev, bd->cp_irq, INTR_TYPE_NET, cp_intr, bd, &bd->cp_intrhand); if (error) { printf ("cp%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); free (b, M_DEVBUF); splx (s); return (ENXIO); } #endif printf ("cp%d: %s, clock %ld MHz\n", unit, b->name, b->osc / 1000000); for (c=b->chan; cchan+NCHAN; ++c) { if (! c->type) continue; d = contigmalloc (sizeof(drv_t), M_DEVBUF, M_WAITOK, 0x100000, 0xffffffff, 16, 0); if (d == NULL) { printf ("cp%d-%d: cannot allocate memory for drv_t\n", unit, c->num); } channel [b->num*NCHAN + c->num] = d; bzero (d, sizeof(drv_t)); sprintf (d->name, "cp%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } #if __FreeBSD_version >= 500000 NG_NODE_SET_PRIVATE (d->node, d); #else d->node->private = d; #endif sprintf (d->nodename, "%s%d", NG_CP_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); #if __FreeBSD_version >= 500000 NG_NODE_UNREF (d->node); #else ng_rmnode (d->node); ng_unref (d->node); #endif continue; } d->queue.ifq_maxlen = IFQ_MAXLEN; d->hi_queue.ifq_maxlen = IFQ_MAXLEN; #if __FreeBSD_version >= 500000 mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "cp_queue_hi", NULL, MTX_DEF); #endif #else /*NETGRAPH*/ d->pp.pp_if.if_softc = d; #if __FreeBSD_version > 501000 if_initname (&d->pp.pp_if, "cp", b->num * NCHAN + c->num); #else d->pp.pp_if.if_unit = b->num * NCHAN + c->num; d->pp.pp_if.if_name = "cp"; #endif d->pp.pp_if.if_mtu = PP_MTU; 
d->pp.pp_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->pp.pp_if.if_ioctl = cp_sioctl; d->pp.pp_if.if_start = cp_ifstart; d->pp.pp_if.if_watchdog = cp_ifwatchdog; d->pp.pp_if.if_init = cp_initialize; sppp_attach (&d->pp.pp_if); if_attach (&d->pp.pp_if); d->pp.pp_tlf = cp_tlf; d->pp.pp_tls = cp_tls; #if __FreeBSD_version >= 400000 || NBPFILTER > 0 /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (&d->pp.pp_if, DLT_PPP, 4); #endif #endif /*NETGRAPH*/ cp_start_e1 (c); cp_start_chan (c, 1, 1, &d->buf, vtophys (&d->buf)); /* Register callback functions. */ cp_register_transmit (c, &cp_transmit); cp_register_receive (c, &cp_receive); cp_register_error (c, &cp_error); #if __FreeBSD_version >= 400000 d->devt = make_dev (&cp_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "cp%d", b->num*NCHAN+c->num); #endif } splx (s); #if __FreeBSD_version >= 400000 return 0; #endif } #if __FreeBSD_version >= 400000 static int cp_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); cp_board_t *b = bd->board; cp_chan_t *c; int s = splimp (); /* Check if the device is busy (open). */ for (c=b->chan; cchan+NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; if (d->running) { splx (s); return EBUSY; } } /* Ok, we can unload driver */ /* At first we should stop all channels */ for (c=b->chan; cchan+NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; cp_stop_chan (c); cp_stop_e1 (c); cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); } /* Reset the adapter. */ cp_destroy = 1; cp_interrupt_poll (b, 1); cp_led_off (b); cp_reset (b, 0 ,0); if (led_timo[b->num].callout) untimeout (cp_led_off, b, led_timo[b->num]); for (c=b->chan; cchan+NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; #ifndef NETGRAPH #if __FreeBSD_version >= 410000 && NBPFILTER > 0 /* Detach from the packet filter list of interfaces. 
*/ bpfdetach (&d->pp.pp_if); #endif /* Detach from the sync PPP list. */ sppp_detach (&d->pp.pp_if); /* Detach from the system list of interfaces. */ if_detach (&d->pp.pp_if); #else #if __FreeBSD_version >= 500000 if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #else ng_rmnode (d->node); d->node = 0; #endif #endif destroy_dev (d->devt); } /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->cp_irq, bd->cp_intrhand); bus_deactivate_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); cp_led_off (b); if (led_timo[b->num].callout) untimeout (cp_led_off, b, led_timo[b->num]); splx (s); s = splimp (); for (c=b->chan; cchan+NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; channel [b->num*NCHAN + c->num] = 0; /* Deallocate buffers. */ #if __FreeBSD_version < 400000 free (d, M_DEVBUF); #else contigfree (d, sizeof (*d), M_DEVBUF); #endif } adapter [b->num] = 0; #if __FreeBSD_version < 400000 free (queue[b->num], M_DEVBUF); #else contigfree (queue[b->num], sizeof (cp_qbuf_t), M_DEVBUF); #endif free (b, M_DEVBUF); splx (s); return 0; } #endif #if __FreeBSD_version < 400000 static u_long cp_count; static struct pci_device cp_driver = {"cp", cp_probe, cp_attach, &cp_count, 0}; DATA_SET (pcidevice_set, cp_driver); #endif #ifndef NETGRAPH static void cp_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; cp_start (d); } static void cp_ifwatchdog (struct ifnet *ifp) { drv_t *d = ifp->if_softc; cp_watchdog (d); } static void cp_tlf (struct sppp *sp) { drv_t *d = sp->pp_if.if_softc; CP_DEBUG2 (d, ("cp_tlf\n")); /* cp_set_dtr (d->chan, 0);*/ /* cp_set_rts (d->chan, 0);*/ sp->pp_down (sp); } static void cp_tls (struct sppp *sp) { drv_t *d = sp->pp_if.if_softc; CP_DEBUG2 (d, ("cp_tls\n")); sp->pp_up (sp); } /* * Process an 
ioctl request. */ static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; int error, s, was_up, should_be_up; was_up = (ifp->if_flags & IFF_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else if (! d->chan->debug) d->chan->debug = 1; switch (cmd) { default: CP_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CP_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CP_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CP_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CP_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); should_be_up = (ifp->if_flags & IFF_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ cp_up (d); cp_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((d->pp.pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ cp_down (d); } CP_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); splx (s); return 0; } /* * Initialization of interface. * It seems to be never called by upper level? */ static void cp_initialize (void *softc) { drv_t *d = softc; CP_DEBUG (d, ("cp_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void cp_down (drv_t *d) { CP_DEBUG (d, ("cp_down\n")); /* Interface is going down -- stop it. */ cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); d->running = 0; } /* * Start the interface. Called on splimp(). */ static void cp_up (drv_t *d) { CP_DEBUG (d, ("cp_up\n")); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. 
*/ static void cp_send (drv_t *d) { struct mbuf *m; u_short len; CP_DEBUG2 (d, ("cp_send, tn=%d te=%d\n", d->chan->tn, d->chan->te)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! (d->chan->lloop || d->chan->type != T_SERIAL || cp_get_dsr (d->chan))) return; while (cp_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (&d->pp.pp_if); #endif if (! m) return; #if (__FreeBSD_version >= 400000 || NBPFILTER > 0) && !defined (NETGRAPH) if (d->pp.pp_if.if_bpf) #if __FreeBSD_version >= 500000 BPF_MTAP (&d->pp.pp_if, m); #else bpf_mtap (&d->pp.pp_if, m); #endif #endif len = m->m_pkthdr.len; if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) cp_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { u_char *buf = d->chan->tbuf[d->chan->te]; m_copydata (m, 0, len, buf); cp_send_packet (d->chan, buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ #ifdef NETGRAPH d->timeout = 10; #else d->pp.pp_if.if_timer = 10; #endif } #ifndef NETGRAPH d->pp.pp_if.if_flags |= IFF_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void cp_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) cp_set_dtr (d->chan, 1); if (! d->chan->rts) cp_set_rts (d->chan, 1); cp_send (d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). 
*/ static void cp_watchdog (drv_t *d) { CP_DEBUG (d, ("device timeout\n")); if (d->running) { int s = splimp (); cp_stop_chan (d->chan); cp_stop_e1 (d->chan); cp_start_e1 (d->chan); cp_start_chan (d->chan, 1, 1, 0, 0); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); cp_start (d); splx (s); } } static void cp_transmit (cp_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; #ifdef NETGRAPH d->timeout = 0; #else ++d->pp.pp_if.if_opackets; d->pp.pp_if.if_flags &= ~IFF_OACTIVE; d->pp.pp_if.if_timer = 0; #endif cp_start (d); } static void cp_receive (cp_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #if __FreeBSD_version >= 500000 && defined NETGRAPH int error; #endif if (! d->running) return; m = makembuf (data, len); if (! m) { CP_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_iqdrops; #endif return; } if (c->debug > 1) printmbuf (m); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; #if __FreeBSD_version >= 500000 NG_SEND_DATA_ONLY (error, d->hook, m); #else ng_queue_data (d->hook, m, 0); #endif #else ++d->pp.pp_if.if_ipackets; m->m_pkthdr.rcvif = &d->pp.pp_if; #if __FreeBSD_version >= 400000 || NBPFILTER > 0 /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. 
*/ if (d->pp.pp_if.if_bpf) #if __FreeBSD_version >= 500000 BPF_TAP (&d->pp.pp_if, data, len); #else bpf_tap (&d->pp.pp_if, data, len); #endif #endif sppp_input (&d->pp.pp_if, m); #endif } static void cp_error (cp_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CP_FRAME: CP_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_ierrors; #endif break; case CP_CRC: CP_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_ierrors; #endif break; case CP_OVERRUN: CP_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_collisions; ++d->pp.pp_if.if_ierrors; #endif break; case CP_OVERFLOW: CP_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_ierrors; #endif break; case CP_UNDERRUN: CP_DEBUG (d, ("underrun error\n")); #ifdef NETGRAPH d->timeout = 0; #else ++d->pp.pp_if.if_oerrors; d->pp.pp_if.if_flags &= ~IFF_OACTIVE; d->pp.pp_if.if_timer = 0; #endif cp_start (d); break; default: CP_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ #if __FreeBSD_version < 500000 static int cp_open (dev_t dev, int oflags, int devtype, struct proc *p) #else static int cp_open (dev_t dev, int oflags, int devtype, struct thread *td) #endif { int unit = minor (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CP_DEBUG2 (d, ("cp_open\n")); return 0; } /* * Only called on the LAST close. */ #if __FreeBSD_version < 500000 static int cp_close (dev_t dev, int fflag, int devtype, struct proc *p) #else static int cp_close (dev_t dev, int fflag, int devtype, struct thread *td) #endif { drv_t *d = channel [minor (dev)]; CP_DEBUG2 (d, ("cp_close\n")); return 0; } static int cp_modem_status (cp_chan_t *c) { drv_t *d = c->sys; int status, s; status = d->running ? 
TIOCM_LE : 0; s = splimp (); if (cp_get_cd (c)) status |= TIOCM_CD; if (cp_get_cts (c)) status |= TIOCM_CTS; if (cp_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; splx (s); return status; } #if __FreeBSD_version < 500000 static int cp_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) #else static int cp_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) #endif { drv_t *d = channel [minor (dev)]; cp_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; struct e3_statistics *opte3; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CP_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; spp.pp_flags & PP_FR) ? "fr" : (d->pp.pp_if.if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CP_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (d->pp.pp_if.if_flags & IFF_RUNNING) return EBUSY; if (! strcmp ("cisco", (char*)data)) { d->pp.pp_flags &= ~(PP_FR); d->pp.pp_flags |= PP_KEEPALIVE; d->pp.pp_if.if_flags |= PP_CISCO; } else if (! strcmp ("fr", (char*)data) && !PP_FR) { d->pp.pp_if.if_flags &= ~(PP_CISCO); d->pp.pp_flags |= PP_FR | PP_KEEPALIVE; } else if (! strcmp ("ppp", (char*)data)) { d->pp.pp_flags &= ~PP_FR; d->pp.pp_flags &= ~PP_KEEPALIVE; d->pp.pp_if.if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((d->pp.pp_flags & PP_FR) || (d->pp.pp_if.if_flags & PP_CISCO)) return EINVAL; *(int*)data = (d->pp.pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if ((d->pp.pp_flags & PP_FR) || (d->pp.pp_if.if_flags & PP_CISCO)) return EINVAL; s = splimp (); if (*(int*)data) d->pp.pp_flags |= PP_KEEPALIVE; else d->pp.pp_flags &= ~PP_KEEPALIVE; splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CP_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CP_DEBUG2 (d, ("ioctl: getcfg\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(char*)data = c->board->mux ? 'c' : 'a'; return 0; case SERIAL_SETCFG: CP_DEBUG2 (d, ("ioctl: setcfg\n")); #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_mux (c->board, *((char*)data) == 'c'); splx (s); return 0; case SERIAL_GETSTAT: CP_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CP_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1 && c->type != T_G703) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; 
opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_GETE3STAT: CP_DEBUG2 (d, ("ioctl: gete3stat\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; opte3 = (struct e3_statistics*) data; opte3->status = c->e3status; opte3->cursec = (c->e3csec_5 * 2 + 1) / 10; opte3->totsec = c->e3tsec + opte3->cursec; opte3->ccv = c->e3ccv; opte3->tcv = c->e3tcv + opte3->ccv; for (s = 0; s < 48; ++s) { opte3->icv[s] = c->e3icv[s]; } return 0; case SERIAL_CLRSTAT: CP_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); c->e3ccv = 0; c->e3tcv = 0; bzero (c->e3icv, sizeof (c->e3icv)); return 0; case SERIAL_GETBAUD: CP_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CP_DEBUG2 (d, ("ioctl: setbaud\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); cp_set_baud (c, *(long*)data); splx (s); return 0; case SERIAL_GETLOOP: CP_DEBUG2 (d, ("ioctl: getloop\n")); *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CP_DEBUG2 (d, ("ioctl: setloop\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); cp_set_lloop (c, *(int*)data); splx (s); return 0; case SERIAL_GETDPLL: CP_DEBUG2 (d, ("ioctl: getdpll\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->dpll; return 0; case SERIAL_SETDPLL: CP_DEBUG2 (d, ("ioctl: setdpll\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); cp_set_dpll (c, *(int*)data); splx (s); return 0; case SERIAL_GETNRZI: CP_DEBUG2 (d, ("ioctl: getnrzi\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->nrzi; return 0; case SERIAL_SETNRZI: CP_DEBUG2 (d, ("ioctl: setnrzi\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); cp_set_nrzi (c, *(int*)data); splx (s); return 0; case SERIAL_GETDEBUG: CP_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CP_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; d->chan->debug = *(int*)data; #ifndef NETGRAPH if (d->chan->debug) d->pp.pp_if.if_flags |= IFF_DEBUG; else d->pp.pp_if.if_flags &= ~IFF_DEBUG; #endif return 0; case SERIAL_GETHIGAIN: CP_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CP_DEBUG2 (d, ("ioctl: sethigain\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_higain (c, *(int*)data); splx (s); return 0; case SERIAL_GETPHONY: CP_DEBUG2 (d, ("ioctl: getphony\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CP_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_phony (c, *(int*)data); splx (s); return 0; case SERIAL_GETUNFRAM: CP_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CP_DEBUG2 (d, ("ioctl: setunfram\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_unfram (c, *(int*)data); splx (s); return 0; case SERIAL_GETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: getscrambler\n")); if (c->type != T_G703 && !c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_G703 && !c->unfram) return EINVAL; s = splimp (); cp_set_scrambler (c, *(int*)data); splx (s); return 0; case SERIAL_GETMONITOR: CP_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CP_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_monitor (c, *(int*)data); splx (s); return 0; case SERIAL_GETUSE16: CP_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CP_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_use16 (c, *(int*)data); splx (s); return 0; case SERIAL_GETCRC4: CP_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CP_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); cp_set_crc4 (c, *(int*)data); splx (s); return 0; case SERIAL_GETCLK: CP_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; case GSYN_RCV2: *(int*)data = E1CLK_RECEIVE_CHAN2; break; case GSYN_RCV3: *(int*)data = E1CLK_RECEIVE_CHAN3; break; } return 0; case SERIAL_SETCLK: CP_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; s = splimp (); switch (*(int*)data) { default: cp_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: cp_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: cp_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: cp_set_gsyn (c, GSYN_RCV1); break; case E1CLK_RECEIVE_CHAN2: cp_set_gsyn (c, GSYN_RCV2); break; case E1CLK_RECEIVE_CHAN3: cp_set_gsyn (c, GSYN_RCV3); break; } splx (s); return 0; case SERIAL_GETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); cp_set_ts (c, *(u_long*)data); splx (s); return 0; case SERIAL_GETINVCLK: CP_DEBUG2 (d, ("ioctl: getinvclk\n")); #if 1 return EINVAL; #else if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; #endif case SERIAL_SETINVCLK: CP_DEBUG2 (d, ("ioctl: setinvclk\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); cp_set_invtxc (c, *(int*)data); cp_set_invrxc (c, *(int*)data); splx (s); return 0; case SERIAL_GETINVTCLK: CP_DEBUG2 (d, ("ioctl: getinvtclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; case SERIAL_SETINVTCLK: CP_DEBUG2 (d, ("ioctl: setinvtclk\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); cp_set_invtxc (c, *(int*)data); splx (s); return 0; case SERIAL_GETINVRCLK: CP_DEBUG2 (d, ("ioctl: getinvrclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invrxc; return 0; case SERIAL_SETINVRCLK: CP_DEBUG2 (d, ("ioctl: setinvrclk\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); cp_set_invrxc (c, *(int*)data); splx (s); return 0; case SERIAL_GETLEVEL: CP_DEBUG2 (d, ("ioctl: getlevel\n")); if (c->type != T_G703) return EINVAL; s = splimp (); *(int*)data = cp_get_lq (c); splx (s); return 0; #if 0 case SERIAL_RESET: CP_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); cp_reset (c->board, 0, 0); splx (s); return 0; case SERIAL_HARDRESET: CP_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); /* hard_reset (c->board); */ splx (s); return 0; #endif case SERIAL_GETCABLE: CP_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_SERIAL) return EINVAL; s = splimp (); *(int*)data = cp_get_cable (c); splx (s); return 0; case SERIAL_GETDIR: CP_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CP_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); cp_set_dir (c, *(int*)data); splx (s); return 0; case SERIAL_GETRLOOP: CP_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = cp_get_rloop (c); return 0; case SERIAL_SETRLOOP: CP_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); cp_set_rloop (c, *(int*)data); splx (s); return 0; case SERIAL_GETCABLEN: CP_DEBUG2 (d, ("ioctl: getcablen\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->cablen; return 0; case SERIAL_SETCABLEN: CP_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else error = suser (td); #endif if (error) return error; s = splimp (); cp_set_cablen (c, *(int*)data); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); cp_set_dtr (c, 1); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); cp_set_dtr (c, 0); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); cp_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); cp_set_rts (c, (*(int*)data & TIOCM_RTS) ? 
1 : 0); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 1); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 0); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = cp_modem_status (c); return 0; } return ENOTTY; } #if __FreeBSD_version < 400000 static struct cdevsw cp_cdevsw = { cp_open, cp_close, noread, nowrite, cp_ioctl, nullstop, nullreset, nodevtotty, seltrue, nommap, NULL, "cp", NULL, -1 }; #elif __FreeBSD_version < 500000 static struct cdevsw cp_cdevsw = { cp_open, cp_close, noread, nowrite, cp_ioctl, nopoll, nommap, nostrategy, "cp", CDEV_MAJOR, nodump, nopsize, D_NAGGED, -1 }; #elif __FreeBSD_version == 500000 static struct cdevsw cp_cdevsw = { cp_open, cp_close, noread, nowrite, cp_ioctl, nopoll, nommap, nostrategy, "cp", CDEV_MAJOR, nodump, nopsize, D_NAGGED, }; #elif __FreeBSD_version <= 501000 static struct cdevsw cp_cdevsw = { .d_open = cp_open, .d_close = cp_close, .d_read = noread, .d_write = nowrite, .d_ioctl = cp_ioctl, .d_poll = nopoll, .d_mmap = nommap, .d_strategy = nostrategy, .d_name = "cp", .d_maj = CDEV_MAJOR, .d_dump = nodump, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 502103 static struct cdevsw cp_cdevsw = { .d_open = cp_open, .d_close = cp_close, .d_ioctl = cp_ioctl, .d_name = "cp", .d_maj = CDEV_MAJOR, .d_flags = D_NAGGED, }; #else /* __FreeBSD_version >= 502103 */ static struct cdevsw cp_cdevsw = { .d_version = D_VERSION, .d_open = cp_open, .d_close = cp_close, .d_ioctl = cp_ioctl, .d_name = "cp", .d_maj = CDEV_MAJOR, .d_flags = D_NEEDGIANT, }; #endif #ifdef NETGRAPH #if __FreeBSD_version >= 500000 static int ng_cp_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); #else static int ng_cp_constructor (node_p *node) { drv_t *d = (*node)->private; #endif CP_DEBUG (d, 
("Constructor\n")); return EINVAL; } static int ng_cp_newhook (node_p node, hook_p hook, const char *name) { int s; #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); #else drv_t *d = node->private; #endif CP_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CP_HOOK_DEBUG) == 0) { #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, NULL); #else hook->private = 0; #endif d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CP_HOOK_RAW) != 0) return EINVAL; #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, d); #else hook->private = d; #endif d->hook = hook; s = splimp (); cp_up (d); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, cp_chan_t *c, int need_header) { int status = cp_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? 
"On" : "-"); return length; } static int print_stats (char *s, cp_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? 
'/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, cp_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += 
print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. */ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, cp_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "cp%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1 || c->type == T_G703) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); 
break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; case GSYN_RCV2 : length += sprintf (s + length, " syn=rcv2"); break; case GSYN_RCV3 : length += sprintf (s + length, " syn=rcv3"); break; } if (c->type == T_SERIAL) { length += sprintf (s + length, " dpll=%s", c->dpll ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", c->nrzi ? "on" : "off"); length += sprintf (s + length, " invclk=%s", c->invtxc ? "on" : "off"); } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); if (c->type == T_G703) { int lq, x; x = splimp (); lq = cp_get_lq (c); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } #if __FreeBSD_version >= 500000 static int ng_cp_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; #else static int ng_cp_rcvmsg (node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { drv_t *d = node->private; #endif struct ng_mesg *resp = NULL; int error = 0; CP_DEBUG (d, ("Rcvmsg\n")); #if __FreeBSD_version >= 500000 NGI_GET_MSG (item, msg); #endif switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CP_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; #if __FreeBSD_version >= 500000 NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #else MALLOC (resp, struct ng_mesg *, dl, M_NETGRAPH, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } bzero (resp, dl); #endif s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connect to channel"); #if __FreeBSD_version < 500000 (resp)->header.version = NG_VERSION; (resp)->header.arglen = strlen (s) + 1; (resp)->header.token = msg->header.token; (resp)->header.typecookie = NGM_CP_COOKIE; (resp)->header.cmd = msg->header.cmd; #endif strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRLEN); } break; } break; } #if __FreeBSD_version >= 500000 NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); #else *rptr = resp; FREE (msg, M_NETGRAPH); #endif return error; } #if __FreeBSD_version >= 500000 static int ng_cp_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; meta_p meta; #else static int ng_cp_rcvdata (hook_p hook, struct mbuf *m, meta_p meta) { drv_t *d = hook->node->private; #endif struct ifqueue *q; int s; CP_DEBUG2 (d, ("Rcvdata\n")); #if __FreeBSD_version >= 500000 NGI_GET_M (item, m); NGI_GET_META (item, meta); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); NG_FREE_META (meta); #else if (! hook->private || ! d) { NG_FREE_DATA (m,meta); #endif return ENETDOWN; } q = (meta && meta->priority > 0) ? 
&d->hi_queue : &d->queue; s = splimp (); #if __FreeBSD_version >= 500000 IF_LOCK (q); if (_IF_QFULL (q)) { _IF_DROP (q); IF_UNLOCK (q); splx (s); NG_FREE_M (m); NG_FREE_META (meta); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); #else if (IF_QFULL (q)) { IF_DROP (q); splx (s); NG_FREE_DATA (m, meta); return ENOBUFS; } IF_ENQUEUE (q, m); #endif cp_start (d); splx (s); return 0; } static int ng_cp_rmnode (node_p node) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Rmnode\n")); if (d && d->running) { int s = splimp (); cp_down (d); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NG_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } node->nd_flags &= ~NG_INVALID; #endif #else /* __FreeBSD_version < 500000 */ drv_t *d = node->private; if (d && d->running) { int s = splimp (); cp_down (d); splx (s); } node->flags |= NG_INVALID; ng_cutlinks (node); #ifdef KLD_MODULE #if __FreeBSD_version >= 400000 /* We do so because of pci module problem, see also comment in cp_unload. Not in 4.x. 
*/ ng_unname (node); ng_unref (node); #else node->flags &= ~NG_INVALID; #endif #endif #endif return 0; } static void ng_cp_watchdog (void *arg) { drv_t *d = arg; if (d) { if (d->timeout == 1) cp_watchdog (d); if (d->timeout) d->timeout--; d->timeout_handle = timeout (ng_cp_watchdog, d, hz); } } static int ng_cp_connect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CP_DEBUG (d, ("Connect\n")); d->timeout_handle = timeout (ng_cp_watchdog, d, hz); } return 0; } static int ng_cp_disconnect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CP_DEBUG (d, ("Disconnect\n")); #if __FreeBSD_version >= 500000 if (NG_HOOK_PRIVATE (hook)) #else if (hook->private) #endif { int s = splimp (); cp_down (d); splx (s); } untimeout (ng_cp_watchdog, d, d->timeout_handle); } return 0; } #endif #if __FreeBSD_version < 400000 #ifdef KLD_MODULE extern STAILQ_HEAD(devlist, pci_devinfo) pci_devq; static struct pci_devinfo *pci_device_find (u_int16_t device, u_int16_t vendor, int unit) { pcicfgregs *cfg; struct pci_devinfo *dinfo; int u=0,i; for (dinfo = STAILQ_FIRST (&pci_devq), i=0; dinfo && (i < pci_numdevs); dinfo = STAILQ_NEXT (dinfo, pci_links), i++) { cfg = &dinfo->cfg; if ((device == cfg->device) && (vendor == cfg->vendor)) { if (u == unit) return dinfo; u++; } } return 0; } /* * Function called when loading the driver. */ static int cp_load (void) { int i, s; pcicfgregs *cfg; struct pci_devinfo *dinfo; s = splimp (); for (i=0; icfg; cp_attach (cfg, i); dinfo->device = &cp_driver; strncpy (dinfo->conf.pd_name, cp_driver.pd_name, sizeof(dinfo->conf.pd_name)); dinfo->conf.pd_name[sizeof(dinfo->conf.pd_name) - 1] = 0; dinfo->conf.pd_unit = i; } splx (s); if (! i) { /* Deactivate the timeout routine. 
*/ untimeout (cp_timeout, 0, timeout_handle); return ENXIO; } return 0; } /* * Function called when unloading the driver. */ static int cp_unload (void) { #if 1 /* Currently pci loadable module not fully supported, so we just return EBUSY. Do not forget to correct ng_cp_rmnode then probelm would be solved. */ return EBUSY; #else int i, s; /* Check if the device is busy (open). */ for (i=0; ichan->type && d->running) return EBUSY; } s = splimp (); /* Deactivate the timeout routine. */ untimeout (cp_timeout, 0, timeout_handle); /* OK to unload the driver, unregister the interrupt first. */ for (i=0; itype) continue; cp_reset (b, 0 ,0); /* pci_unmap_int (tag, cp_intr, b, &net_imask);*/ /* Here should be something like pci_unmap_mem ()*/ } for (i=0; i= 400000 || NBPFILTER > 0 /* Detach from the packet filter list of interfaces. */ { struct bpf_if *q, **b = &bpf_iflist; while ((q = *b)) { if (q->bif_ifp == d->pp.pp_if) { *b = q->bif_next; free (q, M_DEVBUF); } b = &(q->bif_next); } } #endif /* Detach from the sync PPP list. */ sppp_detach (&d->pp.pp_if); /* Detach from the system list of interfaces. */ { struct ifaddr *ifa; TAILQ_FOREACH (ifa, &d->pp.pp_if.if_addrhead, ifa_link) { TAILQ_REMOVE (&d->pp.pp_if.if_addrhead, ifa, ifa_link); free (ifa, M_IFADDR); } TAILQ_REMOVE (&ifnet, &d->pp.pp_if, if_link); } #endif /* Deallocate buffers. 
*/ /* free (d, M_DEVBUF);*/ } for (i=0; itype) free (b, M_DEVBUF); } splx (s); return 0; #endif } #endif #endif #if __FreeBSD_version < 400000 #ifdef KLD_MODULE static int cp_modevent (module_t mod, int type, void *unused) { dev_t dev; switch (type) { case MOD_LOAD: dev = makedev (CDEV_MAJOR, 0); cdevsw_add (&dev, &cp_cdevsw, 0); timeout_handle = timeout (cp_timeout, 0, hz*5); return cp_load (); case MOD_UNLOAD: return cp_unload (); case MOD_SHUTDOWN: break; } return 0; } #endif /* KLD_MODULE */ #else /* __FreeBSD_version >= 400000 */ static int cp_modevent (module_t mod, int type, void *unused) { dev_t dev; static int load_count = 0; struct cdevsw *cdsw; #if __FreeBSD_version >= 502103 dev = udev2dev (makeudev(CDEV_MAJOR, 0)); #else dev = makedev (CDEV_MAJOR, 0); #endif switch (type) { case MOD_LOAD: if (dev != NODEV && (cdsw = devsw (dev)) && cdsw->d_maj == CDEV_MAJOR) { printf ("Tau-PCI driver is already in system\n"); return (ENXIO); } #if __FreeBSD_version >= 500000 && defined NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_cp\n"); #endif ++load_count; #if __FreeBSD_version <= 500000 cdevsw_add (&cp_cdevsw); #endif timeout_handle = timeout (cp_timeout, 0, hz*5); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-PCI\n"); #if __FreeBSD_version <= 500000 cdevsw_remove (&cp_cdevsw); #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH ng_rmtype (&typestruct); #endif } untimeout (cp_timeout, 0, timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #endif /* __FreeBSD_version < 400000 */ #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CP_NODE_TYPE, .constructor = ng_cp_constructor, .rcvmsg = ng_cp_rcvmsg, .shutdown = ng_cp_rmnode, .newhook = ng_cp_newhook, .connect = ng_cp_connect, .rcvdata = ng_cp_rcvdata, .disconnect = ng_cp_disconnect, }; #if __FreeBSD_version < 400000 NETGRAPH_INIT_ORDERED (cp, &typestruct, SI_SUB_DRIVERS,\ 
SI_ORDER_MIDDLE + CDEV_MAJOR); #endif #endif /*NETGRAPH*/ #if __FreeBSD_version >= 500000 #ifdef NETGRAPH MODULE_DEPEND (ng_cp, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (cp, sppp, 1, 1, 1); #endif #ifdef KLD_MODULE DRIVER_MODULE (cpmod, pci, cp_driver, cp_devclass, cp_modevent, NULL); #else DRIVER_MODULE (cp, pci, cp_driver, cp_devclass, cp_modevent, NULL); #endif #elif __FreeBSD_version >= 400000 #ifdef NETGRAPH DRIVER_MODULE (cp, pci, cp_driver, cp_devclass, ng_mod_event, &typestruct); #else DRIVER_MODULE (cp, pci, cp_driver, cp_devclass, cp_modevent, NULL); #endif #else /* __FreeBSD_version < 400000 */ #ifdef KLD_MODULE #ifndef NETGRAPH static moduledata_t cpmod = { "cp", cp_modevent, NULL}; DECLARE_MODULE (cp, cpmod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR); #endif #else /* KLD_MODULE */ /* * Now for some driver initialisation. * Occurs ONCE during boot (very early). * This is if we are NOT a loadable module. */ static void cp_drvinit (void *unused) { dev_t dev; dev = makedev (CDEV_MAJOR, 0); cdevsw_add (&dev, &cp_cdevsw, 0); /* Activate the timeout routine. */ timeout_handle = timeout (cp_timeout, 0, hz); #ifdef NETGRAPH #if 0 /* Register our node type in netgraph */ if (ng_newtype (&typestruct)) printf ("Failed to register ng_cp\n"); #endif #endif } SYSINIT (cpdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR, cp_drvinit, 0) #endif /* KLD_MODULE */ #endif /* __FreeBSD_version < 400000 */ #endif /* NPCI */ Index: head/sys/dev/ctau/if_ct.c =================================================================== --- head/sys/dev/ctau/if_ct.c (revision 129878) +++ head/sys/dev/ctau/if_ct.c (revision 129879) @@ -1,2744 +1,2745 @@ /* * Cronyx-Tau adapter driver for FreeBSD. * Supports PPP/HDLC and Cisco/HDLC protocol in synchronous mode, * and asyncronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1994-2002 Cronyx Engineering. 
* Author: Serge Vakulenko, * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Roman Kurakin, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. * * Cronyx Id: if_ct.c,v 1.1.2.22 2004/02/26 19:06:51 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #if __FreeBSD_version >= 500000 # define NCTAU 1 #else # include "ctau.h" #endif #if NCTAU > 0 #include #include #include +#include #include #include #include #include #include #include #include #if __FreeBSD_version >= 400000 # include # include # include # include #endif #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #if __FreeBSD_version >= 400000 #include # if __FreeBSD_version <= 501000 # include # endif #endif #if __FreeBSD_version >= 400000 #include #include #include #else #include #include #include #endif #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # include # include #if __FreeBSD_version >= 400000 # include #else # include #endif #else # include # if __FreeBSD_version < 500000 # include "sppp.h" # if NSPPP <= 0 # error The device ctau requires sppp or netgraph. 
# endif # endif # include # define PP_CISCO IFF_LINK2 #if __FreeBSD_version < 400000 # include # if NBPFILTER > 0 # include # endif #else # if __FreeBSD_version < 500000 # include # endif # include # define NBPFILTER NBPF #endif #endif /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CT_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CT_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CDEV_MAJOR 99 #if __FreeBSD_version >= 400000 static void ct_identify __P((driver_t *, device_t)); static int ct_probe __P((device_t)); static int ct_attach __P((device_t)); static int ct_detach __P((device_t)); static device_method_t ct_isa_methods [] = { DEVMETHOD(device_identify, ct_identify), DEVMETHOD(device_probe, ct_probe), DEVMETHOD(device_attach, ct_attach), DEVMETHOD(device_detach, ct_detach), {0, 0} }; typedef struct _bdrv_t { ct_board_t *board; struct resource *base_res; struct resource *drq_res; struct resource *irq_res; int base_rid; int drq_rid; int irq_rid; void *intrhand; } bdrv_t; static driver_t ct_isa_driver = { "ct", ct_isa_methods, sizeof (bdrv_t), }; static devclass_t ct_devclass; #endif typedef struct _drv_t { char name [8]; ct_chan_t *chan; ct_board_t *board; ct_buf_t buf; int running; #ifdef NETGRAPH char nodename [NG_NODELEN+1]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; short timeout; struct callout_handle timeout_handle; #else struct sppp pp; #endif #if __FreeBSD_version >= 400000 dev_t devt; #endif } drv_t; static void ct_receive (ct_chan_t *c, char *data, int len); static void ct_transmit (ct_chan_t *c, void *attachment, int len); static void ct_error (ct_chan_t *c, int data); static void ct_up (drv_t *d); static void ct_start (drv_t *d); static void ct_down (drv_t *d); static void ct_watchdog (drv_t *d); #ifdef NETGRAPH extern struct ng_type typestruct; #else 
static void ct_ifstart (struct ifnet *ifp); static void ct_tlf (struct sppp *sp); static void ct_tls (struct sppp *sp); static void ct_ifwatchdog (struct ifnet *ifp); static int ct_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void ct_initialize (void *softc); #endif static ct_board_t *adapter [NCTAU]; static drv_t *channel [NCTAU*NCHAN]; static struct callout_handle led_timo [NCTAU]; static struct callout_handle timeout_handle; /* * Print the mbuf chain, for debug purposes only. */ static void printmbuf (struct mbuf *m) { printf ("mbuf:"); for (; m; m=m->m_next) { if (m->m_flags & M_PKTHDR) printf (" HDR %d:", m->m_pkthdr.len); if (m->m_flags & M_EXT) printf (" EXT:"); printf (" %d", m->m_len); } printf ("\n"); } /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, u_int len) { struct mbuf *m; MGETHDR (m, M_DONTWAIT, MT_DATA); if (! m) return 0; MCLGET (m, M_DONTWAIT); if (! (m->m_flags & M_EXT)) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static void ct_timeout (void *arg) { drv_t *d; int s, i; for (i=0; ichan->mode != M_G703) continue; s = splimp (); ct_g703_timer (d->chan); splx (s); } timeout_handle = timeout (ct_timeout, 0, hz); } static void ct_led_off (void *arg) { ct_board_t *b = arg; int s = splimp (); ct_led (b, 0); led_timo[b->num].callout = 0; splx (s); } /* * Activate interupt handler from DDK. */ #if __FreeBSD_version >= 400000 static void ct_intr (void *arg) { bdrv_t *bd = arg; ct_board_t *b = bd->board; #else static void ct_intr (int bnum) { ct_board_t *b = adapter [bnum]; #endif int s = splimp (); /* Turn LED on. */ ct_led (b, 1); ct_int_handler (b); /* Turn LED off 50 msec later. */ if (! led_timo[b->num].callout) led_timo[b->num] = timeout (ct_led_off, b, hz/20); splx (s); } static int probe_irq (ct_board_t *b, int irq) { int mask, busy, cnt; /* Clear pending irq, if any. 
*/ ct_probe_irq (b, -irq); DELAY (100); for (cnt=0; cnt<5; ++cnt) { /* Get the mask of pending irqs, assuming they are busy. * Activate the adapter on given irq. */ busy = ct_probe_irq (b, irq); DELAY (1000); /* Get the mask of active irqs. * Deactivate our irq. */ mask = ct_probe_irq (b, -irq); DELAY (100); if ((mask & ~busy) == 1 << irq) { ct_probe_irq (b, 0); /* printf ("ct%d: irq %d ok, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ return 1; } } /* printf ("ct%d: irq %d not functional, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ ct_probe_irq (b, 0); return 0; } static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; static char dmatab [] = { 7, 6, 5, 0 }; static char irqtab [] = { 5, 10, 11, 7, 3, 15, 12, 0 }; #if __FreeBSD_version >= 400000 static int ct_is_free_res (device_t dev, int rid, int type, u_long start, u_long end, u_long count) { struct resource *res; if (!(res = bus_alloc_resource (dev, type, &rid, start, end, count, RF_ALLOCATED))) return 0; bus_release_resource (dev, type, rid, res); return 1; } static void ct_identify (driver_t *driver, device_t dev) { u_long iobase, rescount; int devcount; device_t *devices; device_t child; devclass_t my_devclass; int i, k; if ((my_devclass = devclass_find ("ct")) == NULL) return; devclass_get_devices (my_devclass, &devices, &devcount); if (devcount == 0) { /* We should find all devices by our self. 
We could alter other * devices, but we don't have a choise */ for (i = 0; (iobase = porttab [i]) != 0; i++) { if (!ct_is_free_res (dev, 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; devcount++; child = BUS_ADD_CHILD (dev, ISA_ORDER_SPECULATIVE, "ct", -1); if (child == NULL) return; device_set_desc_copy (child, "Cronyx Tau-ISA"); device_set_driver (child, driver); bus_set_resource (child, SYS_RES_IOPORT, 0, iobase, NPORT); if (devcount >= NCTAU) break; } } else { static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; /* Lets check user choise. */ for (k = 0; k < devcount; k++) { if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) continue; for (i = 0; porttab [i] != 0; i++) { if (porttab [i] != iobase) continue; if (!ct_is_free_res (devices[k], 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Tau-ISA"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); devices[k] = 0; continue; } } for (k = 0; k < devcount; k++) { if (devices[k] == 0) continue; if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) == 0) continue; for (i = 0; (iobase = porttab [i]) != 0; i++) { if (porttab [i] == -1) continue; if (!ct_is_free_res (devices[k], 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; bus_set_resource (devices[k], SYS_RES_IOPORT, 0, iobase, NPORT); porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Tau-ISA"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); } } free (devices, M_TEMP); } return; } static int ct_probe (device_t dev) { int unit = device_get_unit (dev); u_long iobase, 
rescount; if (!device_get_desc (dev) || strcmp (device_get_desc (dev), "Cronyx Tau-ISA")) return ENXIO; /* KASSERT ((bd != NULL), ("ct%d: NULL device softc\n", unit));*/ if (bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) { printf ("ct%d: Couldn't get IOPORT\n", unit); return ENXIO; } if (!ct_is_free_res (dev, 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) { printf ("ct%d: Resource IOPORT isn't free\n", unit); return ENXIO; } if (!ct_probe_board (iobase, -1, -1)) { printf ("ct%d: probing for Tau-ISA at %lx faild\n", unit, iobase); return ENXIO; } return 0; } #else /* __FreeBSD_version < 400000 */ static int ct_probe (struct isa_device *id) { int unit = id->id_unit; int iobase; ct_board_t *b; int i; iobase = id->id_iobase; if (iobase < 0) { /* Autodetect the adapter. */ for (i=0; ; i++) { if (! porttab[i]) { iobase = -1; return 0; } iobase = porttab[i]; if (unit > 0 && adapter[0] && adapter[0]->port == iobase) continue; if (unit > 1 && adapter[1] && adapter[1]->port == iobase) continue; if (! haveseen_isadev (id, CC_IOADDR | CC_QUIET) && ct_probe_board (iobase, -1, -1)) break; } } else if (! ct_probe_board (iobase, -1, -1)) return 0; if (id->id_drq < 0) { /* Find available 16-bit DRQ. */ for (i=0; ; ++i) { if (! dmatab[i]) { printf ("ct%d: no available drq found\n", unit); id->id_drq = -1; return 0; } id->id_drq = dmatab[i]; if (! haveseen_isadev (id, CC_DRQ | CC_QUIET) && !isa_dma_acquire (id->id_drq)) break; } } b = malloc (sizeof (ct_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ct:%d: Couldn't allocate memory\n", unit); return (ENXIO); } adapter[unit] = b; bzero (b, sizeof(ct_board_t)); if (! ct_open_board (b, unit, iobase, id->id_irq ? ffs (id->id_irq) - 1 : -1, id->id_drq)) { printf ("ct%d: error loading firmware\n", unit); adapter [unit] = 0; free (b, M_DEVBUF); isa_dma_release (id->id_drq); return 0; } if (id->id_irq) { if (! 
probe_irq (b, ffs (id->id_irq) - 1)) printf ("ct%d: irq %d not functional\n", unit, ffs (id->id_irq) - 1); } else { /* Find available IRQ. */ for (i=0; ; ++i) { if (! irqtab[i]) { printf ("ct%d: no available irq found\n", unit); id->id_irq = -1; isa_dma_release (id->id_drq); adapter [unit] = 0; free (b, M_DEVBUF); return 0; } id->id_irq = 1 << irqtab[i]; if (haveseen_isadev (id, CC_IRQ | CC_QUIET)) continue; #ifdef KLD_MODULE if (register_intr (irqtab[i], 0, 0, (inthand2_t*) ct_intr, &net_imask, unit) != 0) continue; unregister_intr (irqtab[i], (inthand2_t*) ct_intr); #endif if (probe_irq (b, irqtab[i])) break; } } ct_init_board (b, b->num, b->port, ffs (id->id_irq) - 1, b->dma, b->type, b->osc); ct_setup_board (b, 0, 0, 0); return 1; } #endif /* __FreeBSD_version < 400000 */ extern struct cdevsw ct_cdevsw; /* * The adapter is present, initialize the driver structures. */ #if __FreeBSD_version < 400000 static int ct_attach (struct isa_device *id) { #else static int ct_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); u_long iobase, drq, irq, rescount; int unit = device_get_unit (dev); int i; int s; #endif ct_board_t *b; ct_chan_t *c; drv_t *d; #if __FreeBSD_version >= 400000 KASSERT ((bd != NULL), ("ct%d: NULL device softc\n", unit)); bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount); bd->base_rid = 0; bd->base_res = bus_alloc_resource (dev, SYS_RES_IOPORT, &bd->base_rid, iobase, iobase + NPORT, NPORT, RF_ACTIVE); if (! 
bd->base_res) { printf ("ct%d: cannot alloc base address\n", unit); return ENXIO; } if (bus_get_resource (dev, SYS_RES_DRQ, 0, &drq, &rescount) != 0) { for (i = 0; (drq = dmatab [i]) != 0; i++) { if (!ct_is_free_res (dev, 1, SYS_RES_DRQ, drq, drq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_DRQ, 0, drq, 1); break; } if (dmatab[i] == 0) { bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("ct%d: Couldn't get DRQ\n", unit); return ENXIO; } } bd->drq_rid = 0; bd->drq_res = bus_alloc_resource (dev, SYS_RES_DRQ, &bd->drq_rid, drq, drq + 1, 1, RF_ACTIVE); if (! bd->drq_res) { printf ("ct%d: cannot allocate drq\n", unit); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } if (bus_get_resource (dev, SYS_RES_IRQ, 0, &irq, &rescount) != 0) { for (i = 0; (irq = irqtab [i]) != 0; i++) { if (!ct_is_free_res (dev, 1, SYS_RES_IRQ, irq, irq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_IRQ, 0, irq, 1); break; } if (irqtab[i] == 0) { bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("ct%d: Couldn't get IRQ\n", unit); return ENXIO; } } bd->irq_rid = 0; bd->irq_res = bus_alloc_resource (dev, SYS_RES_IRQ, &bd->irq_rid, irq, irq + 1, 1, RF_ACTIVE); if (! bd->irq_res) { printf ("ct%d: Couldn't allocate irq\n", unit); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } b = malloc (sizeof (ct_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ct:%d: Couldn't allocate memory\n", unit); return (ENXIO); } adapter[unit] = b; bzero (b, sizeof(ct_board_t)); if (! 
ct_open_board (b, unit, iobase, irq, drq)) { printf ("ct%d: error loading firmware\n", unit); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } bd->board = b; if (! probe_irq (b, irq)) { printf ("ct%d: irq %ld not functional\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } s = splimp (); if (bus_setup_intr (dev, bd->irq_res, INTR_TYPE_NET, ct_intr, bd, &bd->intrhand)) { printf ("ct%d: Can't setup irq %ld\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); splx (s); return ENXIO; } ct_init_board (b, b->num, b->port, irq, drq, b->type, b->osc); ct_setup_board (b, 0, 0, 0); #else b = adapter [id->id_unit]; #endif printf ("ct%d: , clock %s MHz\n", b->num, b->name, b->osc == 20000000 ? 
"20" : "16.384"); #if __FreeBSD_version < 400000 id->id_ointr = ct_intr; #endif for (c=b->chan; cchan+NCHAN; ++c) { d = contigmalloc (sizeof(drv_t), M_DEVBUF, M_WAITOK, 0x100000, 0x1000000, 16, 0); channel [b->num*NCHAN + c->num] = d; bzero (d, sizeof(drv_t)); sprintf (d->name, "ct%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; #if __FreeBSD_version < 400000 free (d, M_DEVBUF); #else contigfree (d, sizeof (*d), M_DEVBUF); #endif continue; } #if __FreeBSD_version >= 500000 NG_NODE_SET_PRIVATE (d->node, d); #else d->node->private = d; #endif sprintf (d->nodename, "%s%d", NG_CT_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); #if __FreeBSD_version >= 500000 NG_NODE_UNREF (d->node); #else ng_rmnode (d->node); ng_unref (d->node); #endif channel [b->num*NCHAN + c->num] = 0; c->sys = 0; #if __FreeBSD_version < 400000 free (d, M_DEVBUF); #else contigfree (d, sizeof (*d), M_DEVBUF); #endif continue; } d->queue.ifq_maxlen = IFQ_MAXLEN; d->hi_queue.ifq_maxlen = IFQ_MAXLEN; #if __FreeBSD_version >= 500000 mtx_init (&d->queue.ifq_mtx, "ct_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "ct_queue_hi", NULL, MTX_DEF); #endif #else /*NETGRAPH*/ d->pp.pp_if.if_softc = d; #if __FreeBSD_version > 501000 if_initname (&d->pp.pp_if, "ct", b->num * NCHAN + c->num); #else d->pp.pp_if.if_unit = b->num * NCHAN + c->num; d->pp.pp_if.if_name = "ct"; #endif d->pp.pp_if.if_mtu = PP_MTU; d->pp.pp_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->pp.pp_if.if_ioctl = ct_sioctl; d->pp.pp_if.if_start = ct_ifstart; d->pp.pp_if.if_watchdog = ct_ifwatchdog; d->pp.pp_if.if_init = ct_initialize; sppp_attach (&d->pp.pp_if); if_attach (&d->pp.pp_if); d->pp.pp_tlf = ct_tlf; d->pp.pp_tls = ct_tls; #if __FreeBSD_version >= 400000 || 
NBPFILTER > 0 /* If BPF is in the kernel, call the attach for it. * Header size is 4 bytes. */ bpfattach (&d->pp.pp_if, DLT_PPP, 4); #endif #endif /*NETGRAPH*/ ct_start_chan (c, &d->buf, vtophys (&d->buf)); ct_register_receive (c, &ct_receive); ct_register_transmit (c, &ct_transmit); ct_register_error (c, &ct_error); #if __FreeBSD_version >= 400000 d->devt = make_dev (&ct_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "ct%d", b->num*NCHAN+c->num); } splx (s); return 0; #else /* __FreeBSD_version < 400000 */ } return 1; #endif /*__FreeBSD_version */ } #if __FreeBSD_version >= 400000 static int ct_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); ct_board_t *b = bd->board; ct_chan_t *c; int s = splimp (); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; if (d->running) { splx (s); return EBUSY; } } /* Deactivate the timeout routine. */ if (led_timo[b->num].callout) untimeout (ct_led_off, b, led_timo[b->num]); bus_teardown_intr (dev, bd->irq_res, bd->intrhand); bus_deactivate_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_deactivate_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_deactivate_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); ct_close_board (b); /* Detach the interfaces, free buffer memory. 
*/ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; #ifdef NETGRAPH #if __FreeBSD_version >= 500000 if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #else ng_rmnode (d->node); d->node = 0; #endif #else #if __FreeBSD_version >= 410000 && NBPFILTER > 0 /* Detach from the packet filter list of interfaces. */ bpfdetach (&d->pp.pp_if); #endif /* Detach from the sync PPP list. */ sppp_detach (&d->pp.pp_if); if_detach (&d->pp.pp_if); #endif destroy_dev (d->devt); } ct_led_off (b); if (led_timo[b->num].callout) untimeout (ct_led_off, b, led_timo[b->num]); splx (s); s = splimp (); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; /* Deallocate buffers. */ #if __FreeBSD_version < 400000 free (d, M_DEVBUF); #else contigfree (d, sizeof (*d), M_DEVBUF); #endif } bd->board = 0; adapter [b->num] = 0; free (b, M_DEVBUF); splx (s); return 0; } #endif #ifndef NETGRAPH static void ct_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; ct_start (d); } static void ct_ifwatchdog (struct ifnet *ifp) { drv_t *d = ifp->if_softc; ct_watchdog (d); } static void ct_tlf (struct sppp *sp) { drv_t *d = sp->pp_if.if_softc; CT_DEBUG (d, ("ct_tlf\n")); /* ct_set_dtr (d->chan, 0);*/ /* ct_set_rts (d->chan, 0);*/ sp->pp_down (sp); } static void ct_tls (struct sppp *sp) { drv_t *d = sp->pp_if.if_softc; CT_DEBUG (d, ("ct_tls\n")); sp->pp_up (sp); } /* * Initialization of interface. * Ii seems to be never called by upper level. */ static void ct_initialize (void *softc) { drv_t *d = softc; CT_DEBUG (d, ("ct_initialize\n")); } /* * Process an ioctl request. 
*/ static int ct_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; int error, s, was_up, should_be_up; was_up = (ifp->if_flags & IFF_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else if (! d->chan->debug) d->chan->debug = 1; switch (cmd) { default: CT_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CT_DEBUG2 (d, ("SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CT_DEBUG2 (d, ("SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CT_DEBUG2 (d, ("SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CT_DEBUG2 (d, ("SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); should_be_up = (ifp->if_flags & IFF_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ ct_up (d); ct_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((d->pp.pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ ct_down (d); } splx (s); return 0; } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void ct_down (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("ct_down\n")); ct_set_dtr (d->chan, 0); ct_set_rts (d->chan, 0); d->running = 0; splx (s); } /* * Start the interface. Called on splimp(). */ static void ct_up (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("ct_up\n")); ct_set_dtr (d->chan, 1); ct_set_rts (d->chan, 1); d->running = 1; splx (s); } /* * Start output on the (slave) interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void ct_send (drv_t *d) { struct mbuf *m; u_short len; CT_DEBUG2 (d, ("ct_send, tn=%d\n", d->chan->tn)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! ct_get_dsr (d->chan) && !ct_get_loop (d->chan)) return; while (ct_buf_free (d->chan)) { /* Get the packet to send. 
*/ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (&d->pp.pp_if); #endif if (! m) return; #if (__FreeBSD_version >= 400000 || NBPFILTER > 0) && !defined (NETGRAPH) if (d->pp.pp_if.if_bpf) #if __FreeBSD_version >= 500000 BPF_MTAP (&d->pp.pp_if, m); #else bpf_mtap (&d->pp.pp_if, m); #endif #endif len = m->m_pkthdr.len; if (! m->m_next) ct_send_packet (d->chan, (u_char*)mtod (m, caddr_t), len, 0); else { m_copydata (m, 0, len, d->chan->tbuf[d->chan->te]); ct_send_packet (d->chan, d->chan->tbuf[d->chan->te], len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty. * Transmit timeout is 10 seconds. */ #ifdef NETGRAPH d->timeout = 10; #else d->pp.pp_if.if_timer = 10; #endif } #ifndef NETGRAPH d->pp.pp_if.if_flags |= IFF_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void ct_start (drv_t *d) { int s = splimp (); if (d->running) { if (! d->chan->dtr) ct_set_dtr (d->chan, 1); if (! d->chan->rts) ct_set_rts (d->chan, 1); ct_send (d); } splx (s); } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void ct_watchdog (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("device timeout\n")); if (d->running) { ct_setup_chan (d->chan); ct_start_chan (d->chan, 0, 0); ct_set_dtr (d->chan, 1); ct_set_rts (d->chan, 1); ct_start (d); } splx (s); } /* * Transmit callback function. */ static void ct_transmit (ct_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; if (!d) return; #ifdef NETGRAPH d->timeout = 0; #else ++d->pp.pp_if.if_opackets; d->pp.pp_if.if_flags &= ~IFF_OACTIVE; d->pp.pp_if.if_timer = 0; #endif ct_start (d); } /* * Process the received packet. */ static void ct_receive (ct_chan_t *c, char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #if __FreeBSD_version >= 500000 && defined NETGRAPH int error; #endif if (!d || !d->running) return; m = makembuf (data, len); if (! 
m) { CT_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_iqdrops; #endif return; } if (c->debug > 1) printmbuf (m); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; #if __FreeBSD_version >= 500000 NG_SEND_DATA_ONLY (error, d->hook, m); #else ng_queue_data (d->hook, m, 0); #endif #else ++d->pp.pp_if.if_ipackets; m->m_pkthdr.rcvif = &d->pp.pp_if; #if __FreeBSD_version >= 400000 || NBPFILTER > 0 /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ if (d->pp.pp_if.if_bpf) #if __FreeBSD_version >= 500000 BPF_TAP (&d->pp.pp_if, data, len); #else bpf_tap (&d->pp.pp_if, data, len); #endif #endif sppp_input (&d->pp.pp_if, m); #endif } /* * Error callback function. */ static void ct_error (ct_chan_t *c, int data) { drv_t *d = c->sys; if (!d) return; switch (data) { case CT_FRAME: CT_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_ierrors; #endif break; case CT_CRC: CT_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_ierrors; #endif break; case CT_OVERRUN: CT_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_collisions; ++d->pp.pp_if.if_ierrors; #endif break; case CT_OVERFLOW: CT_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_ierrors; #endif break; case CT_UNDERRUN: CT_DEBUG (d, ("underrun error\n")); #ifdef NETGRAPH d->timeout = 0; #else ++d->pp.pp_if.if_oerrors; d->pp.pp_if.if_flags &= ~IFF_OACTIVE; d->pp.pp_if.if_timer = 0; #endif ct_start (d); break; default: CT_DEBUG (d, ("error #%d\n", data)); } } #if __FreeBSD_version < 500000 static int ct_open (dev_t dev, int oflags, int devtype, struct proc *p) #else static int ct_open (dev_t dev, int oflags, int devtype, struct thread *td) #endif { drv_t *d; if (minor(dev) >= NCTAU*NCHAN || ! 
(d = channel[minor(dev)])) return ENXIO; CT_DEBUG2 (d, ("ct_open\n")); return 0; } #if __FreeBSD_version < 500000 static int ct_close (dev_t dev, int fflag, int devtype, struct proc *p) #else static int ct_close (dev_t dev, int fflag, int devtype, struct thread *td) #endif { drv_t *d = channel [minor(dev)]; if (!d) return 0; CT_DEBUG2 (d, ("ct_close\n")); return 0; } static int ct_modem_status (ct_chan_t *c) { drv_t *d = c->sys; int status, s; if (!d) return 0; status = d->running ? TIOCM_LE : 0; s = splimp (); if (ct_get_cd (c)) status |= TIOCM_CD; if (ct_get_cts (c)) status |= TIOCM_CTS; if (ct_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; splx (s); return status; } /* * Process an ioctl request on /dev/cronyx/ctauN. */ #if __FreeBSD_version < 500000 static int ct_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) #else static int ct_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) #endif { drv_t *d = channel [minor (dev)]; ct_chan_t *c; struct serial_statistics *st; struct e1_statistics *opte1; int error, s; char mask[16]; if (!d || !d->chan) return 0; c = d->chan; switch (cmd) { case SERIAL_GETREGISTERED: bzero (mask, sizeof(mask)); for (s=0; spp.pp_flags & PP_FR) ? "fr" : (d->pp.pp_if.if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (d->pp.pp_if.if_flags & IFF_RUNNING) return EBUSY; if (! strcmp ("cisco", (char*)data)) { d->pp.pp_flags &= ~(PP_FR); d->pp.pp_flags |= PP_KEEPALIVE; d->pp.pp_if.if_flags |= PP_CISCO; } else if (! strcmp ("fr", (char*)data)) { d->pp.pp_if.if_flags &= ~(PP_CISCO); d->pp.pp_flags |= PP_FR | PP_KEEPALIVE; } else if (! 
strcmp ("ppp", (char*)data)) { d->pp.pp_flags &= ~(PP_FR | PP_KEEPALIVE); d->pp.pp_if.if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: if ((d->pp.pp_flags & PP_FR) || (d->pp.pp_if.if_flags & PP_CISCO)) return EINVAL; *(int*)data = (d->pp.pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if ((d->pp.pp_flags & PP_FR) || (d->pp.pp_if.if_flags & PP_CISCO)) return EINVAL; if (*(int*)data) d->pp.pp_flags |= PP_KEEPALIVE; else d->pp.pp_flags &= ~PP_KEEPALIVE; return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: *(int*)data = SERIAL_HDLC; return 0; case SERIAL_GETCFG: if (c->mode == M_HDLC) return EINVAL; switch (ct_get_config (c->board)) { default: *(char*)data = 'a'; break; case CFG_B: *(char*)data = 'b'; break; case CFG_C: *(char*)data = 'c'; break; } return 0; case SERIAL_SETCFG: /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_HDLC) return EINVAL; s = splimp (); switch (*(char*)data) { case 'a': ct_set_config (c->board, CFG_A); break; case 'b': ct_set_config (c->board, CFG_B); break; case 'c': ct_set_config (c->board, CFG_C); break; } splx (s); return 0; case SERIAL_GETSTAT: st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = c->mintr; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->ierrs = c->ierrs; st->obytes = c->obytes; st->opkts = c->opkts; st->oerrs = c->oerrs; return 0; case SERIAL_GETESTAT: opte1 = (struct e1_statistics*)data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = 
c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_CLRSTAT: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; c->rintr = 0; c->tintr = 0; c->mintr = 0; c->ibytes = 0; c->ipkts = 0; c->ierrs = 0; c->obytes = 0; c->opkts = 0; c->oerrs = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); return 0; case SERIAL_GETBAUD: *(long*)data = ct_get_baud(c); return 0; case SERIAL_SETBAUD: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); ct_set_baud (c, *(long*)data); splx (s); return 0; case SERIAL_GETLOOP: *(int*)data = ct_get_loop (c); return 0; case SERIAL_SETLOOP: /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); ct_set_loop (c, *(int*)data); splx (s); return 0; case SERIAL_GETDPLL: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_dpll (c); return 0; case SERIAL_SETDPLL: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); ct_set_dpll (c, *(int*)data); splx (s); return 0; case SERIAL_GETNRZI: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_nrzi (c); return 0; case SERIAL_SETNRZI: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); ct_set_nrzi (c, *(int*)data); splx (s); return 0; case SERIAL_GETDEBUG: *(int*)data = c->debug; return 0; case SERIAL_SETDEBUG: /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; c->debug = *(int*)data; #ifndef NETGRAPH if (d->chan->debug) d->pp.pp_if.if_flags |= IFF_DEBUG; else d->pp.pp_if.if_flags &= (~IFF_DEBUG); #endif return 0; case SERIAL_GETHIGAIN: if (c->mode != M_E1) return EINVAL; *(int*)data = ct_get_higain (c); return 0; case SERIAL_SETHIGAIN: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); ct_set_higain (c, *(int*)data); splx (s); return 0; case SERIAL_GETPHONY: CT_DEBUG2 (d, ("ioctl: getphony\n")); if (c->mode != M_E1) return EINVAL; *(int*)data = c->gopt.phony; return 0; case SERIAL_SETPHONY: CT_DEBUG2 (d, ("ioctl: setphony\n")); if (c->mode != M_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); ct_set_phony (c, *(int*)data); splx (s); return 0; case SERIAL_GETCLK: if (c->mode != M_E1 && c->mode != M_G703) return EINVAL; switch (ct_get_clk(c)) { default: *(int*)data = E1CLK_INTERNAL; break; case GCLK_RCV: *(int*)data = E1CLK_RECEIVE; break; case GCLK_RCLKO: *(int*)data = c->num ? E1CLK_RECEIVE_CHAN0 : E1CLK_RECEIVE_CHAN1; break; } return 0; case SERIAL_SETCLK: /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); switch (*(int*)data) { default: ct_set_clk (c, GCLK_INT); break; case E1CLK_RECEIVE: ct_set_clk (c, GCLK_RCV); break; case E1CLK_RECEIVE_CHAN0: case E1CLK_RECEIVE_CHAN1: ct_set_clk (c, GCLK_RCLKO); break; } splx (s); return 0; case SERIAL_GETTIMESLOTS: if (c->mode != M_E1) return EINVAL; *(long*)data = ct_get_ts (c); return 0; case SERIAL_SETTIMESLOTS: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); ct_set_ts (c, *(long*)data); splx (s); return 0; case SERIAL_GETSUBCHAN: if (c->mode != M_E1) return EINVAL; *(long*)data = ct_get_subchan (c->board); return 0; case SERIAL_SETSUBCHAN: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splimp (); ct_set_subchan (c->board, *(long*)data); splx (s); return 0; case SERIAL_GETINVCLK: case SERIAL_GETINVTCLK: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_invtxc (c); return 0; case SERIAL_GETINVRCLK: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_invrxc (c); return 0; case SERIAL_SETINVCLK: case SERIAL_SETINVTCLK: /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); ct_set_invtxc (c, *(int*)data); splx (s); return 0; case SERIAL_SETINVRCLK: /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); ct_set_invrxc (c, *(int*)data); splx (s); return 0; case SERIAL_GETLEVEL: if (c->mode != M_G703) return EINVAL; s = splimp (); *(int*)data = ct_get_lq (c); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); ct_set_dtr (c, 1); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); ct_set_dtr (c, 0); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); ct_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); ct_set_rts (c, (*(int*)data & TIOCM_RTS) ? 
1 : 0); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); if (*(int*)data & TIOCM_DTR) ct_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) ct_set_rts (c, 1); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); if (*(int*)data & TIOCM_DTR) ct_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) ct_set_rts (c, 0); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = ct_modem_status (c); return 0; } return ENOTTY; } #if __FreeBSD_version < 400000 struct isa_driver ctdriver = { ct_probe, ct_attach, "ct" }; static struct cdevsw ct_cdevsw = { ct_open, ct_close, noread, nowrite, ct_ioctl, nostop, noreset, nodevtotty, seltrue, nommap, NULL, "ct", NULL, -1, }; #elif __FreeBSD_version < 500000 static struct cdevsw ct_cdevsw = { ct_open, ct_close, noread, nowrite, ct_ioctl, nopoll, nommap, nostrategy, "ct", CDEV_MAJOR, nodump, nopsize, D_NAGGED, -1 }; #elif __FreeBSD_version == 500000 static struct cdevsw ct_cdevsw = { ct_open, ct_close, noread, nowrite, ct_ioctl, nopoll, nommap, nostrategy, "ct", CDEV_MAJOR, nodump, nopsize, D_NAGGED, }; #elif __FreeBSD_version <= 501000 static struct cdevsw ct_cdevsw = { .d_open = ct_open, .d_close = ct_close, .d_read = noread, .d_write = nowrite, .d_ioctl = ct_ioctl, .d_poll = nopoll, .d_mmap = nommap, .d_strategy = nostrategy, .d_name = "ct", .d_maj = CDEV_MAJOR, .d_dump = nodump, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 502103 static struct cdevsw ct_cdevsw = { .d_open = ct_open, .d_close = ct_close, .d_ioctl = ct_ioctl, .d_name = "ct", .d_maj = CDEV_MAJOR, .d_flags = D_NAGGED, }; #else /* __FreeBSD_version >= 502103 */ static struct cdevsw ct_cdevsw = { .d_version = D_VERSION, .d_open = ct_open, .d_close = ct_close, .d_ioctl = ct_ioctl, .d_name = "ct", .d_maj = CDEV_MAJOR, .d_flags = D_NEEDGIANT, }; #endif /* __FreeBSD_version > 501000 */ #ifdef NETGRAPH #if __FreeBSD_version >= 500000 static int ng_ct_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); #else static int 
ng_ct_constructor (node_p *node) { drv_t *d = (*node)->private; #endif CT_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_ct_newhook (node_p node, hook_p hook, const char *name) { int s; #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); #else drv_t *d = node->private; #endif if (!d) return EINVAL; /* Attach debug hook */ if (strcmp (name, NG_CT_HOOK_DEBUG) == 0) { #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, NULL); #else hook->private = 0; #endif d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CT_HOOK_RAW) != 0) return EINVAL; #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, d); #else hook->private = d; #endif d->hook = hook; s = splimp (); ct_up (d); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, ct_chan_t *c, int need_header) { int status = ct_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? 
"On" : "-"); return length; } static int print_stats (char *s, ct_chan_t *c, int need_header) { struct serial_statistics st; int length = 0; st.rintr = c->rintr; st.tintr = c->tintr; st.mintr = c->mintr; st.ibytes = c->ibytes; st.ipkts = c->ipkts; st.ierrs = c->ierrs; st.obytes = c->obytes; st.opkts = c->opkts; st.oerrs = c->oerrs; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8ld %7ld %7ld %8ld %7ld %7ld\n", st.rintr, st.tintr, st.mintr, st.ibytes, st.ipkts, st.ierrs, st.obytes, st.opkts, st.oerrs); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? 
'/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, ct_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += 
print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. */ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, ct_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "ct%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); switch (ct_get_config (c->board)) { case CFG_A: length += sprintf (s + length, " cfg=A"); break; case CFG_B: length += sprintf (s + length, " cfg=B"); break; case CFG_C: length += sprintf (s + length, " cfg=C"); break; default: length += sprintf (s + length, " cfg=unknown"); break; } if (ct_get_baud (c)) length += sprintf (s + length, " %ld", ct_get_baud (c)); else length += sprintf (s + length, " extclock"); if (c->mode == M_E1 || c->mode == M_G703) switch (ct_get_clk(c)) { case 
GCLK_INT : length += sprintf (s + length, " syn=int"); break; case GCLK_RCV : length += sprintf (s + length, " syn=rcv"); break; case GCLK_RCLKO : length += sprintf (s + length, " syn=xrcv"); break; } if (c->mode == M_HDLC) { length += sprintf (s + length, " dpll=%s", ct_get_dpll (c) ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", ct_get_nrzi (c) ? "on" : "off"); length += sprintf (s + length, " invtclk=%s", ct_get_invtxc (c) ? "on" : "off"); length += sprintf (s + length, " invrclk=%s", ct_get_invrxc (c) ? "on" : "off"); } if (c->mode == M_E1) length += sprintf (s + length, " higain=%s", ct_get_higain (c)? "on" : "off"); length += sprintf (s + length, " loop=%s", ct_get_loop (c) ? "on" : "off"); if (c->mode == M_E1) length += sprintf (s + length, " ts=%s", format_timeslots (ct_get_ts(c))); if (c->mode == M_E1 && ct_get_config (c->board) != CFG_A) length += sprintf (s + length, " pass=%s", format_timeslots (ct_get_subchan(c->board))); if (c->mode == M_G703) { int lq, x; x = splimp (); lq = ct_get_lq (c); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } #if __FreeBSD_version >= 500000 static int ng_ct_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; #else static int ng_ct_rcvmsg (node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { drv_t *d = node->private; #endif struct ng_mesg *resp = NULL; int error = 0; if (!d) return EINVAL; CT_DEBUG (d, ("Rcvmsg\n")); #if __FreeBSD_version >= 500000 NGI_GET_MSG (item, msg); #endif switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CT_COOKIE: printf ("Don't forget to implement\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; #if __FreeBSD_version >= 500000 NG_MKRESPONSE (resp, msg, 
dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #else MALLOC (resp, struct ng_mesg *, dl, M_NETGRAPH, M_NOWAIT); if (! resp) { error = ENOMEM; break; } bzero (resp, dl); #endif s = (resp)->data; l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); #if __FreeBSD_version < 500000 (resp)->header.version = NG_VERSION; (resp)->header.arglen = strlen (s) + 1; (resp)->header.token = msg->header.token; (resp)->header.typecookie = NGM_CT_COOKIE; (resp)->header.cmd = msg->header.cmd; #endif strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRLEN); } break; } break; } #if __FreeBSD_version >= 500000 NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); #else *rptr = resp; FREE (msg, M_NETGRAPH); #endif return error; } #if __FreeBSD_version >= 500000 static int ng_ct_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; meta_p meta; #else static int ng_ct_rcvdata (hook_p hook, struct mbuf *m, meta_p meta) { drv_t *d = hook->node->private; #endif struct ifqueue *q; int s; if (!d) return ENETDOWN; #if __FreeBSD_version >= 500000 NGI_GET_M (item, m); NGI_GET_META (item, meta); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); NG_FREE_META (meta); #else if (! hook->private || ! d) { NG_FREE_DATA (m,meta); #endif return ENETDOWN; } q = (meta && meta->priority > 0) ? 
&d->hi_queue : &d->queue; s = splimp (); #if __FreeBSD_version >= 500000 IF_LOCK (q); if (_IF_QFULL (q)) { _IF_DROP (q); IF_UNLOCK (q); splx (s); NG_FREE_M (m); NG_FREE_META (meta); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); #else if (IF_QFULL (q)) { IF_DROP (q); splx (s); NG_FREE_DATA (m, meta); return ENOBUFS; } IF_ENQUEUE (q, m); #endif ct_start (d); splx (s); return 0; } static int ng_ct_rmnode (node_p node) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); CT_DEBUG (d, ("Rmnode\n")); if (d && d->running) { int s = splimp (); ct_down (d); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NG_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } node->nd_flags &= ~NG_INVALID; #endif #else /* __FreeBSD_version < 500000 */ drv_t *d = node->private; int s; if (!d) return 0; s = splimp (); ct_down (d); splx (s); node->flags |= NG_INVALID; ng_cutlinks (node); #ifdef KLD_MODULE ng_unname (node); ng_unref (node); #else node->flags &= ~NG_INVALID; #endif #endif return 0; } static void ng_ct_watchdog (void *arg) { drv_t *d = arg; if (!d) return; if (d->timeout == 1) ct_watchdog (d); if (d->timeout) d->timeout--; d->timeout_handle = timeout (ng_ct_watchdog, d, hz); } static int ng_ct_connect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (!d) return 0; d->timeout_handle = timeout (ng_ct_watchdog, d, hz); return 0; } static int ng_ct_disconnect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (!d) return 0; #if __FreeBSD_version >= 500000 if (NG_HOOK_PRIVATE (hook)) #else if (hook->private) #endif ct_down (d); untimeout (ng_ct_watchdog, d, d->timeout_handle); return 0; } #endif #ifdef KLD_MODULE #if __FreeBSD_version < 400000 /* * Function called when loading the driver. 
*/ static int ct_load (void) { int i; for (i=0;iirq, 0, 0, (inthand2_t*) ct_intr, &net_imask, id.id_unit); enable_intr(); } if (!i) { /* Deactivate the timeout routine. */ untimeout (ct_timeout, 0, timeout_handle); return ENXIO; } return 0; } /* * Function called when unloading the driver. */ static int ct_unload (void) { int i, s; /* Check if the device is busy (open). */ for (i=0; irunning) return EBUSY; } /* OK to unload the driver, unregister the interrupt first. */ s = splimp (); /* Deactivate the timeout routine. */ for (i=0; iport) continue; ct_close_board (b); } for (i=0; iport) continue; if (led_timo[i].callout) untimeout (ct_led_off, b, led_timo[i]); } for (i=0; iport) continue; /* Disable the interrupt request. */ disable_intr(); unregister_intr (b->irq, (inthand2_t *)ct_intr); isa_dma_release (b->dma); enable_intr(); } /* Detach the interfaces, free buffer memory. */ for (i=0; i 0 /* Detach from the packet filter list of interfaces. */ { struct bpf_if *q, **b = &bpf_iflist; while ((q = *b)) { if (q->bif_ifp == d->pp.pp_if) { *b = q->bif_next; free (q, M_DEVBUF); } b = &(q->bif_next); } } #endif /* NBPFILTER > 0 */ /* Detach from the sync PPP list. */ sppp_detach (&d->pp.pp_if); /* Detach from the system list of interfaces. */ { struct ifaddr *ifa; TAILQ_FOREACH (ifa, &d->pp.pp_if.if_addrhead, ifa_link) { TAILQ_REMOVE (&d->pp.pp_if.if_addrhead, ifa, ifa_link); free (ifa, M_IFADDR); } TAILQ_REMOVE (&ifnet, &d->pp.pp_if, if_link); } #endif /* !NETGRAPH */ /* Deallocate buffers. 
*/ /* free (d, M_DEVBUF);*/ } for (i=0; i= 400000 */ static int ct_modevent (module_t mod, int type, void *unused) { dev_t dev; static int load_count = 0; struct cdevsw *cdsw; #if __FreeBSD_version >= 502103 dev = udev2dev (makeudev(CDEV_MAJOR, 0)); #else dev = makedev (CDEV_MAJOR, 0); #endif switch (type) { case MOD_LOAD: if (dev != NODEV && (cdsw = devsw (dev)) && cdsw->d_maj == CDEV_MAJOR) { printf ("Tau-ISA driver is already in system\n"); return (ENXIO); } #if __FreeBSD_version >= 500000 && defined NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_ct\n"); #endif ++load_count; #if __FreeBSD_version <= 500000 cdevsw_add (&ct_cdevsw); #endif timeout_handle = timeout (ct_timeout, 0, hz*5); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-ISA\n"); #if __FreeBSD_version <= 500000 cdevsw_remove (&ct_cdevsw); #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH ng_rmtype (&typestruct); #endif } if (timeout_handle.callout) untimeout (ct_timeout, 0, timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #endif /* __FreeBSD_version >= 400000 */ #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CT_NODE_TYPE, .constructor = ng_ct_constructor, .rcvmsg = ng_ct_rcvmsg, .shutdown = ng_ct_rmnode, .newhook = ng_ct_newhook, .connect = ng_ct_connect, .rcvdata = ng_ct_rcvdata, .disconnect = ng_ct_disconnect }; #if __FreeBSD_version < 400000 NETGRAPH_INIT_ORDERED (ct, &typestruct, SI_SUB_DRIVERS,\ SI_ORDER_MIDDLE + CDEV_MAJOR); #endif #endif /*NETGRAPH*/ #if __FreeBSD_version >= 500000 #ifdef NETGRAPH MODULE_DEPEND (ng_ct, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (ct, sppp, 1, 1, 1); #endif #ifdef KLD_MODULE DRIVER_MODULE (ctmod, isa, ct_isa_driver, ct_devclass, ct_modevent, NULL); #else DRIVER_MODULE (ct, isa, ct_isa_driver, ct_devclass, ct_modevent, NULL); #endif #elif __FreeBSD_version >= 400000 #ifdef NETGRAPH 
DRIVER_MODULE(ct, isa, ct_isa_driver, ct_devclass, ng_mod_event, &typestruct); #else DRIVER_MODULE(ct, isa, ct_isa_driver, ct_devclass, ct_modevent, 0); #endif #else /* __FreeBSD_version < 400000 */ #ifdef KLD_MODULE #ifndef NETGRAPH static moduledata_t ctmod = { "ct", ct_modevent, }; DECLARE_MODULE (ct, ctmod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR); #endif /* !NETGRAPH */ #else /* KLD_MODULE */ /* * Now for some driver initialisation. * Occurs ONCE during boot (very early). * This is if we are NOT a loadable module. */ static void ct_drvinit (void *unused) { dev_t dev; dev = makedev (CDEV_MAJOR, 0); cdevsw_add (&dev, &ct_cdevsw, NULL); /* Activate the timeout routine. */ timeout_handle = timeout (ct_timeout, 0, hz); #ifdef NETGRAPH #if 0 /* Register our node type in netgraph */ if (ng_newtype (&typestruct)) printf ("Failed to register ng_ct\n"); #endif #endif } SYSINIT (ctdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR, ct_drvinit, 0) #endif /* KLD_MODULE */ #endif /* __FreeBSD_version < 400000 */ #endif /* NCTAU */ Index: head/sys/dev/cx/if_cx.c =================================================================== --- head/sys/dev/cx/if_cx.c (revision 129878) +++ head/sys/dev/cx/if_cx.c (revision 129879) @@ -1,3258 +1,3259 @@ /* * Cronyx-Sigma adapter driver for FreeBSD. * Supports PPP/HDLC and Cisco/HDLC protocol in synchronous mode, * and asyncronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1994-2002 Cronyx Engineering. * Author: Serge Vakulenko, * * Copyright (C) 1999-2004 Cronyx Engineering. * Rewritten on DDK, ported to NETGRAPH, rewritten for FreeBSD 3.x-5.x by * Kurakin Roman, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
* * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. * * Cronyx Id: if_cx.c,v 1.1.2.23 2004/02/26 17:56:40 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #if __FreeBSD_version >= 500000 # define NCX 1 #else # include "cx.h" #endif #if NCX > 0 #include #include +#include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 400000 # include # include # include # include #endif #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #include #endif #if __FreeBSD_version >= 400000 # include # if __FreeBSD_version <= 501000 # include # endif #endif #if __FreeBSD_version >= 400000 # include # include # include #else # include # include # include #endif #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # include # include # if __FreeBSD_version >= 400000 # include # else # include # endif #else # include # if __FreeBSD_version < 500000 # include "sppp.h" # if NSPPP <= 0 # error The device cx requires sppp or netgraph. 
# endif # endif # include # define PP_CISCO IFF_LINK2 #if __FreeBSD_version < 400000 # include # if NBPFILTER > 0 # include # endif #else # if __FreeBSD_version < 500000 # include # endif # include # define NBPFILTER NBPF #endif #endif /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CX_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CX_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define UNIT(d) (minor(d) & 0x3f) #define IF_CUNIT(d) (minor(d) & 0x40) #define UNIT_CTL 0x3f #define CALLOUT(d) (minor(d) & 0x80) #define CDEV_MAJOR 42 typedef struct _async_q { int beg; int end; #define BF_SZ 14400 int buf[BF_SZ+1]; } async_q; #define AQ_GSZ(q) ((BF_SZ + (q)->end - (q)->beg)%BF_SZ) #define AQ_PUSH(q,c) {*((q)->buf + (q)->end) = c;\ (q)->end = ((q)->end + 1)%BF_SZ;} #define AQ_POP(q,c) {c = *((q)->buf + (q)->beg);\ (q)->beg = ((q)->beg + 1)%BF_SZ;} #if __FreeBSD_version >= 400000 static void cx_identify __P((driver_t *, device_t)); static int cx_probe __P((device_t)); static int cx_attach __P((device_t)); static int cx_detach __P((device_t)); static device_method_t cx_isa_methods [] = { DEVMETHOD(device_identify, cx_identify), DEVMETHOD(device_probe, cx_probe), DEVMETHOD(device_attach, cx_attach), DEVMETHOD(device_detach, cx_detach), {0, 0} }; typedef struct _bdrv_t { cx_board_t *board; struct resource *base_res; struct resource *drq_res; struct resource *irq_res; int base_rid; int drq_rid; int irq_rid; void *intrhand; } bdrv_t; static driver_t cx_isa_driver = { "cx", cx_isa_methods, sizeof (bdrv_t), }; static devclass_t cx_devclass; #endif typedef struct _drv_t { char name [8]; cx_chan_t *chan; cx_board_t *board; cx_buf_t buf; struct tty tty; struct callout_handle dcd_timeout_handle; unsigned dtrwait; unsigned dtroff; unsigned callout; unsigned lock; int open_dev; int cd; int running; struct callout_handle dtr_timeout_handle; 
#ifdef NETGRAPH char nodename [NG_NODELEN+1]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue lo_queue; struct ifqueue hi_queue; short timeout; struct callout_handle timeout_handle; #else struct sppp pp; #endif #if __FreeBSD_version >= 400000 dev_t devt[3]; #endif async_q aqueue; #define CX_READ 1 #define CX_WRITE 2 int intr_action; short atimeout; } drv_t; extern long csigma_fw_len; extern const char *csigma_fw_version; extern const char *csigma_fw_date; extern const char *csigma_fw_copyright; extern const cr_dat_tst_t csigma_fw_tvec[]; extern const u_char csigma_fw_data[]; static void cx_oproc (struct tty *tp); static int cx_param (struct tty *tp, struct termios *t); static void cx_stop (struct tty *tp, int flag); static void cx_dtrwakeup (void *a); static void cx_receive (cx_chan_t *c, char *data, int len); static void cx_transmit (cx_chan_t *c, void *attachment, int len); static void cx_error (cx_chan_t *c, int data); static void cx_modem (cx_chan_t *c); static void cx_up (drv_t *d); static void cx_start (drv_t *d); static void disc_optim(struct tty *tp, struct termios *t); #if __FreeBSD_version < 500000 static swihand_t cx_softintr; #else static void cx_softintr (void *); static void *cx_fast_ih; #endif static void cx_down (drv_t *d); static void cx_watchdog (drv_t *d); static void cx_carrier (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void cx_ifstart (struct ifnet *ifp); static void cx_tlf (struct sppp *sp); static void cx_tls (struct sppp *sp); static void cx_ifwatchdog (struct ifnet *ifp); static int cx_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void cx_initialize (void *softc); #endif static cx_board_t *adapter [NCX]; static drv_t *channel [NCX*NCHAN]; static struct callout_handle led_timo [NCX]; static struct callout_handle timeout_handle; #if __FreeBSD_version >= 400000 extern struct cdevsw cx_cdevsw; #endif static int MY_SOFT_INTR; /* * Print the mbuf chain, for debug purposes only. 
*/ static void printmbuf (struct mbuf *m) { printf ("mbuf:"); for (; m; m=m->m_next) { if (m->m_flags & M_PKTHDR) printf (" HDR %d:", m->m_pkthdr.len); if (m->m_flags & M_EXT) printf (" EXT:"); printf (" %d", m->m_len); } printf ("\n"); } /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, u_int len) { struct mbuf *m, *o, *p; MGETHDR (m, M_DONTWAIT, MT_DATA); if (! m) return 0; if (len >= MINCLSIZE) MCLGET (m, M_DONTWAIT); m->m_pkthdr.len = len; m->m_len = 0; p = m; while (len) { u_int n = M_TRAILINGSPACE (p); if (n > len) n = len; if (! n) { /* Allocate new mbuf. */ o = p; MGET (p, M_DONTWAIT, MT_DATA); if (! p) { m_freem (m); return 0; } if (len >= MINCLSIZE) MCLGET (p, M_DONTWAIT); p->m_len = 0; o->m_next = p; n = M_TRAILINGSPACE (p); if (n > len) n = len; } bcopy (buf, mtod (p, caddr_t) + p->m_len, n); p->m_len += n; buf = n + (char*) buf; len -= n; } return m; } /* * Recover after lost transmit interrupts. */ static void cx_timeout (void *arg) { drv_t *d; int s, i; for (i=0; iatimeout == 1 && d->tty.t_state & TS_BUSY) { d->tty.t_state &= ~TS_BUSY; if (d->tty.t_dev) { d->intr_action |= CX_WRITE; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } CX_DEBUG (d, ("cx_timeout\n")); } if (d->atimeout) d->atimeout--; splx (s); } timeout_handle = timeout (cx_timeout, 0, hz*5); } static void cx_led_off (void *arg) { cx_board_t *b = arg; int s = splhigh (); cx_led (b, 0); led_timo[b->num].callout = 0; splx (s); } /* * Activate interupt handler from DDK. */ #if __FreeBSD_version >= 400000 static void cx_intr (void *arg) { bdrv_t *bd = arg; cx_board_t *b = bd->board; #else static void cx_intr (int bnum) { cx_board_t *b = adapter [bnum]; #endif int s = splhigh (); /* Turn LED on. */ cx_led (b, 1); cx_int_handler (b); /* Turn LED off 50 msec later. */ if (! 
led_timo[b->num].callout) led_timo[b->num] = timeout (cx_led_off, b, hz/20); splx (s); } static int probe_irq (cx_board_t *b, int irq) { int mask, busy, cnt; /* Clear pending irq, if any. */ cx_probe_irq (b, -irq); DELAY (100); for (cnt=0; cnt<5; ++cnt) { /* Get the mask of pending irqs, assuming they are busy. * Activate the adapter on given irq. */ busy = cx_probe_irq (b, irq); DELAY (100); /* Get the mask of active irqs. * Deactivate our irq. */ mask = cx_probe_irq (b, -irq); DELAY (100); if ((mask & ~busy) == 1 << irq) { cx_probe_irq (b, 0); /* printf ("cx%d: irq %d ok, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ return 1; } } /* printf ("cx%d: irq %d not functional, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ cx_probe_irq (b, 0); return 0; } static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; static char dmatab [] = { 7, 6, 5, 0 }; static char irqtab [] = { 5, 10, 11, 7, 3, 15, 12, 0 }; #if __FreeBSD_version >= 400000 static int cx_is_free_res (device_t dev, int rid, int type, u_long start, u_long end, u_long count) { struct resource *res; if (!(res = bus_alloc_resource (dev, type, &rid, start, end, count, RF_ALLOCATED))) return 0; bus_release_resource (dev, type, rid, res); return 1; } static void cx_identify (driver_t *driver, device_t dev) { u_long iobase, rescount; int devcount; device_t *devices; device_t child; devclass_t my_devclass; int i, k; if ((my_devclass = devclass_find ("cx")) == NULL) return; devclass_get_devices (my_devclass, &devices, &devcount); if (devcount == 0) { /* We should find all devices by our self. 
We could alter other * devices, but we don't have a choise */ for (i = 0; (iobase = porttab [i]) != 0; i++) { if (!cx_is_free_res (dev, 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (cx_probe_board (iobase, -1, -1) == 0) continue; devcount++; child = BUS_ADD_CHILD (dev, ISA_ORDER_SPECULATIVE, "cx", -1); if (child == NULL) return; device_set_desc_copy (child, "Cronyx Sigma"); device_set_driver (child, driver); bus_set_resource (child, SYS_RES_IOPORT, 0, iobase, NPORT); if (devcount >= NCX) break; } } else { static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; /* Lets check user choise. */ for (k = 0; k < devcount; k++) { if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) continue; for (i = 0; porttab [i] != 0; i++) { if (porttab [i] != iobase) continue; if (!cx_is_free_res (devices[k], 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (cx_probe_board (iobase, -1, -1) == 0) continue; porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Sigma"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); devices[k] = 0; continue; } } for (k = 0; k < devcount; k++) { if (devices[k] == 0) continue; if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) == 0) continue; for (i = 0; (iobase = porttab [i]) != 0; i++) { if (porttab [i] == -1) { continue; } if (!cx_is_free_res (devices[k], 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (cx_probe_board (iobase, -1, -1) == 0) continue; bus_set_resource (devices[k], SYS_RES_IOPORT, 0, iobase, NPORT); porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Sigma"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); } } free (devices, M_TEMP); } return; } static int cx_probe (device_t dev) { int unit = device_get_unit (dev); int i; u_long iobase, 
rescount; if (!device_get_desc (dev) || strcmp (device_get_desc (dev), "Cronyx Sigma")) return ENXIO; if (bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) { printf ("cx%d: Couldn't get IOPORT\n", unit); return ENXIO; } if (!cx_is_free_res (dev, 1, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) { printf ("cx%d: Resource IOPORT isn't free %lx\n", unit, iobase); return ENXIO; } for (i = 0; porttab [i] != 0; i++) { if (porttab [i] == iobase) { porttab [i] = -1; break; } } if (porttab [i] == 0) { return ENXIO; } if (!cx_probe_board (iobase, -1, -1)) { printf ("cx%d: probing for Sigma at %lx faild\n", unit, iobase); return ENXIO; } return 0; } #else /* __FreeBSD_version < 400000 */ static int cx_probe (struct isa_device *id) { cx_board_t *b; int i; #ifndef NETGRAPH if (! sppp_attach) { printf ("cx%d: no synchronous PPP driver configured\n", id->id_unit); return 0; } #endif if (id->id_iobase < 0) { /* Autodetect the adapter. */ for (i=0; ; i++) { if (! porttab[i]) { id->id_iobase = -1; return 0; } id->id_iobase = porttab[i]; if (id->id_unit > 0 && adapter[0] && adapter[0]->port == id->id_iobase) continue; if (id->id_unit > 1 && adapter[1] && adapter[1]->port == id->id_iobase) continue; if (! haveseen_isadev (id, CC_IOADDR | CC_QUIET) && cx_probe_board (id->id_iobase, -1, -1)) break; } } else if (! cx_probe_board (id->id_iobase, -1, -1)) return 0; if (id->id_drq < 0) { /* Find available 16-bit DRQ. */ for (i=0; ; ++i) { if (! dmatab[i]) { printf ("cx%d: no available drq found\n", id->id_unit); id->id_drq = -1; return 0; } id->id_drq = dmatab[i]; if (! haveseen_isadev (id, CC_DRQ | CC_QUIET) && !isa_dma_acquire (id->id_drq)) break; } } b = malloc (sizeof (cx_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cx:%d: Couldn't allocate memory\n", id->id_unit); return (ENXIO); } adapter[id->id_unit] = b; bzero (b, sizeof(cx_board_t)); if (! cx_open_board (b, id->id_unit, id->id_iobase, id->id_irq ? 
ffs (id->id_irq) - 1 : -1, id->id_drq)) { printf ("cx%d: cannot initialize adapter\n", id->id_unit); isa_dma_release (id->id_drq); adapter[id->id_unit] = 0; free (b, M_DEVBUF); return 0; } if (id->id_irq) { if (! probe_irq (b, ffs (id->id_irq) - 1)) printf ("cx%d: irq %d not functional\n", id->id_unit, ffs (id->id_irq) - 1); } else { /* Find available IRQ. */ for (i=0; ; ++i) { if (! irqtab[i]) { printf ("cx%d: no available irq found\n", id->id_unit); id->id_irq = -1; isa_dma_release (id->id_drq); adapter[id->id_unit] = 0; free (b, M_DEVBUF); return 0; } id->id_irq = 1 << irqtab[i]; if (haveseen_isadev (id, CC_IRQ | CC_QUIET)) continue; #ifdef KLD_MODULE if (register_intr (irqtab[i], 0, 0, (inthand2_t*) cx_intr, &net_imask, id->id_unit) != 0) continue; unregister_intr (irqtab[i], (inthand2_t*) cx_intr); #endif if (probe_irq (b, irqtab[i])) break; } } cx_init (b, b->num, b->port, ffs (id->id_irq) - 1, b->dma); cx_setup_board (b, 0, 0, 0); return 1; } #endif /* __FreeBSD_version < 400000 */ /* * The adapter is present, initialize the driver structures. */ #if __FreeBSD_version < 400000 static int cx_attach (struct isa_device *id) { #else static int cx_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); u_long iobase, drq, irq, rescount; int unit = device_get_unit (dev); int i; int s; #endif cx_board_t *b; cx_chan_t *c; drv_t *d; #if __FreeBSD_version >= 400000 KASSERT ((bd != NULL), ("cx%d: NULL device softc\n", unit)); bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount); bd->base_rid = 0; bd->base_res = bus_alloc_resource (dev, SYS_RES_IOPORT, &bd->base_rid, iobase, iobase + NPORT, NPORT, RF_ACTIVE); if (! 
bd->base_res) { printf ("cx%d: cannot allocate base address\n", unit); return ENXIO; } if (bus_get_resource (dev, SYS_RES_DRQ, 0, &drq, &rescount) != 0) { for (i = 0; (drq = dmatab [i]) != 0; i++) { if (!cx_is_free_res (dev, 1, SYS_RES_DRQ, drq, drq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_DRQ, 0, drq, 1); break; } if (dmatab[i] == 0) { bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("cx%d: Couldn't get DRQ\n", unit); return ENXIO; } } bd->drq_rid = 0; bd->drq_res = bus_alloc_resource (dev, SYS_RES_DRQ, &bd->drq_rid, drq, drq + 1, 1, RF_ACTIVE); if (! bd->drq_res) { printf ("cx%d: cannot allocate drq\n", unit); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } if (bus_get_resource (dev, SYS_RES_IRQ, 0, &irq, &rescount) != 0) { for (i = 0; (irq = irqtab [i]) != 0; i++) { if (!cx_is_free_res (dev, 1, SYS_RES_IRQ, irq, irq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_IRQ, 0, irq, 1); break; } if (irqtab[i] == 0) { bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("cx%d: Couldn't get IRQ\n", unit); return ENXIO; } } bd->irq_rid = 0; bd->irq_res = bus_alloc_resource (dev, SYS_RES_IRQ, &bd->irq_rid, irq, irq + 1, 1, RF_ACTIVE); if (! bd->irq_res) { printf ("cx%d: Couldn't allocate irq\n", unit); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } b = malloc (sizeof (cx_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cx:%d: Couldn't allocate memory\n", unit); return (ENXIO); } adapter[unit] = b; bzero (b, sizeof(cx_board_t)); if (! 
cx_open_board (b, unit, iobase, irq, drq)) { printf ("cx%d: error loading firmware\n", unit); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } bd->board = b; if (! probe_irq (b, irq)) { printf ("cx%d: irq %ld not functional\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } s = splhigh (); if (bus_setup_intr (dev, bd->irq_res, INTR_TYPE_NET, cx_intr, bd, &bd->intrhand)) { printf ("cx%d: Can't setup irq %ld\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); splx (s); return ENXIO; } cx_init (b, b->num, b->port, irq, drq); cx_setup_board (b, 0, 0, 0); #else /* __FreeBSD_version >= 400000 */ b = adapter[id->id_unit]; #endif /* __FreeBSD_version >= 400000 */ printf ("cx%d: \n", b->num, b->name); #if __FreeBSD_version < 400000 id->id_ointr = cx_intr; #endif for (c=b->chan; cchan+NCHAN; ++c) { #if __FreeBSD_version >= 400000 char *dnmt="tty %x"; char *dnmc="cua %x"; #endif if (c->type == T_NONE) continue; d = contigmalloc (sizeof(drv_t), M_DEVBUF, M_WAITOK, 0x100000, 0x1000000, 16, 0); channel [b->num*NCHAN + c->num] = d; bzero (d, sizeof(drv_t)); sprintf (d->name, "cx%d.%d", b->num, c->num); d->board = b; d->chan = c; d->tty.t_oproc = cx_oproc; d->tty.t_param = cx_param; #if __FreeBSD_version >= 400000 d->tty.t_stop = cx_stop; #endif d->dtrwait = 3 * hz; /* Default DTR off timeout is 3 seconds. 
*/ d->open_dev = 0; c->sys = d; switch (c->type) { case T_SYNC_RS232: case T_SYNC_V35: case T_SYNC_RS449: case T_UNIV: case T_UNIV_RS232: case T_UNIV_RS449: case T_UNIV_V35: #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; #if __FreeBSD_version < 400000 free (d, M_DEVBUF); #else contigfree (d, sizeof (*d), M_DEVBUF); #endif continue; } #if __FreeBSD_version >= 500000 NG_NODE_SET_PRIVATE (d->node, d); #else d->node->private = d; #endif sprintf (d->nodename, "%s%d", NG_CX_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); #if __FreeBSD_version >= 500000 NG_NODE_UNREF (d->node); #else ng_rmnode (d->node); ng_unref (d->node); #endif channel [b->num*NCHAN + c->num] = 0; c->sys = 0; #if __FreeBSD_version < 400000 free (d, M_DEVBUF); #else contigfree (d, sizeof (*d), M_DEVBUF); #endif continue; } d->lo_queue.ifq_maxlen = IFQ_MAXLEN; d->hi_queue.ifq_maxlen = IFQ_MAXLEN; #if __FreeBSD_version >= 500000 mtx_init (&d->lo_queue.ifq_mtx, "cx_queue_lo", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "cx_queue_hi", NULL, MTX_DEF); #endif #else /*NETGRAPH*/ d->pp.pp_if.if_softc = d; #if __FreeBSD_version > 501000 if_initname (&d->pp.pp_if, "cx", b->num * NCHAN + c->num); #else d->pp.pp_if.if_unit = b->num * NCHAN + c->num; d->pp.pp_if.if_name = "cx"; #endif d->pp.pp_if.if_mtu = PP_MTU; d->pp.pp_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->pp.pp_if.if_ioctl = cx_sioctl; d->pp.pp_if.if_start = cx_ifstart; d->pp.pp_if.if_watchdog = cx_ifwatchdog; d->pp.pp_if.if_init = cx_initialize; sppp_attach (&d->pp.pp_if); if_attach (&d->pp.pp_if); d->pp.pp_tlf = cx_tlf; d->pp.pp_tls = cx_tls; #if __FreeBSD_version >= 400000 || NBPFILTER > 0 /* If BPF is in the kernel, call the attach for it. * Size of PPP header is 4 bytes. 
*/ bpfattach (&d->pp.pp_if, DLT_PPP, 4); #endif #endif /*NETGRAPH*/ } cx_start_chan (c, &d->buf, vtophys (&d->buf)); cx_register_receive (c, &cx_receive); cx_register_transmit (c, &cx_transmit); cx_register_error (c, &cx_error); cx_register_modem (c, &cx_modem); #if __FreeBSD_version >= 400000 dnmt[3] = 'x'+b->num; dnmc[3] = 'x'+b->num; d->devt[0] = make_dev (&cx_cdevsw, b->num*NCHAN + c->num, UID_ROOT, GID_WHEEL, 0644, dnmt, b->num*NCHAN + c->num); d->devt[1] = make_dev (&cx_cdevsw, b->num*NCHAN + c->num + 64, UID_ROOT, GID_WHEEL, 0600, "cx%d", b->num*NCHAN + c->num); d->devt[2] = make_dev (&cx_cdevsw, b->num*NCHAN + c->num + 128, UID_ROOT, GID_WHEEL, 0660, dnmc, b->num*NCHAN + c->num); } splx (s); return 0; #else /* __FreeBSD_version < 400000 */ } return 1; #endif } #if __FreeBSD_version >= 400000 static int cx_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); cx_board_t *b = bd->board; cx_chan_t *c; int s = splhigh (); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || d->chan->type == T_NONE) continue; if (d->lock) { splx (s); return EBUSY; } if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (d->open_dev|0x2)) { splx (s); return EBUSY; } if (d->running) { splx (s); return EBUSY; } } /* Deactivate the timeout routine. 
And soft interrupt*/ if (led_timo[b->num].callout) untimeout (cx_led_off, b, led_timo[b->num]); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = c->sys; if (!d || d->chan->type == T_NONE) continue; if (d->dtr_timeout_handle.callout) untimeout (cx_dtrwakeup, d, d->dtr_timeout_handle); if (d->dcd_timeout_handle.callout) untimeout (cx_carrier, c, d->dcd_timeout_handle); } bus_teardown_intr (dev, bd->irq_res, bd->intrhand); bus_deactivate_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_deactivate_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_deactivate_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); cx_close_board (b); /* Detach the interfaces, free buffer memory. */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || d->chan->type == T_NONE) continue; #ifdef NETGRAPH #if __FreeBSD_version >= 500000 if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->lo_queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #else ng_rmnode (d->node); d->node = NULL; #endif #else #if __FreeBSD_version >= 410000 && NBPFILTER > 0 /* Detach from the packet filter list of interfaces. */ bpfdetach (&d->pp.pp_if); #endif /* Detach from the sync PPP list. */ sppp_detach (&d->pp.pp_if); if_detach (&d->pp.pp_if); #endif destroy_dev (d->devt[0]); destroy_dev (d->devt[1]); destroy_dev (d->devt[2]); } cx_led_off (b); if (led_timo[b->num].callout) untimeout (cx_led_off, b, led_timo[b->num]); splx (s); s = splhigh (); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || d->chan->type == T_NONE) continue; /* Deallocate buffers. 
*/ contigfree (d, sizeof (*d), M_DEVBUF); } bd->board = 0; adapter [b->num] = 0; free (b, M_DEVBUF); splx (s); return 0; } #endif #ifndef NETGRAPH static void cx_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; cx_start (d); } static void cx_ifwatchdog (struct ifnet *ifp) { drv_t *d = ifp->if_softc; cx_watchdog (d); } static void cx_tlf (struct sppp *sp) { drv_t *d = sp->pp_if.if_softc; CX_DEBUG (d, ("cx_tlf\n")); /* cx_set_dtr (d->chan, 0);*/ /* cx_set_rts (d->chan, 0);*/ sp->pp_down (sp); } static void cx_tls (struct sppp *sp) { drv_t *d = sp->pp_if.if_softc; CX_DEBUG (d, ("cx_tls\n")); sp->pp_up (sp); } /* * Initialization of interface. * It seems to be never called by upper level. */ static void cx_initialize (void *softc) { drv_t *d = softc; CX_DEBUG (d, ("cx_initialize\n")); } /* * Process an ioctl request. */ static int cx_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; int error, s, was_up, should_be_up; /* No socket ioctls while the channel is in async mode. */ if (d->chan->type == T_NONE || d->chan->mode == M_ASYNC) return EBUSY; /* Socket ioctls on slave subchannels are not allowed. */ was_up = (ifp->if_flags & IFF_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else if (! d->chan->debug) d->chan->debug = 1; switch (cmd) { default: CX_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CX_DEBUG2 (d, ("SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CX_DEBUG2 (d, ("SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CX_DEBUG2 (d, ("SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CX_DEBUG2 (d, ("SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splhigh (); should_be_up = (ifp->if_flags & IFF_RUNNING) != 0; if (!was_up && should_be_up) { /* Interface goes up -- start it. */ cx_up (d); cx_start (d); } else if (was_up && !should_be_up) { /* Interface is going down -- stop it. 
*/ /* if ((d->pp.pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ cx_down (d); } splx (s); return 0; } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void cx_down (drv_t *d) { int s = splhigh (); CX_DEBUG (d, ("cx_down\n")); cx_set_dtr (d->chan, 0); cx_set_rts (d->chan, 0); d->running = 0; splx (s); } /* * Start the interface. Called on splimp(). */ static void cx_up (drv_t *d) { int s = splhigh (); CX_DEBUG (d, ("cx_up\n")); cx_set_dtr (d->chan, 1); cx_set_rts (d->chan, 1); d->running = 1; splx (s); } /* * Start output on the (slave) interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void cx_send (drv_t *d) { struct mbuf *m; u_short len; CX_DEBUG2 (d, ("cx_send\n")); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! cx_get_dsr (d->chan) && ! cx_get_loop(d->chan)) return; if (cx_buf_free (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->lo_queue, m); #else m = sppp_dequeue (&d->pp.pp_if); #endif if (! m) return; #if (__FreeBSD_version >= 400000 || NBPFILTER > 0) && !defined (NETGRAPH) if (d->pp.pp_if.if_bpf) #if __FreeBSD_version >= 500000 BPF_MTAP (&d->pp.pp_if, m); #else bpf_mtap (&d->pp.pp_if, m); #endif #endif len = m->m_pkthdr.len; if (! m->m_next) cx_send_packet (d->chan, (u_char*)mtod (m, caddr_t), len, 0); else { u_char buf [DMABUFSZ]; m_copydata (m, 0, len, buf); cx_send_packet (d->chan, buf, len, 0); } m_freem (m); /* Set up transmit timeout, 10 seconds. */ #ifdef NETGRAPH d->timeout = 10; #else d->pp.pp_if.if_timer = 10; #endif } #ifndef NETGRAPH d->pp.pp_if.if_flags |= IFF_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void cx_start (drv_t *d) { int s = splhigh (); if (d->running) { if (! d->chan->dtr) cx_set_dtr (d->chan, 1); if (! 
d->chan->rts) cx_set_rts (d->chan, 1); cx_send (d); } splx (s); } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void cx_watchdog (drv_t *d) { int s = splhigh (); CX_DEBUG (d, ("device timeout\n")); if (d->running) { cx_setup_chan (d->chan); cx_start_chan (d->chan, 0, 0); cx_set_dtr (d->chan, 1); cx_set_rts (d->chan, 1); cx_start (d); } splx (s); } /* * Transmit callback function. */ static void cx_transmit (cx_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; if (!d) return; if (c->mode == M_ASYNC) { d->tty.t_state &= ~(TS_BUSY | TS_FLUSH); d->atimeout = 0; if (d->tty.t_dev) { d->intr_action |= CX_WRITE; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } return; } #ifdef NETGRAPH d->timeout = 0; #else ++d->pp.pp_if.if_opackets; d->pp.pp_if.if_flags &= ~IFF_OACTIVE; d->pp.pp_if.if_timer = 0; #endif cx_start (d); } /* * Process the received packet. */ static void cx_receive (cx_chan_t *c, char *data, int len) { drv_t *d = c->sys; struct mbuf *m; char *cc = data; #if __FreeBSD_version >= 500000 && defined NETGRAPH int error; #endif if (!d) return; if (c->mode == M_ASYNC) { if (d->tty.t_state & TS_ISOPEN) { async_q *q = &d->aqueue; int size = BF_SZ - 1 - AQ_GSZ (q); if (len <= 0 && !size) return; if (len > size) { c->ierrs++; cx_error (c, CX_OVERRUN); len = size - 1; } while (len--) { AQ_PUSH (q, *(unsigned char *)cc); cc++; } d->intr_action |= CX_READ; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } return; } if (! d->running) return; m = makembuf (data, len); if (! 
m) { CX_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH ++d->pp.pp_if.if_iqdrops; #endif return; } if (c->debug > 1) printmbuf (m); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; #if __FreeBSD_version >= 500000 NG_SEND_DATA_ONLY (error, d->hook, m); #else ng_queue_data (d->hook, m, 0); #endif #else ++d->pp.pp_if.if_ipackets; m->m_pkthdr.rcvif = &d->pp.pp_if; #if __FreeBSD_version >= 400000 || NBPFILTER > 0 /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ if (d->pp.pp_if.if_bpf) #if __FreeBSD_version >= 500000 BPF_TAP (&d->pp.pp_if, data, len); #else bpf_tap (&d->pp.pp_if, data, len); #endif #endif sppp_input (&d->pp.pp_if, m); #endif } #define CONDITION(t,tp) (!(t->c_iflag & (ICRNL | IGNCR | IMAXBEL | INLCR | ISTRIP | IXON))\ && (!(tp->t_iflag & BRKINT) || (tp->t_iflag & IGNBRK))\ && (!(tp->t_iflag & PARMRK)\ || (tp->t_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))\ && !(t->c_lflag & (ECHO | ICANON | IEXTEN | ISIG | PENDIN))\ && linesw[tp->t_line].l_rint == ttyinput) /* * Error callback function. 
*/ static void cx_error (cx_chan_t *c, int data) { drv_t *d = c->sys; async_q *q; if (!d) return; q = &(d->aqueue); switch (data) { case CX_FRAME: CX_DEBUG (d, ("frame error\n")); if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty.t_termios), (&d->tty)) || !(d->tty.t_iflag & (IGNPAR | PARMRK)))) { AQ_PUSH (q, TTY_FE); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } #ifndef NETGRAPH else ++d->pp.pp_if.if_ierrors; #endif break; case CX_CRC: CX_DEBUG (d, ("crc error\n")); if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty.t_termios), (&d->tty)) || !(d->tty.t_iflag & INPCK) || !(d->tty.t_iflag & (IGNPAR | PARMRK)))) { AQ_PUSH (q, TTY_PE); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } #ifndef NETGRAPH else ++d->pp.pp_if.if_ierrors; #endif break; case CX_OVERRUN: CX_DEBUG (d, ("overrun error\n")); #ifdef TTY_OE if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty.t_termios), (&d->tty)))) { AQ_PUSH (q, TTY_OE); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } #endif #ifndef NETGRAPH else { ++d->pp.pp_if.if_collisions; ++d->pp.pp_if.if_ierrors; } #endif break; case CX_OVERFLOW: CX_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if (c->mode != M_ASYNC) ++d->pp.pp_if.if_ierrors; #endif break; case CX_UNDERRUN: CX_DEBUG (d, ("underrun error\n")); if (c->mode != M_ASYNC) { #ifdef NETGRAPH d->timeout = 0; #else ++d->pp.pp_if.if_oerrors; d->pp.pp_if.if_flags &= ~IFF_OACTIVE; d->pp.pp_if.if_timer = 0; cx_start (d); #endif } break; case CX_BREAK: CX_DEBUG (d, ("break error\n")); if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) 
&& (!CONDITION((&d->tty.t_termios), (&d->tty)) || !(d->tty.t_iflag & (IGNBRK | BRKINT | PARMRK)))) { AQ_PUSH (q, TTY_BI); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; #if __FreeBSD_version >= 500000 swi_sched (cx_fast_ih, 0); #else setsofttty (); #endif } #ifndef NETGRAPH else ++d->pp.pp_if.if_ierrors; #endif break; default: CX_DEBUG (d, ("error #%d\n", data)); } } #if __FreeBSD_version < 500000 static int cx_open (dev_t dev, int flag, int mode, struct proc *p) #else static int cx_open (dev_t dev, int flag, int mode, struct thread *td) #endif { int unit = UNIT (dev); drv_t *d; int error; if (unit >= NCX*NCHAN || ! (d = channel[unit])) return ENXIO; CX_DEBUG2 (d, ("cx_open unit=%d, flag=0x%x, mode=0x%x\n", unit, flag, mode)); if (d->chan->mode != M_ASYNC || IF_CUNIT(dev)) { d->open_dev |= 0x1; return 0; } #if __FreeBSD_version >= 400000 dev->si_tty = &d->tty; #endif d->tty.t_dev = dev; again: if (d->dtroff) { error = tsleep (&d->dtrwait, TTIPRI | PCATCH, "cxdtr", 0); if (error) return error; goto again; } if ((d->tty.t_state & TS_ISOPEN) && (d->tty.t_state & TS_XCLUDE) && #if __FreeBSD_version >= 500000 suser (td)) #else p->p_ucred->cr_uid != 0) #endif return EBUSY; if (d->tty.t_state & TS_ISOPEN) { /* * Cannot open /dev/cua if /dev/tty already opened. */ if (CALLOUT (dev) && ! d->callout) return EBUSY; /* * Opening /dev/tty when /dev/cua is already opened. * Wait for close, then try again. */ if (! CALLOUT (dev) && d->callout) { if (flag & O_NONBLOCK) return EBUSY; error = tsleep (d, TTIPRI | PCATCH, "cxbi", 0); if (error) return error; goto again; } } else if (d->lock && ! CALLOUT (dev) && (flag & O_NONBLOCK)) /* * We try to open /dev/tty in non-blocking mode * while somebody is already waiting for carrier on it. 
*/ return EBUSY; else { ttychars (&d->tty); if (d->tty.t_ispeed == 0) { d->tty.t_iflag = 0; d->tty.t_oflag = 0; d->tty.t_lflag = 0; d->tty.t_cflag = CREAD | CS8 | HUPCL; d->tty.t_ispeed = d->chan->rxbaud; d->tty.t_ospeed = d->chan->txbaud; } if (CALLOUT (dev)) d->tty.t_cflag |= CLOCAL; else d->tty.t_cflag &= ~CLOCAL; cx_param (&d->tty, &d->tty.t_termios); ttsetwater (&d->tty); } splhigh (); if (! (d->tty.t_state & TS_ISOPEN)) { cx_start_chan (d->chan, 0, 0); cx_set_dtr (d->chan, 1); cx_set_rts (d->chan, 1); d->cd = cx_get_cd (d->chan); if (CALLOUT (dev) || cx_get_cd (d->chan)) (*linesw[d->tty.t_line].l_modem) (&d->tty, 1); } if (! (flag & O_NONBLOCK) && ! (d->tty.t_cflag & CLOCAL) && ! (d->tty.t_state & TS_CARR_ON)) { /* Lock the channel against cxconfig while we are * waiting for carrier. */ d->lock++; error = tsleep (&d->tty.t_rawq, TTIPRI | PCATCH, "cxdcd", 0); /* Unlock the channel. */ d->lock--; spl0 (); if (error) goto failed; goto again; } error = (*linesw[d->tty.t_line].l_open) (dev, &d->tty); disc_optim (&d->tty, &d->tty.t_termios); spl0 (); if (error) { failed: if (! (d->tty.t_state & TS_ISOPEN)) { splhigh (); cx_set_dtr (d->chan, 0); cx_set_rts (d->chan, 0); if (d->dtrwait) { d->dtr_timeout_handle = timeout (cx_dtrwakeup, d, d->dtrwait); d->dtroff = 1; } spl0 (); } return error; } if (d->tty.t_state & TS_ISOPEN) d->callout = CALLOUT (dev) ? 1 : 0; d->open_dev |= 0x2; CX_DEBUG2 (d, ("cx_open done\n")); return 0; } #if __FreeBSD_version < 500000 static int cx_close (dev_t dev, int flag, int mode, struct proc *p) #else static int cx_close (dev_t dev, int flag, int mode, struct thread *td) #endif { drv_t *d = channel [UNIT (dev)]; int s; CX_DEBUG2 (d, ("cx_close\n")); if ((!(d->open_dev&0x2)) || IF_CUNIT(dev)){ d->open_dev &= ~0x1; return 0; } s = splhigh (); (*linesw[d->tty.t_line].l_close) (&d->tty, flag); disc_optim (&d->tty, &d->tty.t_termios); /* Disable receiver. * Transmitter continues sending the queued data. 
*/ cx_enable_receive (d->chan, 0); /* Clear DTR and RTS. */ if ((d->tty.t_cflag & HUPCL) || ! (d->tty.t_state & TS_ISOPEN)) { cx_set_dtr (d->chan, 0); cx_set_rts (d->chan, 0); if (d->dtrwait) { d->dtr_timeout_handle = timeout (cx_dtrwakeup, d, d->dtrwait); d->dtroff = 1; } } ttyclose (&d->tty); splx (s); d->callout = 0; /* Wake up bidirectional opens. */ wakeup (d); d->open_dev &= ~0x2; return 0; } static int cx_read (dev_t dev, struct uio *uio, int flag) { drv_t *d = channel [UNIT (dev)]; if (d) CX_DEBUG2 (d, ("cx_read\n")); if (!d || d->chan->mode != M_ASYNC || IF_CUNIT(dev)) return EBADF; return (*linesw[d->tty.t_line].l_read) (&d->tty, uio, flag); } static int cx_write (dev_t dev, struct uio *uio, int flag) { drv_t *d = channel [UNIT (dev)]; if (d) CX_DEBUG2 (d, ("cx_write\n")); if (!d || d->chan->mode != M_ASYNC || IF_CUNIT(dev)) return EBADF; return (*linesw[d->tty.t_line].l_write) (&d->tty, uio, flag); } static int cx_modem_status (drv_t *d) { int status = 0, s = splhigh (); /* Already opened by someone or network interface is up? 
*/ if ((d->chan->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (d->open_dev|0x2)) || (d->chan->mode != M_ASYNC && d->running)) status = TIOCM_LE; /* always enabled while open */ if (cx_get_dsr (d->chan)) status |= TIOCM_DSR; if (cx_get_cd (d->chan)) status |= TIOCM_CD; if (cx_get_cts (d->chan)) status |= TIOCM_CTS; if (d->chan->dtr) status |= TIOCM_DTR; if (d->chan->rts) status |= TIOCM_RTS; splx (s); return status; } #if __FreeBSD_version < 500000 static int cx_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) #else static int cx_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) #endif { drv_t *d = channel [UNIT (dev)]; cx_chan_t *c; struct serial_statistics *st; int error, s; char mask[16]; if (!d || !(c = d->chan)) return EINVAL; switch (cmd) { case SERIAL_GETREGISTERED: CX_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; sp_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splhigh (); cx_set_port (c, *(int *)data); splx (s); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: CX_DEBUG2 (d, ("ioctl: getproto\n")); s = splhigh (); strcpy ((char*)data, (c->mode == M_ASYNC) ? "async" : (d->pp.pp_flags & PP_FR) ? "fr" : (d->pp.pp_if.if_flags & PP_CISCO) ? "cisco" : "ppp"); splx (s); return 0; case SERIAL_SETPROTO: CX_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_ASYNC) return EBUSY; if (d->pp.pp_if.if_flags & IFF_RUNNING) return EBUSY; if (! 
strcmp ("cisco", (char*)data)) { d->pp.pp_flags &= ~(PP_FR); d->pp.pp_flags |= PP_KEEPALIVE; d->pp.pp_if.if_flags |= PP_CISCO; } else if (! strcmp ("fr", (char*)data)) { d->pp.pp_if.if_flags &= ~(PP_CISCO); d->pp.pp_flags |= PP_FR | PP_KEEPALIVE; } else if (! strcmp ("ppp", (char*)data)) { d->pp.pp_flags &= ~(PP_FR | PP_KEEPALIVE); d->pp.pp_if.if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CX_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((d->pp.pp_flags & PP_FR) || (d->pp.pp_if.if_flags & PP_CISCO) || (c->mode == M_ASYNC)) return EINVAL; s = splhigh (); *(int*)data = (d->pp.pp_flags & PP_KEEPALIVE) ? 1 : 0; splx (s); return 0; case SERIAL_SETKEEPALIVE: CX_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if ((d->pp.pp_flags & PP_FR) || (d->pp.pp_if.if_flags & PP_CISCO)) return EINVAL; s = splhigh (); if (*(int*)data) d->pp.pp_flags |= PP_KEEPALIVE; else d->pp.pp_flags &= ~PP_KEEPALIVE; splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CX_DEBUG2 (d, ("ioctl: getmode\n")); s = splhigh (); *(int*)data = (c->mode == M_ASYNC) ? SERIAL_ASYNC : SERIAL_HDLC; splx (s); return 0; case SERIAL_SETMODE: CX_DEBUG2 (d, ("ioctl: setmode\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; /* Somebody is waiting for carrier? */ if (d->lock) return EBUSY; /* /dev/ttyXX is already opened by someone? */ if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (d->open_dev|0x2)) return EBUSY; /* Network interface is up? * Cannot change to async mode. 
*/ if (c->mode != M_ASYNC && d->running && (*(int*)data == SERIAL_ASYNC)) return EBUSY; s = splhigh (); if (c->mode == M_HDLC && *(int*)data == SERIAL_ASYNC) { cx_set_mode (c, M_ASYNC); cx_enable_receive (c, 0); cx_enable_transmit (c, 0); } else if (c->mode == M_ASYNC && *(int*)data == SERIAL_HDLC) { cx_set_mode (c, M_HDLC); cx_enable_receive (c, 1); cx_enable_transmit (c, 1); } splx (s); return 0; case SERIAL_GETSTAT: CX_DEBUG2 (d, ("ioctl: getestat\n")); st = (struct serial_statistics*) data; s = splhigh (); st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = c->mintr; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->ierrs = c->ierrs; st->obytes = c->obytes; st->opkts = c->opkts; st->oerrs = c->oerrs; splx (s); return 0; case SERIAL_CLRSTAT: CX_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splhigh (); c->rintr = 0; c->tintr = 0; c->mintr = 0; c->ibytes = 0; c->ipkts = 0; c->ierrs = 0; c->obytes = 0; c->opkts = 0; c->oerrs = 0; splx (s); return 0; case SERIAL_GETBAUD: CX_DEBUG2 (d, ("ioctl: getbaud\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); *(long*)data = cx_get_baud(c); splx (s); return 0; case SERIAL_SETBAUD: CX_DEBUG2 (d, ("ioctl: setbaud\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); cx_set_baud (c, *(long*)data); splx (s); return 0; case SERIAL_GETLOOP: CX_DEBUG2 (d, ("ioctl: getloop\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); *(int*)data = cx_get_loop (c); splx (s); return 0; case SERIAL_SETLOOP: CX_DEBUG2 (d, ("ioctl: setloop\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); cx_set_loop (c, *(int*)data); splx (s); return 0; case SERIAL_GETDPLL: CX_DEBUG2 (d, ("ioctl: getdpll\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); *(int*)data = cx_get_dpll (c); splx (s); return 0; case SERIAL_SETDPLL: CX_DEBUG2 (d, ("ioctl: setdpll\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); cx_set_dpll (c, *(int*)data); splx (s); return 0; case SERIAL_GETNRZI: CX_DEBUG2 (d, ("ioctl: getnrzi\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); *(int*)data = cx_get_nrzi (c); splx (s); return 0; case SERIAL_SETNRZI: CX_DEBUG2 (d, ("ioctl: setnrzi\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); cx_set_nrzi (c, *(int*)data); splx (s); return 0; case SERIAL_GETDEBUG: CX_DEBUG2 (d, ("ioctl: getdebug\n")); s = splhigh (); *(int*)data = c->debug; splx (s); return 0; case SERIAL_SETDEBUG: CX_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splhigh (); c->debug = *(int*)data; splx (s); #ifndef NETGRAPH if (d->chan->debug) d->pp.pp_if.if_flags |= IFF_DEBUG; else d->pp.pp_if.if_flags &= (~IFF_DEBUG); #endif return 0; } if (c->mode == M_ASYNC) { #if __FreeBSD_version >= 500000 error = (*linesw[d->tty.t_line].l_ioctl) (&d->tty, cmd, data, flag, td); #else error = (*linesw[d->tty.t_line].l_ioctl) (&d->tty, cmd, data, flag, p); #endif disc_optim (&d->tty, &d->tty.t_termios); if (error != ENOIOCTL) { if (error) CX_DEBUG2 (d, ("l_ioctl: 0x%lx, error %d\n", cmd, error)); return error; } error = ttioctl (&d->tty, cmd, data, flag); disc_optim (&d->tty, &d->tty.t_termios); if (error != ENOIOCTL) { if (error) CX_DEBUG2 (d, ("ttioctl: 0x%lx, error %d\n", cmd, error)); return error; } } switch (cmd) { case TIOCSBRK: /* Start sending line break */ CX_DEBUG2 (d, ("ioctl: tiocsbrk\n")); s = splhigh (); cx_send_break (c, 500); splx (s); return 0; case TIOCCBRK: /* Stop sending line break */ CX_DEBUG2 (d, ("ioctl: tioccbrk\n")); return 0; case TIOCSDTR: /* Set DTR */ CX_DEBUG2 (d, ("ioctl: tiocsdtr\n")); s = splhigh (); cx_set_dtr (c, 1); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ CX_DEBUG2 (d, ("ioctl: 
tioccdtr\n")); s = splhigh (); cx_set_dtr (c, 0); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ CX_DEBUG2 (d, ("ioctl: tiocmset\n")); s = splhigh (); cx_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); cx_set_rts (c, (*(int*)data & TIOCM_RTS) ? 1 : 0); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ CX_DEBUG2 (d, ("ioctl: tiocmbis\n")); s = splhigh (); if (*(int*)data & TIOCM_DTR) cx_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) cx_set_rts (c, 1); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ CX_DEBUG2 (d, ("ioctl: tiocmbic\n")); s = splhigh (); if (*(int*)data & TIOCM_DTR) cx_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) cx_set_rts (c, 0); splx (s); return 0; case TIOCMGET: /* Get modem status */ CX_DEBUG2 (d, ("ioctl: tiocmget\n")); *(int*)data = cx_modem_status (d); return 0; #ifdef TIOCMSDTRWAIT case TIOCMSDTRWAIT: CX_DEBUG2 (d, ("ioctl: tiocmsdtrwait\n")); /* Only for superuser! */ #if __FreeBSD_version < 400000 error = suser (p->p_ucred, &p->p_acflag); #elif __FreeBSD_version < 500000 error = suser (p); #else /* __FreeBSD_version >= 500000 */ error = suser (td); #endif /* __FreeBSD_version >= 500000 */ if (error) return error; s = splhigh (); d->dtrwait = *(int*)data * hz / 100; splx (s); return 0; #endif #ifdef TIOCMGDTRWAIT case TIOCMGDTRWAIT: CX_DEBUG2 (d, ("ioctl: tiocmgdtrwait\n")); s = splhigh (); *(int*)data = d->dtrwait * 100 / hz; splx (s); return 0; #endif } CX_DEBUG2 (d, ("ioctl: 0x%lx\n", cmd)); return ENOTTY; } /* * Wake up opens() waiting for DTR ready. 
*/ static void cx_dtrwakeup (void *arg) { drv_t *d = arg; d->dtroff = 0; wakeup (&d->dtrwait); } static void disc_optim(tp, t) struct tty *tp; struct termios *t; { if (CONDITION(t,tp)) tp->t_state |= TS_CAN_BYPASS_L_RINT; else tp->t_state &= ~TS_CAN_BYPASS_L_RINT; } #if __FreeBSD_version >= 500000 void cx_softintr (void *unused) #else void cx_softintr () #endif { drv_t *d; async_q *q; int i, s, ic, k; while (MY_SOFT_INTR) { MY_SOFT_INTR = 0; for (i=0; ichan || d->chan->type == T_NONE || d->chan->mode != M_ASYNC || !d->tty.t_dev) continue; s = splhigh (); if (d->intr_action & CX_READ) { q = &(d->aqueue); if (d->tty.t_state & TS_CAN_BYPASS_L_RINT) { k = AQ_GSZ(q); if (d->tty.t_rawq.c_cc + k > d->tty.t_ihiwat && (d->tty.t_cflag & CRTS_IFLOW || d->tty.t_iflag & IXOFF) && !(d->tty.t_state & TS_TBLOCK)) ttyblock(&d->tty); d->tty.t_rawcc += k; while (k>0) { k--; AQ_POP (q, ic); splx (s); putc (ic, &d->tty.t_rawq); s = splhigh (); } ttwakeup(&d->tty); if (d->tty.t_state & TS_TTSTOP && (d->tty.t_iflag & IXANY || d->tty.t_cc[VSTART] == d->tty.t_cc[VSTOP])) { d->tty.t_state &= ~TS_TTSTOP; d->tty.t_lflag &= ~FLUSHO; d->intr_action |= CX_WRITE; } } else { while (q->end != q->beg) { AQ_POP (q, ic); splx (s); (*linesw[d->tty.t_line].l_rint) (ic, &d->tty); s = splhigh (); } } d->intr_action &= ~CX_READ; } splx (s); s = splhigh (); if (d->intr_action & CX_WRITE) { if (d->tty.t_line) (*linesw[d->tty.t_line].l_start) (&d->tty); else cx_oproc (&d->tty); d->intr_action &= ~CX_WRITE; } splx (s); } } } /* * Fill transmitter buffer with data. */ static void cx_oproc (struct tty *tp) { int s = splhigh (), k; drv_t *d = channel [UNIT (tp->t_dev)]; static u_char buf[DMABUFSZ]; u_char *p; u_short len = 0, sublen = 0; if (!d) { splx (s); return; } CX_DEBUG2 (d, ("cx_oproc\n")); if (tp->t_cflag & CRTSCTS && (tp->t_state & TS_TBLOCK) && d->chan->rts) cx_set_rts (d->chan, 0); else if (tp->t_cflag & CRTSCTS && ! (tp->t_state & TS_TBLOCK) && ! d->chan->rts) cx_set_rts (d->chan, 1); if (! 
(tp->t_state & (TS_TIMEOUT | TS_TTSTOP))) { /* Start transmitter. */ cx_enable_transmit (d->chan, 1); /* Is it busy? */ if (! cx_buf_free (d->chan)) { tp->t_state |= TS_BUSY; splx (s); return; } if (tp->t_iflag & IXOFF) { p = (buf + (DMABUFSZ/2)); sublen = q_to_b (&tp->t_outq, p, (DMABUFSZ/2)); k = sublen; while (k--) { /* Send XON/XOFF out of band. */ if (*p == tp->t_cc[VSTOP]) { cx_xflow_ctl (d->chan, 0); p++; continue; } if (*p == tp->t_cc[VSTART]) { cx_xflow_ctl (d->chan, 1); p++; continue; } buf[len] = *p; len++; p++; } } else { p = buf; len = q_to_b (&tp->t_outq, p, (DMABUFSZ/2)); } if (len) { cx_send_packet (d->chan, buf, len, 0); tp->t_state |= TS_BUSY; d->atimeout = 10; CX_DEBUG2 (d, ("out %d bytes\n", len)); } } ttwwakeup (tp); splx (s); } static int cx_param (struct tty *tp, struct termios *t) { drv_t *d = channel [UNIT (tp->t_dev)]; int s, bits, parity; if (!d) return EINVAL; s = splhigh (); if (t->c_ospeed == 0) { /* Clear DTR and RTS. */ cx_set_dtr (d->chan, 0); splx (s); CX_DEBUG2 (d, ("cx_param (hangup)\n")); return 0; } CX_DEBUG2 (d, ("cx_param\n")); /* Check requested parameters. */ if (t->c_ospeed < 300 || t->c_ospeed > 256*1024) { splx (s); return EINVAL; } if (t->c_ispeed && (t->c_ispeed < 300 || t->c_ispeed > 256*1024)) { splx (s); return EINVAL; } /* And copy them to tty and channel structures. */ tp->t_ispeed = t->c_ispeed = tp->t_ospeed = t->c_ospeed; tp->t_cflag = t->c_cflag; /* Set character length and parity mode. */ switch (t->c_cflag & CSIZE) { default: case CS8: bits = 8; break; case CS7: bits = 7; break; case CS6: bits = 6; break; case CS5: bits = 5; break; } parity = ((t->c_cflag & PARENB) ? 1 : 0) * (1 + ((t->c_cflag & PARODD) ? 0 : 1)); /* Set current channel number. */ if (! 
d->chan->dtr) cx_set_dtr (d->chan, 1); disc_optim (&d->tty, &d->tty.t_termios); cx_set_async_param (d->chan, t->c_ospeed, bits, parity, (t->c_cflag & CSTOPB), !(t->c_cflag & PARENB), (t->c_cflag & CRTSCTS), (t->c_iflag & IXON), (t->c_iflag & IXANY), t->c_cc[VSTART], t->c_cc[VSTOP]); splx (s); return 0; } #if __FreeBSD_version < 400000 static struct tty *cx_devtotty (dev_t dev) { int unit = UNIT (dev); if (unit == UNIT_CTL || unit >= NCX*NCHAN || ! channel[unit]) return 0; return &channel[unit]->tty; } #endif /* * Stop output on a line */ static void cx_stop (struct tty *tp, int flag) { drv_t *d = channel [UNIT (tp->t_dev)]; int s; if (!d) return; s = splhigh (); if (tp->t_state & TS_BUSY) { /* Stop transmitter */ CX_DEBUG2 (d, ("cx_stop\n")); cx_transmitter_ctl (d->chan, 0); } splx (s); } /* * Process the (delayed) carrier signal setup. */ static void cx_carrier (void *arg) { drv_t *d = arg; cx_chan_t *c = d->chan; int s, cd; s = splhigh (); cd = cx_get_cd (c); if (d->cd != cd) { if (cd) { CX_DEBUG (d, ("carrier on\n")); d->cd = 1; splx (s); (*linesw[d->tty.t_line].l_modem) (&d->tty, 1); } else { CX_DEBUG (d, ("carrier loss\n")); d->cd = 0; splx (s); (*linesw[d->tty.t_line].l_modem) (&d->tty, 0); } } } /* * Modem signal callback function. */ static void cx_modem (cx_chan_t *c) { drv_t *d = c->sys; if (!d || c->mode != M_ASYNC) return; /* Handle carrier detect/loss. */ untimeout (cx_carrier, c, d->dcd_timeout_handle); /* Carrier changed - delay processing DCD for a while * to give both sides some time to initialize. 
*/ d->dcd_timeout_handle = timeout (cx_carrier, d, hz/2); } #if __FreeBSD_version < 400000 struct isa_driver cxdriver = { cx_probe, cx_attach, "cx" }; static struct cdevsw cx_cdevsw = { cx_open, cx_close, cx_read, cx_write, cx_ioctl, cx_stop, noreset, cx_devtotty, ttpoll, nommap, NULL, "cx", NULL, -1, }; #elif __FreeBSD_version < 500000 static struct cdevsw cx_cdevsw = { cx_open, cx_close, cx_read, cx_write, cx_ioctl, ttypoll, nommap, nostrategy, "cx", CDEV_MAJOR, nodump, nopsize, D_TTY, -1 }; #elif __FreeBSD_version == 500000 static struct cdevsw cx_cdevsw = { cx_open, cx_close, cx_read, cx_write, cx_ioctl, ttypoll, nommap, nostrategy, "cx", CDEV_MAJOR, nodump, nopsize, D_TTY, }; #elif __FreeBSD_version <= 501000 static struct cdevsw cx_cdevsw = { .d_open = cx_open, .d_close = cx_close, .d_read = cx_read, .d_write = cx_write, .d_ioctl = cx_ioctl, .d_poll = ttypoll, .d_mmap = nommap, .d_strategy = nostrategy, .d_name = "cx", .d_maj = CDEV_MAJOR, .d_dump = nodump, .d_flags = D_TTY, }; #elif __FreeBSD_version < 502103 static struct cdevsw cx_cdevsw = { .d_open = cx_open, .d_close = cx_close, .d_read = cx_read, .d_write = cx_write, .d_ioctl = cx_ioctl, .d_poll = ttypoll, .d_name = "cx", .d_maj = CDEV_MAJOR, .d_flags = D_TTY, }; #else /* __FreeBSD_version >= 502103 */ static struct cdevsw cx_cdevsw = { .d_version = D_VERSION, .d_open = cx_open, .d_close = cx_close, .d_read = cx_read, .d_write = cx_write, .d_ioctl = cx_ioctl, .d_name = "cx", .d_maj = CDEV_MAJOR, .d_flags = D_TTY | D_NEEDGIANT, }; #endif #ifdef NETGRAPH #if __FreeBSD_version >= 500000 static int ng_cx_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); #else static int ng_cx_constructor (node_p *node) { drv_t *d = (*node)->private; #endif CX_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_cx_newhook (node_p node, hook_p hook, const char *name) { int s; #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); #else drv_t *d = node->private; #endif if (d->chan->mode == 
M_ASYNC) return EINVAL; /* Attach debug hook */ if (strcmp (name, NG_CX_HOOK_DEBUG) == 0) { #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, NULL); #else hook->private = 0; #endif d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CX_HOOK_RAW) != 0) return EINVAL; #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, d); #else hook->private = d; #endif d->hook = hook; s = splhigh (); cx_up (d); splx (s); return 0; } static int print_modems (char *s, cx_chan_t *c, int need_header) { int status = cx_modem_status (c->sys); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? "On" : "-"); return length; } static int print_stats (char *s, cx_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8ld %7ld %7ld %8ld %7ld %7ld\n", c->rintr, c->tintr, c->mintr, c->ibytes, c->ipkts, c->ierrs, c->obytes, c->opkts, c->oerrs); return length; } static int print_chan (char *s, cx_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "cx%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (cx_get_baud (c)) length += sprintf (s + length, " %ld", cx_get_baud (c)); else length += sprintf (s + length, " extclock"); if (c->mode == M_HDLC) { length += sprintf (s + length, " dpll=%s", cx_get_dpll (c) ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", cx_get_nrzi (c) ? "on" : "off"); } length += sprintf (s + length, " loop=%s", cx_get_loop (c) ? 
"on\n" : "off\n"); return length; } #if __FreeBSD_version >= 500000 static int ng_cx_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; #else static int ng_cx_rcvmsg (node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { drv_t *d = node->private; #endif struct ng_mesg *resp = NULL; int error = 0; if (!d) return EINVAL; CX_DEBUG (d, ("Rcvmsg\n")); #if __FreeBSD_version >= 500000 NGI_GET_MSG (item, msg); #endif switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CX_COOKIE: printf ("Don't forget to implement\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; #if __FreeBSD_version >= 500000 NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #else MALLOC (resp, struct ng_mesg *, dl, M_NETGRAPH, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #endif bzero (resp, dl); s = (resp)->data; l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); #if __FreeBSD_version < 500000 (resp)->header.version = NG_VERSION; (resp)->header.arglen = strlen (s) + 1; (resp)->header.token = msg->header.token; (resp)->header.typecookie = NGM_CX_COOKIE; (resp)->header.cmd = msg->header.cmd; #endif strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRLEN); } break; } break; } #if __FreeBSD_version >= 500000 NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); #else *rptr = resp; FREE (msg, M_NETGRAPH); #endif return error; } #if __FreeBSD_version >= 500000 static int ng_cx_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; meta_p meta; #else static int ng_cx_rcvdata (hook_p hook, struct mbuf *m, meta_p meta) { drv_t *d = hook->node->private; #endif struct ifqueue *q; int s; #if __FreeBSD_version >= 500000 
NGI_GET_M (item, m); NGI_GET_META (item, meta); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); NG_FREE_META (meta); #else if (! hook->private || ! d) { NG_FREE_DATA (m,meta); #endif return ENETDOWN; } q = (meta && meta->priority > 0) ? &d->hi_queue : &d->lo_queue; s = splhigh (); #if __FreeBSD_version >= 500000 IF_LOCK (q); if (_IF_QFULL (q)) { _IF_DROP (q); IF_UNLOCK (q); splx (s); NG_FREE_M (m); NG_FREE_META (meta); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); #else if (IF_QFULL (q)) { IF_DROP (q); splx (s); NG_FREE_DATA (m, meta); return ENOBUFS; } IF_ENQUEUE (q, m); #endif cx_start (d); splx (s); return 0; } static int ng_cx_rmnode (node_p node) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); CX_DEBUG (d, ("Rmnode\n")); if (d && d->running) { int s = splhigh (); cx_down (d); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NG_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } node->nd_flags &= ~NG_INVALID; #endif #else /* __FreeBSD_version < 500000 */ drv_t *d = node->private; int s; s = splhigh (); cx_down (d); splx (s); node->flags |= NG_INVALID; ng_cutlinks (node); #ifdef KLD_MODULE ng_unname (node); ng_unref (node); #else node->flags &= ~NG_INVALID; #endif #endif return 0; } static void ng_cx_watchdog (void *arg) { drv_t *d = arg; if (d->timeout == 1) cx_watchdog (d); if (d->timeout) d->timeout--; d->timeout_handle = timeout (ng_cx_watchdog, d, hz); } static int ng_cx_connect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif d->timeout_handle = timeout (ng_cx_watchdog, d, hz); return 0; } static int ng_cx_disconnect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif int s; s = splhigh (); #if __FreeBSD_version >= 500000 if (NG_HOOK_PRIVATE (hook)) #else if (hook->private) #endif cx_down (d); splx 
(s); untimeout (ng_cx_watchdog, d, d->timeout_handle); return 0; } #endif /*NETGRAPH*/ #ifdef KLD_MODULE #if __FreeBSD_version < 400000 /* * Function called when loading the driver. */ static int cx_load (void) { int i; for (i=0;iirq, 0, 0, (inthand2_t*) cx_intr, &net_imask, id.id_unit); enable_intr(); } if (!i) { /* Deactivate the timeout routine. And soft interrupt*/ untimeout (cx_timeout, 0, timeout_handle); unregister_swi (SWI_TTY, cx_softintr); return ENXIO; } return 0; } /* * Function called when unloading the driver. */ static int cx_unload (void) { int i, s; /* Check if the device is busy (open). */ for (i=0; ichan)->type == T_NONE) continue; if (d->lock) return EBUSY; if (c->mode == M_ASYNC && (d->tty.t_state & TS_ISOPEN) && (d->open_dev|0x2)) return EBUSY; if (d->running) return EBUSY; } s = splhigh (); /* Deactivate the timeout routine. And soft interrupt*/ for (i=0; iport) continue; untimeout (cx_timeout, 0, timeout_handle); unregister_swi (SWI_TTY, cx_softintr); break; } for (i=0; ichan)->type == T_NONE) continue; if (d->dtr_timeout_handle.callout) untimeout (cx_dtrwakeup, d, d->dtr_timeout_handle); if (d->dcd_timeout_handle.callout) untimeout (cx_carrier, c, d->dcd_timeout_handle); } /* Close all active boards. */ for (i=0; iport) continue; cx_close_board (b); } for (i=0; iport) continue; if (led_timo[i].callout) untimeout (cx_led_off, b, led_timo[i]); } /* OK to unload the driver, unregister the interrupt first. */ for (i=0; iport) continue; /* Disable the interrupt request. */ disable_intr(); unregister_intr (b->irq, (inthand2_t *)cx_intr); isa_dma_release (b->dma); enable_intr(); } splx (s); s = splhigh (); /* Detach the interfaces, free buffer memory. */ for (i=0; ichan)->type == T_NONE) continue; #ifndef NETGRAPH #if NBPFILTER > 0 /* Detach from the packet filter list of interfaces. 
*/ { struct bpf_if *q, **b = &bpf_iflist; while ((q = *b)) { if (q->bif_ifp == d->pp.pp_if) { *b = q->bif_next; free (q, M_DEVBUF); } b = &(q->bif_next); } } #endif /* NBPFILTER */ /* Detach from the sync PPP list. */ sppp_detach (&d->pp.pp_if); /* Detach from the system list of interfaces. */ { struct ifaddr *ifa; TAILQ_FOREACH (ifa, &d->pp.pp_if.if_addrhead, ifa_link) { TAILQ_REMOVE (&d->pp.pp_if.if_addrhead, ifa, ifa_link); free (ifa, M_IFADDR); } TAILQ_REMOVE (&ifnet, &d->pp.pp_if, if_link); } #endif /* !NETGRAPH */ /* Deallocate buffers. */ /* free (d, M_DEVBUF);*/ } for (i=0; inum] = 0; free (b, M_DEVBUF); } splx (s); return 0; } #define devsw(a) cdevsw[major((a))] #endif /* __FreeBSD_version < 400000 */ #endif /* KLD_MODULE */ #if __FreeBSD_version < 400000 #ifdef KLD_MODULE static int cx_modevent (module_t mod, int type, void *unused) { dev_t dev; int result; static int load_count = 0; dev = makedev (CDEV_MAJOR, 0); switch (type) { case MOD_LOAD: if (devsw(dev)) return (ENXIO); load_count ++; cdevsw_add (&dev, &cx_cdevsw, NULL); timeout_handle = timeout (cx_timeout, 0, hz*5); /* Software interrupt. 
*/ register_swi (SWI_TTY, cx_softintr); result = cx_load (); return result; case MOD_UNLOAD: result = cx_unload (); if (result) return result; if (devsw(dev)&&!(load_count-1)) { cdevsw_add (&dev, NULL, NULL); } load_count --; return result; case MOD_SHUTDOWN: break; } return 0; } #endif /* KLD_MODULE */ #else /* __FreeBSD_version >= 400000 */ static int cx_modevent (module_t mod, int type, void *unused) { dev_t dev; static int load_count = 0; struct cdevsw *cdsw; #if __FreeBSD_version >= 502103 dev = udev2dev (makeudev(CDEV_MAJOR, 0)); #else dev = makedev (CDEV_MAJOR, 0); #endif switch (type) { case MOD_LOAD: if (dev != NODEV && (cdsw = devsw (dev)) && cdsw->d_maj == CDEV_MAJOR) { printf ("Sigma driver is already in system\n"); return (EEXIST); } #if __FreeBSD_version >= 500000 && defined NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_cx\n"); #endif ++load_count; #if __FreeBSD_version <= 500000 cdevsw_add (&cx_cdevsw); #endif timeout_handle = timeout (cx_timeout, 0, hz*5); /* Software interrupt. 
*/ #if __FreeBSD_version < 500000 register_swi (SWI_TTY, cx_softintr); #else swi_add(&tty_ithd, "tty:cx", cx_softintr, NULL, SWI_TTY, 0, &cx_fast_ih); #endif break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Sigma\n"); #if __FreeBSD_version <= 500000 cdevsw_remove (&cx_cdevsw); #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH ng_rmtype (&typestruct); #endif } if (timeout_handle.callout) untimeout (cx_timeout, 0, timeout_handle); #if __FreeBSD_version >= 500000 ithread_remove_handler (cx_fast_ih); #else unregister_swi (SWI_TTY, cx_softintr); #endif --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #endif /* __FreeBSD_version >= 400000 */ #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CX_NODE_TYPE, .constructor = ng_cx_constructor, .rcvmsg = ng_cx_rcvmsg, .shutdown = ng_cx_rmnode, .newhook = ng_cx_newhook, .connect = ng_cx_connect, .rcvdata = ng_cx_rcvdata, .disconnect = ng_cx_disconnect }; #if __FreeBSD_version < 400000 NETGRAPH_INIT_ORDERED (cx, &typestruct, SI_SUB_DRIVERS,\ SI_ORDER_MIDDLE + CDEV_MAJOR); #endif #endif /*NETGRAPH*/ #if __FreeBSD_version >= 500000 #ifdef NETGRAPH MODULE_DEPEND (ng_cx, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (isa_cx, sppp, 1, 1, 1); #endif #ifdef KLD_MODULE DRIVER_MODULE (cxmod, isa, cx_isa_driver, cx_devclass, cx_modevent, NULL); #else DRIVER_MODULE (cx, isa, cx_isa_driver, cx_devclass, cx_modevent, NULL); #endif #elif __FreeBSD_version >= 400000 #ifdef NETGRAPH DRIVER_MODULE(cx, isa, cx_isa_driver, cx_devclass, ng_mod_event, &typestruct); #else DRIVER_MODULE(cx, isa, cx_isa_driver, cx_devclass, cx_modevent, 0); #endif #else /* __FreeBSD_version < 400000 */ #ifdef KLD_MODULE #ifndef NETGRAPH static moduledata_t cxmod = { "cx", cx_modevent, NULL}; DECLARE_MODULE (cx, cxmod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR); #endif #else /* KLD_MODULE */ /* * Now for some driver initialisation. 
* Occurs ONCE during boot (very early). * This is if we are NOT a loadable module. */ static void cx_drvinit (void *unused) { #if __FreeBSD_version < 400000 dev_t dev; dev = makedev (CDEV_MAJOR, 0); cdevsw_add (&dev, &cx_cdevsw, NULL); #else cdevsw_add (&cx_cdevsw); #endif /* Activate the timeout routine. */ timeout_handle = timeout (cx_timeout, 0, hz*5); /* Software interrupt. */ register_swi (SWI_TTY, cx_softintr); #ifdef NETGRAPH #if 0 /* Register our node type in netgraph */ if (ng_newtype (&typestruct)) printf ("Failed to register ng_cx\n"); #endif #endif } SYSINIT (cxdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR, cx_drvinit, 0) #endif /* KLD_MODULE */ #endif /* __FreeBSD_version < 400000 */ #endif /* NCX */ Index: head/sys/dev/cy/cy_isa.c =================================================================== --- head/sys/dev/cy/cy_isa.c (revision 129878) +++ head/sys/dev/cy/cy_isa.c (revision 129879) @@ -1,148 +1,149 @@ /*- * cyclades cyclom-y serial driver * Andrew Herbert , 17 August 1993 * * Copyright (c) 1993 Andrew Herbert. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name Andrew Herbert may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN * NO EVENT SHALL I BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Cyclades Y ISA serial interface driver */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include static int cy_isa_attach(device_t dev); static int cy_isa_probe(device_t dev); static device_method_t cy_isa_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, cy_isa_probe), DEVMETHOD(device_attach, cy_isa_attach), { 0, 0 } }; static driver_t cy_isa_driver = { cy_driver_name, cy_isa_methods, 0, }; DRIVER_MODULE(cy, isa, cy_isa_driver, cy_devclass, 0, 0); static int cy_isa_probe(device_t dev) { struct resource *mem_res; cy_addr iobase; int mem_rid; if (isa_get_logicalid(dev) != 0) /* skip PnP probes */ return (ENXIO); mem_rid = 0; mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, 0ul, ~0ul, 0ul, RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "ioport resource allocation failed\n"); return (ENXIO); } iobase = rman_get_virtual(mem_res); /* Cyclom-16Y hardware reset (Cyclom-8Ys don't care) */ cy_inb(iobase, CY16_RESET, 0); /* XXX? */ DELAY(500); /* wait for the board to get its act together */ /* this is needed to get the board out of reset */ cy_outb(iobase, CY_CLEAR_INTR, 0, 0); DELAY(500); bus_release_resource(dev, SYS_RES_MEMORY, mem_rid, mem_res); return (cy_units(iobase, 0) == 0 ? 
	    ENXIO : 0);
}

/*
 * Attach an ISA Cyclom-Y board: map its shared-memory window, let the
 * common layer discover the ports behind it, then allocate and wire up
 * the (shareable) ISA interrupt.  On any failure, every resource
 * acquired so far is released and ENXIO is returned.
 */
static int
cy_isa_attach(device_t dev)
{
	struct resource *irq_res, *mem_res;
	void *irq_cookie, *vaddr, *vsc;
	int irq_rid, mem_rid;

	/* Pre-null so the goto-cleanup below can tell what was acquired. */
	irq_res = NULL;
	mem_res = NULL;

	mem_rid = 0;
	mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, 0ul,
	    ~0ul, 0ul, RF_ACTIVE);
	if (mem_res == NULL) {
		device_printf(dev, "memory resource allocation failed\n");
		goto fail;
	}
	vaddr = rman_get_virtual(mem_res);

	/* Common attach scans the window for ports (0 = ISA variant). */
	vsc = cyattach_common(vaddr, 0);
	if (vsc == NULL) {
		device_printf(dev, "no ports found!\n");
		goto fail;
	}

	irq_rid = 0;
	irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0ul, ~0ul,
	    0ul, RF_SHAREABLE | RF_ACTIVE);
	if (irq_res == NULL) {
		device_printf(dev, "interrupt resource allocation failed\n");
		goto fail;
	}
	if (bus_setup_intr(dev, irq_res, INTR_TYPE_TTY | INTR_FAST,
	    cyintr, vsc, &irq_cookie) != 0) {
		device_printf(dev, "interrupt setup failed\n");
		goto fail;
	}

	return (0);

fail:
	/* Release whatever was acquired before the failure. */
	if (irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
	if (mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, mem_rid, mem_res);
	return (ENXIO);
}
Index: head/sys/dev/cy/cy_pci.c
===================================================================
--- head/sys/dev/cy/cy_pci.c	(revision 129878)
+++ head/sys/dev/cy/cy_pci.c	(revision 129879)
@@ -1,191 +1,192 @@
/*
 * Copyright (c) 1996, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Cyclades Y PCI serial interface driver */ #include __FBSDID("$FreeBSD$"); #include "opt_cy_pci_fastintr.h" #include #include #include +#include #include #include #include #include #include #define CY_PCI_BASE_ADDR0 0x10 #define CY_PCI_BASE_ADDR1 0x14 #define CY_PCI_BASE_ADDR2 0x18 #define CY_PLX_9050_ICS 0x4c #define CY_PLX_9060_ICS 0x68 #define CY_PLX_9050_ICS_IENABLE 0x040 #define CY_PLX_9050_ICS_LOCAL_IENABLE 0x001 #define CY_PLX_9050_ICS_LOCAL_IPOLARITY 0x002 #define CY_PLX_9060_ICS_IENABLE 0x100 #define CY_PLX_9060_ICS_LOCAL_IENABLE 0x800 /* Cyclom-Y Custom Register for PLX ID. */ #define PLX_VER 0x3400 #define PLX_9050 0x0b #define PLX_9060 0x0c #define PLX_9080 0x0d static int cy_pci_attach(device_t dev); static int cy_pci_probe(device_t dev); static device_method_t cy_pci_methods[] = { /* Device interface. 
 */
	DEVMETHOD(device_probe,		cy_pci_probe),
	DEVMETHOD(device_attach,	cy_pci_attach),
	{ 0, 0 }
};

static driver_t cy_pci_driver = {
	cy_driver_name,
	cy_pci_methods,
	0,
};

DRIVER_MODULE(cy, pci, cy_pci_driver, cy_devclass, 0, 0);
MODULE_DEPEND(cy, pci, 1, 1, 1);

/*
 * Probe: match the Cyclades Cyclom-Y PCI device IDs and set the device
 * description.  Returns 0 on a match, ENXIO otherwise.
 * (Old-style K&R parameter declaration, as in the rest of this file.)
 */
static int
cy_pci_probe(dev)
	device_t dev;
{
	u_int32_t device_id;

	device_id = pci_get_devid(dev);
	/*
	 * Mask bits 17-18 before comparing -- presumably board-variant
	 * bits of the Cyclades device ID; verify against the vendor's
	 * ID list.  NOTE(review): assumption, not established here.
	 */
	device_id &= ~0x00060000;
	if (device_id != 0x0100120e && device_id != 0x0101120e)
		return (ENXIO);
	device_set_desc(dev, "Cyclades Cyclom-Y Serial Adapter");
	return (0);
}

/*
 * Attach a PCI Cyclom-Y: allocate the PLX I/O-port BAR and the board's
 * memory BAR, discover ports via the common layer, hook up the
 * interrupt (fast handler first when CY_PCI_FASTINTR is configured,
 * falling back to a normal one), then enable the PLX local interrupt
 * input.  Any failure releases everything acquired and returns ENXIO.
 */
static int
cy_pci_attach(dev)
	device_t dev;
{
	struct resource *ioport_res, *irq_res, *mem_res;
	void *irq_cookie, *vaddr, *vsc;
	u_int32_t ioport;
	int irq_setup, ioport_rid, irq_rid, mem_rid;
	u_char plx_ver;

	/* Pre-null so the goto-cleanup can tell what was acquired. */
	ioport_res = NULL;
	irq_res = NULL;
	mem_res = NULL;

	ioport_rid = CY_PCI_BASE_ADDR1;
	ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid,
	    0ul, ~0ul, 0ul, RF_ACTIVE);
	if (ioport_res == NULL) {
		device_printf(dev, "ioport resource allocation failed\n");
		goto fail;
	}
	ioport = rman_get_start(ioport_res);

	mem_rid = CY_PCI_BASE_ADDR2;
	mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid,
	    0ul, ~0ul, 0ul, RF_ACTIVE);
	if (mem_res == NULL) {
		device_printf(dev, "memory resource allocation failed\n");
		goto fail;
	}
	vaddr = rman_get_virtual(mem_res);

	/* Common attach scans the window for ports (1 = PCI variant). */
	vsc = cyattach_common(vaddr, 1);
	if (vsc == NULL) {
		device_printf(dev, "no ports found!\n");
		goto fail;
	}

	irq_rid = 0;
	irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0ul, ~0ul,
	    0ul, RF_SHAREABLE | RF_ACTIVE);
	if (irq_res == NULL) {
		device_printf(dev, "interrupt resource allocation failed\n");
		goto fail;
	}

#ifdef CY_PCI_FASTINTR
	irq_setup = bus_setup_intr(dev, irq_res, INTR_TYPE_TTY | INTR_FAST,
	    cyintr, vsc, &irq_cookie);
#else
	irq_setup = ENXIO;	/* force the non-fast fallback below */
#endif
	if (irq_setup != 0)
		irq_setup = bus_setup_intr(dev, irq_res, INTR_TYPE_TTY,
		    cyintr, vsc, &irq_cookie);
	if (irq_setup != 0) {
		device_printf(dev, "interrupt setup failed\n");
		goto fail;
	}

	/*
	 * Enable the "local" interrupt input to generate a
	 * PCI interrupt.
	 */
	/* Low nibble of the PLX version register selects the bridge chip. */
	plx_ver = *((u_char *)vaddr + PLX_VER) & 0x0f;
	switch (plx_ver) {
	case PLX_9050:
		outw(ioport + CY_PLX_9050_ICS,
		    CY_PLX_9050_ICS_IENABLE | CY_PLX_9050_ICS_LOCAL_IENABLE |
		    CY_PLX_9050_ICS_LOCAL_IPOLARITY);
		break;
	case PLX_9060:
	case PLX_9080:
	default:		/* Old board, use PLX_9060 values. */
		/* Read-modify-write: preserve the other ICS bits. */
		outw(ioport + CY_PLX_9060_ICS,
		    inw(ioport + CY_PLX_9060_ICS) | CY_PLX_9060_ICS_IENABLE |
		    CY_PLX_9060_ICS_LOCAL_IENABLE);
		break;
	}

	return (0);

fail:
	/* Release whatever was acquired before the failure. */
	if (ioport_res != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid,
		    ioport_res);
	if (irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res);
	if (mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, mem_rid, mem_res);
	return (ENXIO);
}
Index: head/sys/dev/dcons/dcons.c
===================================================================
--- head/sys/dev/dcons/dcons.c	(revision 129878)
+++ head/sys/dev/dcons/dcons.c	(revision 129879)
@@ -1,648 +1,649 @@
/*
 * Copyright (C) 2003
 *	Hidetoshi Shimokawa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Hidetoshi Shimokawa.
 *
 * 4. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: dcons.c,v 1.65 2003/10/24 03:24:55 simokawa Exp $ * $FreeBSD$ */ #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ddb.h" #include "opt_comconsole.h" #include "opt_dcons.h" #ifndef DCONS_POLL_HZ #define DCONS_POLL_HZ 100 #endif #ifndef DCONS_BUF_SIZE #define DCONS_BUF_SIZE (16*1024) #endif #ifndef DCONS_FORCE_CONSOLE #define DCONS_FORCE_CONSOLE 0 /* mostly for FreeBSD-4 */ #endif #ifndef DCONS_FORCE_GDB #define DCONS_FORCE_GDB 1 #endif #if __FreeBSD_version >= 500101 #define CONS_NODEV 1 /* for latest current */ static struct consdev gdbconsdev; #endif static d_open_t dcons_open; static d_close_t dcons_close; static d_ioctl_t dcons_ioctl; static struct cdevsw dcons_cdevsw = { #if __FreeBSD_version >= 500104 .d_version = D_VERSION, .d_open = dcons_open, .d_close = dcons_close, .d_ioctl = dcons_ioctl, .d_name = "dcons", .d_flags = D_TTY | D_NEEDGIANT, #else /* open */ dcons_open, /* close */ dcons_close, /* read */ ttyread, /* write */ ttywrite, /* ioctl */ dcons_ioctl, /* poll */ ttypoll, /* mmap */ nommap, /* strategy */ nostrategy, /* name */ "dcons", /* major */ CDEV_MAJOR, /* dump */ 
nodump, /* psize */ nopsize, /* flags */ 0, #endif }; #ifndef KLD_MODULE static char bssbuf[DCONS_BUF_SIZE]; /* buf in bss */ #endif /* global data */ static struct dcons_global dg; struct dcons_global *dcons_conf; static int poll_hz = DCONS_POLL_HZ; SYSCTL_NODE(_kern, OID_AUTO, dcons, CTLFLAG_RD, 0, "Dumb Console"); SYSCTL_INT(_kern_dcons, OID_AUTO, poll_hz, CTLFLAG_RW, &poll_hz, 0, "dcons polling rate"); static int drv_init = 0; static struct callout dcons_callout; struct dcons_buf *dcons_buf; /* for local dconschat */ /* per device data */ static struct dcons_softc { dev_t dev; struct dcons_ch o, i; int brk_state; #define DC_GDB 1 int flags; } sc[DCONS_NPORT]; static void dcons_tty_start(struct tty *); static int dcons_tty_param(struct tty *, struct termios *); static void dcons_timeout(void *); static int dcons_drv_init(int); static int dcons_getc(struct dcons_softc *); static int dcons_checkc(struct dcons_softc *); static void dcons_putc(struct dcons_softc *, int); static cn_probe_t dcons_cnprobe; static cn_init_t dcons_cninit; static cn_getc_t dcons_cngetc; static cn_checkc_t dcons_cncheckc; static cn_putc_t dcons_cnputc; CONS_DRIVER(dcons, dcons_cnprobe, dcons_cninit, NULL, dcons_cngetc, dcons_cncheckc, dcons_cnputc, NULL); #if __FreeBSD_version < 500000 #define THREAD proc #else #define THREAD thread #endif static int dcons_open(dev_t dev, int flag, int mode, struct THREAD *td) { struct tty *tp; int unit, error, s; unit = minor(dev); if (unit != 0) return (ENXIO); tp = dev->si_tty = ttymalloc(dev->si_tty); tp->t_oproc = dcons_tty_start; tp->t_param = dcons_tty_param; tp->t_stop = nottystop; tp->t_dev = dev; error = 0; s = spltty(); if ((tp->t_state & TS_ISOPEN) == 0) { tp->t_state |= TS_CARR_ON; ttychars(tp); tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; tp->t_cflag = TTYDEF_CFLAG|CLOCAL; tp->t_lflag = TTYDEF_LFLAG; tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; ttsetwater(tp); } else if ((tp->t_state & TS_XCLUDE) && suser(td)) { splx(s); return 
(EBUSY); } splx(s); error = (*linesw[tp->t_line].l_open)(dev, tp); return (error); } static int dcons_close(dev_t dev, int flag, int mode, struct THREAD *td) { int unit; struct tty *tp; unit = minor(dev); if (unit != 0) return (ENXIO); tp = dev->si_tty; if (tp->t_state & TS_ISOPEN) { (*linesw[tp->t_line].l_close)(tp, flag); ttyclose(tp); } return (0); } static int dcons_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct THREAD *td) { int unit; struct tty *tp; int error; unit = minor(dev); if (unit != 0) return (ENXIO); tp = dev->si_tty; error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, td); if (error != ENOIOCTL) return (error); error = ttioctl(tp, cmd, data, flag); if (error != ENOIOCTL) return (error); return (ENOTTY); } static int dcons_tty_param(struct tty *tp, struct termios *t) { tp->t_ispeed = t->c_ispeed; tp->t_ospeed = t->c_ospeed; tp->t_cflag = t->c_cflag; return 0; } static void dcons_tty_start(struct tty *tp) { struct dcons_softc *dc; int s; dc = (struct dcons_softc *)tp->t_dev->si_drv1; s = spltty(); if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); return; } tp->t_state |= TS_BUSY; while (tp->t_outq.c_cc != 0) dcons_putc(dc, getc(&tp->t_outq)); tp->t_state &= ~TS_BUSY; ttwwakeup(tp); splx(s); } static void dcons_timeout(void *v) { struct tty *tp; struct dcons_softc *dc; int i, c, polltime; for (i = 0; i < DCONS_NPORT; i ++) { dc = &sc[i]; tp = dc->dev->si_tty; while ((c = dcons_checkc(dc)) != -1) if (tp->t_state & TS_ISOPEN) (*linesw[tp->t_line].l_rint)(c, tp); } polltime = hz / poll_hz; if (polltime < 1) polltime = 1; callout_reset(&dcons_callout, polltime, dcons_timeout, tp); } static void dcons_cnprobe(struct consdev *cp) { #if __FreeBSD_version >= 501109 sprintf(cp->cn_name, "dcons"); #else cp->cn_dev = makedev(CDEV_MAJOR, DCONS_CON); #endif #if DCONS_FORCE_CONSOLE cp->cn_pri = CN_REMOTE; #else cp->cn_pri = CN_NORMAL; #endif } static void dcons_cninit(struct consdev *cp) { dcons_drv_init(0); #if CONS_NODEV cp->cn_arg 
#else cp->cn_dev->si_drv1 #endif = (void *)&sc[DCONS_CON]; /* share port0 with unit0 */ } #if CONS_NODEV static int dcons_cngetc(struct consdev *cp) { return(dcons_getc((struct dcons_softc *)cp->cn_arg)); } static int dcons_cncheckc(struct consdev *cp) { return(dcons_checkc((struct dcons_softc *)cp->cn_arg)); } static void dcons_cnputc(struct consdev *cp, int c) { dcons_putc((struct dcons_softc *)cp->cn_arg, c); } #else static int dcons_cngetc(dev_t dev) { return(dcons_getc((struct dcons_softc *)dev->si_drv1)); } static int dcons_cncheckc(dev_t dev) { return(dcons_checkc((struct dcons_softc *)dev->si_drv1)); } static void dcons_cnputc(dev_t dev, int c) { dcons_putc((struct dcons_softc *)dev->si_drv1, c); } #endif static int dcons_getc(struct dcons_softc *dc) { int c; while ((c = dcons_checkc(dc)) == -1); return (c & 0xff); } static int dcons_checkc(struct dcons_softc *dc) { unsigned char c; u_int32_t ptr, pos, gen, next_gen; struct dcons_ch *ch; ch = &dc->i; if (dg.dma_tag != NULL) bus_dmamap_sync(dg.dma_tag, dg.dma_map, BUS_DMASYNC_POSTREAD); ptr = ntohl(*ch->ptr); gen = ptr >> DCONS_GEN_SHIFT; pos = ptr & DCONS_POS_MASK; if (gen == ch->gen && pos == ch->pos) return (-1); next_gen = DCONS_NEXT_GEN(ch->gen); /* XXX sanity check */ if ((gen != ch->gen && gen != next_gen) || (gen == ch->gen && pos < ch->pos)) { /* generation skipped !! 
*/ /* XXX discard */ ch->gen = gen; ch->pos = pos; return (-1); } c = ch->buf[ch->pos]; ch->pos ++; if (ch->pos >= ch->size) { ch->gen = next_gen; ch->pos = 0; } #if DDB && ALT_BREAK_TO_DEBUGGER switch (dc->brk_state) { case STATE1: if (c == KEY_TILDE) dc->brk_state = STATE2; else dc->brk_state = STATE0; break; case STATE2: dc->brk_state = STATE0; if (c == KEY_CTRLB) { #if DCONS_FORCE_GDB if (dc->flags & DC_GDB) boothowto |= RB_GDB; #endif breakpoint(); } } if (c == KEY_CR) dc->brk_state = STATE1; #endif return (c); } static void dcons_putc(struct dcons_softc *dc, int c) { struct dcons_ch *ch; ch = &dc->o; ch->buf[ch->pos] = c; ch->pos ++; if (ch->pos >= ch->size) { ch->gen = DCONS_NEXT_GEN(ch->gen); ch->pos = 0; } *ch->ptr = DCONS_MAKE_PTR(ch); if (dg.dma_tag != NULL) bus_dmamap_sync(dg.dma_tag, dg.dma_map, BUS_DMASYNC_PREWRITE); } static int dcons_init_port(int port, int offset, int size) { int osize; struct dcons_softc *dc; dc = &sc[port]; osize = size * 3 / 4; dc->o.size = osize; dc->i.size = size - osize; dc->o.buf = (char *)dg.buf + offset; dc->i.buf = dc->o.buf + osize; dc->o.gen = dc->i.gen = 0; dc->o.pos = dc->i.pos = 0; dc->o.ptr = &dg.buf->optr[port]; dc->i.ptr = &dg.buf->iptr[port]; dc->brk_state = STATE0; dg.buf->osize[port] = htonl(osize); dg.buf->isize[port] = htonl(size - osize); dg.buf->ooffset[port] = htonl(offset); dg.buf->ioffset[port] = htonl(offset + osize); dg.buf->optr[port] = DCONS_MAKE_PTR(&dc->o); dg.buf->iptr[port] = DCONS_MAKE_PTR(&dc->i); return(0); } static int dcons_drv_init(int stage) { int size, size0, offset; if (drv_init) return(drv_init); drv_init = -1; bzero(&dg, sizeof(dg)); dcons_conf = &dg; dg.cdev = &dcons_consdev; dg.size = DCONS_BUF_SIZE; #ifndef KLD_MODULE if (stage == 0) /* XXX or cold */ /* * DCONS_FORCE_CONSOLE == 1 and statically linked. * called from cninit(). can't use contigmalloc yet . */ dg.buf = (struct dcons_buf *) bssbuf; else #endif /* * DCONS_FORCE_CONSOLE == 0 or kernel module case. 
* if the module is loaded after boot, * bssbuf could be non-continuous. */ dg.buf = (struct dcons_buf *) contigmalloc(dg.size, M_DEVBUF, 0, 0x10000, 0xffffffff, PAGE_SIZE, 0ul); dcons_buf = dg.buf; offset = DCONS_HEADER_SIZE; size = (dg.size - offset); size0 = size * 3 / 4; dcons_init_port(0, offset, size0); offset += size0; dcons_init_port(1, offset, size - size0); dg.buf->version = htonl(DCONS_VERSION); dg.buf->magic = ntohl(DCONS_MAGIC); #if DDB && DCONS_FORCE_GDB #if CONS_NODEV gdbconsdev.cn_arg = (void *)&sc[DCONS_GDB]; #if __FreeBSD_version >= 501109 sprintf(gdbconsdev.cn_name, "dgdb"); #endif gdb_arg = &gdbconsdev; #else gdbdev = makedev(CDEV_MAJOR, DCONS_GDB); #endif gdb_getc = dcons_cngetc; gdb_putc = dcons_cnputc; #endif drv_init = 1; return 0; } static int dcons_attach_port(int port, char *name, int flags) { struct dcons_softc *dc; struct tty *tp; dc = &sc[port]; dc->flags = flags; dc->dev = make_dev(&dcons_cdevsw, port, UID_ROOT, GID_WHEEL, 0600, name); tp = ttymalloc(NULL); dc->dev->si_drv1 = (void *)dc; dc->dev->si_tty = tp; tp->t_oproc = dcons_tty_start; tp->t_param = dcons_tty_param; tp->t_stop = nottystop; tp->t_dev = dc->dev; return(0); } static int dcons_attach(void) { int polltime; dcons_attach_port(DCONS_CON, "dcons", 0); dcons_attach_port(DCONS_GDB, "dgdb", DC_GDB); #if __FreeBSD_version < 500000 callout_init(&dcons_callout); #else callout_init(&dcons_callout, 0); #endif polltime = hz / poll_hz; if (polltime < 1) polltime = 1; callout_reset(&dcons_callout, polltime, dcons_timeout, NULL); return(0); } static int dcons_detach(int port) { struct tty *tp; struct dcons_softc *dc; dc = &sc[port]; tp = dc->dev->si_tty; if (tp->t_state & TS_ISOPEN) { printf("dcons: still opened\n"); (*linesw[tp->t_line].l_close)(tp, 0); tp->t_gen++; ttyclose(tp); ttwakeup(tp); ttwwakeup(tp); } /* XXX * must wait until all device are closed. 
*/ tsleep((void *)dc, PWAIT, "dcodtc", hz/4); destroy_dev(dc->dev); return(0); } /* cnXXX works only for FreeBSD-5 */ static int dcons_modevent(module_t mode, int type, void *data) { int err = 0, ret; switch (type) { case MOD_LOAD: ret = dcons_drv_init(1); dcons_attach(); #if __FreeBSD_version >= 500000 if (ret == 0) { dcons_cnprobe(&dcons_consdev); dcons_cninit(&dcons_consdev); cnadd(&dcons_consdev); } #endif break; case MOD_UNLOAD: printf("dcons: unload\n"); callout_stop(&dcons_callout); #if DDB && DCONS_FORCE_GDB #if CONS_NODEV gdb_arg = NULL; #else gdbdev = NODEV; #endif #endif #if __FreeBSD_version >= 500000 cnremove(&dcons_consdev); #endif dcons_detach(DCONS_CON); dcons_detach(DCONS_GDB); dg.buf->magic = 0; contigfree(dg.buf, DCONS_BUF_SIZE, M_DEVBUF); break; case MOD_SHUTDOWN: break; } return(err); } DEV_MODULE(dcons, dcons_modevent, NULL); MODULE_VERSION(dcons, DCONS_VERSION); Index: head/sys/dev/dcons/dcons_crom.c =================================================================== --- head/sys/dev/dcons/dcons_crom.c (revision 129878) +++ head/sys/dev/dcons/dcons_crom.c (revision 129879) @@ -1,238 +1,239 @@ /* * Copyright (C) 2003 * Hidetoshi Shimokawa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * * This product includes software developed by Hidetoshi Shimokawa. * * 4. 
Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: dcons_crom.c,v 1.8 2003/10/23 15:47:21 simokawa Exp $ * $FreeBSD$ */ #include #include +#include #include #include #include #include #include #include #include #include #include #include #include static bus_addr_t dcons_paddr; #if __FreeBSD_version >= 500000 static int force_console = 1; TUNABLE_INT("hw.firewire.dcons_crom.force_console", &force_console); #endif #ifndef CSRVAL_VENDOR_PRIVATE #define NEED_NEW_DRIVER #endif #define ADDR_HI(x) (((x) >> 24) & 0xffffff) #define ADDR_LO(x) ((x) & 0xffffff) struct dcons_crom_softc { struct firewire_dev_comm fd; struct crom_chunk unit; struct crom_chunk spec; struct crom_chunk ver; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_addr_t bus_addr; }; static void dcons_crom_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "dcons_crom", device_get_unit(parent)); } static int dcons_crom_probe(device_t dev) { device_t pa; pa = device_get_parent(dev); if(device_get_unit(dev) != device_get_unit(pa)){ return(ENXIO); } device_set_desc(dev, "dcons configuration ROM"); return 
(0); } #ifndef NEED_NEW_DRIVER static void dcons_crom_post_busreset(void *arg) { struct dcons_crom_softc *sc; struct crom_src *src; struct crom_chunk *root; sc = (struct dcons_crom_softc *) arg; src = sc->fd.fc->crom_src; root = sc->fd.fc->crom_root; bzero(&sc->unit, sizeof(struct crom_chunk)); crom_add_chunk(src, root, &sc->unit, CROM_UDIR); crom_add_entry(&sc->unit, CSRKEY_SPEC, CSRVAL_VENDOR_PRIVATE); crom_add_simple_text(src, &sc->unit, &sc->spec, "FreeBSD"); crom_add_entry(&sc->unit, CSRKEY_VER, DCONS_CSR_VAL_VER); crom_add_simple_text(src, &sc->unit, &sc->ver, "dcons"); crom_add_entry(&sc->unit, DCONS_CSR_KEY_HI, ADDR_HI(dcons_paddr)); crom_add_entry(&sc->unit, DCONS_CSR_KEY_LO, ADDR_LO(dcons_paddr)); } #endif static void dmamap_cb(void *arg, bus_dma_segment_t *segments, int seg, int error) { struct dcons_crom_softc *sc; if (error) printf("dcons_dmamap_cb: error=%d\n", error); sc = (struct dcons_crom_softc *)arg; sc->bus_addr = segments[0].ds_addr; bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_PREWRITE); device_printf(sc->fd.dev, #if __FreeBSD_version < 500000 "bus_addr 0x%x\n", sc->bus_addr); #else "bus_addr 0x%jx\n", (uintmax_t)sc->bus_addr); #endif if (dcons_paddr != 0) { /* XXX */ device_printf(sc->fd.dev, "dcons_paddr is already set\n"); return; } dcons_conf->dma_tag = sc->dma_tag; dcons_conf->dma_map = sc->dma_map; dcons_paddr = sc->bus_addr; #if __FreeBSD_version >= 500000 /* Force to be the high-level console */ if (force_console) cnselect(dcons_conf->cdev); #endif } static int dcons_crom_attach(device_t dev) { #ifdef NEED_NEW_DRIVER printf("dcons_crom: you need newer firewire driver\n"); return (-1); #else struct dcons_crom_softc *sc; sc = (struct dcons_crom_softc *) device_get_softc(dev); sc->fd.fc = device_get_ivars(dev); sc->fd.dev = dev; sc->fd.post_explore = NULL; sc->fd.post_busreset = (void *) dcons_crom_post_busreset; /* map dcons buffer */ bus_dma_tag_create( /*parent*/ sc->fd.fc->dmat, /*alignment*/ sizeof(u_int32_t), /*boundary*/ 
0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/ dcons_conf->size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ BUS_DMA_ALLOCNOW, #if __FreeBSD_version >= 501102 /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, #endif &sc->dma_tag); bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map); bus_dmamap_load(sc->dma_tag, sc->dma_map, (void *)dcons_conf->buf, dcons_conf->size, dmamap_cb, sc, 0); return (0); #endif } static int dcons_crom_detach(device_t dev) { struct dcons_crom_softc *sc; sc = (struct dcons_crom_softc *) device_get_softc(dev); sc->fd.post_busreset = NULL; /* XXX */ if (dcons_conf->dma_tag == sc->dma_tag) dcons_conf->dma_tag = NULL; bus_dmamap_unload(sc->dma_tag, sc->dma_map); bus_dmamap_destroy(sc->dma_tag, sc->dma_map); bus_dma_tag_destroy(sc->dma_tag); return 0; } static devclass_t dcons_crom_devclass; static device_method_t dcons_crom_methods[] = { /* device interface */ DEVMETHOD(device_identify, dcons_crom_identify), DEVMETHOD(device_probe, dcons_crom_probe), DEVMETHOD(device_attach, dcons_crom_attach), DEVMETHOD(device_detach, dcons_crom_detach), { 0, 0 } }; static driver_t dcons_crom_driver = { "dcons_crom", dcons_crom_methods, sizeof(struct dcons_crom_softc), }; DRIVER_MODULE(dcons_crom, firewire, dcons_crom_driver, dcons_crom_devclass, 0, 0); MODULE_VERSION(dcons_crom, 1); MODULE_DEPEND(dcons_crom, dcons, DCONS_VERSION, DCONS_VERSION, DCONS_VERSION); MODULE_DEPEND(dcons_crom, firewire, 1, 1, 1); Index: head/sys/dev/digi/digi.c =================================================================== --- head/sys/dev/digi/digi.c (revision 129878) +++ head/sys/dev/digi/digi.c (revision 129879) @@ -1,1955 +1,1956 @@ /*- * Copyright (c) 2001 Brian Somers * based on work by Slawa Olhovchenkov * John Prince * Eric Hernes * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /*- * TODO: * Figure out what the con bios stuff is supposed to do * Test with *LOTS* more cards - I only have a PCI8r and an ISA Xem. 
*/ #include "opt_compat.h" #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define CTRL_DEV 0x800000 #define CALLOUT_MASK 0x400000 #define CONTROL_INIT_STATE 0x100000 #define CONTROL_LOCK_STATE 0x200000 #define CONTROL_MASK (CTRL_DEV|CONTROL_INIT_STATE|CONTROL_LOCK_STATE) #define UNIT_MASK 0x030000 #define PORT_MASK 0x0000FF #define DEV_TO_UNIT(dev) (MINOR_TO_UNIT(minor(dev))) #define MINOR_MAGIC_MASK (CALLOUT_MASK | CONTROL_MASK) #define MINOR_TO_UNIT(mynor) (((mynor) & UNIT_MASK)>>16) #define MINOR_TO_PORT(mynor) ((mynor) & PORT_MASK) static d_open_t digiopen; static d_close_t digiclose; static d_read_t digiread; static d_write_t digiwrite; static d_ioctl_t digiioctl; static void digistop(struct tty *tp, int rw); static int digimctl(struct digi_p *port, int bits, int how); static void digi_poll(void *ptr); static void digi_freemoduledata(struct digi_softc *); static void fepcmd(struct digi_p *port, int cmd, int op, int ncmds); static void digistart(struct tty *tp); static int digiparam(struct tty *tp, struct termios *t); static void digihardclose(struct digi_p *port); static void digi_intr(void *); static int digi_init(struct digi_softc *_sc); static int digi_loadmoduledata(struct digi_softc *); static int digi_inuse(struct digi_softc *); static void digi_free_state(struct digi_softc *); #define fepcmd_b(port, cmd, op1, op2, ncmds) \ fepcmd(port, cmd, (op2 << 8) | op1, ncmds) #define fepcmd_w fepcmd static speed_t digidefaultrate = TTYDEF_SPEED; struct con_bios { struct con_bios *next; u_char *bios; size_t size; }; static struct con_bios *con_bios_list; devclass_t digi_devclass; static char driver_name[] = "digi"; unsigned digi_debug = 0; static struct speedtab digispeedtab[] = { { 0, 0}, /* old (sysV-like) Bx codes */ { 50, 1}, { 75, 2}, { 110, 3}, { 134, 4}, { 150, 5}, { 200, 6}, { 300, 7}, { 600, 8}, { 1200, 9}, { 1800, 10}, { 
2400, 11}, { 4800, 12}, { 9600, 13}, { 19200, 14}, { 38400, 15}, { 57600, (02000 | 1)}, { 76800, (02000 | 2)}, { 115200, (02000 | 3)}, { 230400, (02000 | 6)}, { -1, -1} }; const struct digi_control_signals digi_xixe_signals = { 0x02, 0x08, 0x10, 0x20, 0x40, 0x80 }; const struct digi_control_signals digi_normal_signals = { 0x02, 0x80, 0x20, 0x10, 0x40, 0x01 }; static struct cdevsw digi_sw = { .d_version = D_VERSION, .d_open = digiopen, .d_close = digiclose, .d_read = digiread, .d_write = digiwrite, .d_ioctl = digiioctl, .d_name = driver_name, .d_flags = D_TTY | D_NEEDGIANT, }; static void digi_poll(void *ptr) { struct digi_softc *sc; sc = (struct digi_softc *)ptr; callout_handle_init(&sc->callout); digi_intr(sc); sc->callout = timeout(digi_poll, sc, (hz >= 200) ? hz / 100 : 1); } static void digi_int_test(void *v) { struct digi_softc *sc = v; callout_handle_init(&sc->inttest); #ifdef DIGI_INTERRUPT if (sc->intr_timestamp.tv_sec || sc->intr_timestamp.tv_usec) { /* interrupt OK! */ return; } log(LOG_ERR, "digi%d: Interrupt didn't work, use polled mode\n", unit); #endif sc->callout = timeout(digi_poll, sc, (hz >= 200) ? 
hz / 100 : 1); } static void digi_freemoduledata(struct digi_softc *sc) { if (sc->fep.data != NULL) { free(sc->fep.data, M_TTYS); sc->fep.data = NULL; } if (sc->link.data != NULL) { free(sc->link.data, M_TTYS); sc->link.data = NULL; } if (sc->bios.data != NULL) { free(sc->bios.data, M_TTYS); sc->bios.data = NULL; } } static int digi_bcopy(const void *vfrom, void *vto, size_t sz) { volatile const char *from = (volatile const char *)vfrom; volatile char *to = (volatile char *)vto; size_t i; for (i = 0; i < sz; i++) *to++ = *from++; from = (const volatile char *)vfrom; to = (volatile char *)vto; for (i = 0; i < sz; i++) if (*to++ != *from++) return (0); return (1); } void digi_delay(struct digi_softc *sc, const char *txt, u_long timo) { if (cold) DELAY(timo * 1000000 / hz); else tsleep(sc, PUSER | PCATCH, txt, timo); } static int digi_init(struct digi_softc *sc) { int i, cnt, resp; u_char *ptr; int lowwater; struct digi_p *port; volatile struct board_chan *bc; ptr = NULL; if (sc->status == DIGI_STATUS_DISABLED) { log(LOG_ERR, "digi%d: Cannot init a disabled card\n", sc->res.unit); return (EIO); } if (sc->bios.data == NULL) { log(LOG_ERR, "digi%d: Cannot init without BIOS\n", sc->res.unit); return (EIO); } #if 0 if (sc->link.data == NULL && sc->model >= PCCX) { log(LOG_ERR, "digi%d: Cannot init without link info\n", sc->res.unit); return (EIO); } #endif if (sc->fep.data == NULL) { log(LOG_ERR, "digi%d: Cannot init without fep code\n", sc->res.unit); return (EIO); } sc->status = DIGI_STATUS_NOTINIT; if (sc->numports) { /* * We're re-initialising - maybe because someone's attached * another port module. For now, we just re-initialise * everything. 
*/ if (digi_inuse(sc)) return (EBUSY); digi_free_state(sc); } ptr = sc->setwin(sc, MISCGLOBAL); for (i = 0; i < 16; i += 2) vW(ptr + i) = 0; switch (sc->model) { case PCXEVE: outb(sc->wport, 0xff); /* window 7 */ ptr = sc->vmem + (BIOSCODE & 0x1fff); if (!digi_bcopy(sc->bios.data, ptr, sc->bios.size)) { device_printf(sc->dev, "BIOS upload failed\n"); return (EIO); } outb(sc->port, FEPCLR); break; case PCXE: case PCXI: case PCCX: ptr = sc->setwin(sc, BIOSCODE + ((0xf000 - sc->mem_seg) << 4)); if (!digi_bcopy(sc->bios.data, ptr, sc->bios.size)) { device_printf(sc->dev, "BIOS upload failed\n"); return (EIO); } break; case PCXEM: case PCIEPCX: case PCIXR: if (sc->pcibus) PCIPORT = FEPRST; else outb(sc->port, FEPRST | FEPMEM); for (i = 0; ((sc->pcibus ? PCIPORT : inb(sc->port)) & FEPMASK) != FEPRST; i++) { if (i > hz) { log(LOG_ERR, "digi%d: %s init reset failed\n", sc->res.unit, sc->name); return (EIO); } digi_delay(sc, "digiinit0", 5); } DLOG(DIGIDB_INIT, (sc->dev, "Got init reset after %d us\n", i)); /* Now upload the BIOS */ cnt = (sc->bios.size < sc->win_size - BIOSOFFSET) ? sc->bios.size : sc->win_size - BIOSOFFSET; ptr = sc->setwin(sc, BIOSOFFSET); if (!digi_bcopy(sc->bios.data, ptr, cnt)) { device_printf(sc->dev, "BIOS upload (1) failed\n"); return (EIO); } if (cnt != sc->bios.size) { /* and the second part */ ptr = sc->setwin(sc, sc->win_size); if (!digi_bcopy(sc->bios.data + cnt, ptr, sc->bios.size - cnt)) { device_printf(sc->dev, "BIOS upload failed\n"); return (EIO); } } ptr = sc->setwin(sc, 0); vW(ptr + 0) = 0x0401; vW(ptr + 2) = 0x0bf0; vW(ptr + 4) = 0x0000; vW(ptr + 6) = 0x0000; break; } DLOG(DIGIDB_INIT, (sc->dev, "BIOS uploaded\n")); ptr = sc->setwin(sc, MISCGLOBAL); W(ptr) = 0; if (sc->pcibus) { PCIPORT = FEPCLR; resp = FEPRST; } else if (sc->model == PCXEVE) { outb(sc->port, FEPCLR); resp = FEPRST; } else { outb(sc->port, FEPCLR | FEPMEM); resp = FEPRST | FEPMEM; } for (i = 0; ((sc->pcibus ? 
PCIPORT : inb(sc->port)) & FEPMASK) == resp; i++) { if (i > hz) { log(LOG_ERR, "digi%d: BIOS start failed\n", sc->res.unit); return (EIO); } digi_delay(sc, "digibios0", 5); } DLOG(DIGIDB_INIT, (sc->dev, "BIOS started after %d us\n", i)); for (i = 0; vW(ptr) != *(u_short *)"GD"; i++) { if (i > 2*hz) { log(LOG_ERR, "digi%d: BIOS boot failed " "(0x%02x != 0x%02x)\n", sc->res.unit, vW(ptr), *(u_short *)"GD"); return (EIO); } digi_delay(sc, "digibios1", 5); } DLOG(DIGIDB_INIT, (sc->dev, "BIOS booted after %d iterations\n", i)); if (sc->link.data != NULL) { DLOG(DIGIDB_INIT, (sc->dev, "Loading link data\n")); ptr = sc->setwin(sc, 0xcd0); digi_bcopy(sc->link.data, ptr, 21); /* XXX 21 ? */ } /* load FEP/OS */ switch (sc->model) { case PCXE: case PCXEVE: case PCXI: ptr = sc->setwin(sc, sc->model == PCXI ? 0x2000 : 0x0); digi_bcopy(sc->fep.data, ptr, sc->fep.size); /* A BIOS request to move our data to 0x2000 */ ptr = sc->setwin(sc, MBOX); vW(ptr + 0) = 2; vW(ptr + 2) = sc->mem_seg + FEPCODESEG; vW(ptr + 4) = 0; vW(ptr + 6) = FEPCODESEG; vW(ptr + 8) = 0; vW(ptr + 10) = sc->fep.size; /* Run the BIOS request */ outb(sc->port, FEPREQ | FEPMEM); outb(sc->port, FEPCLR | FEPMEM); for (i = 0; W(ptr); i++) { if (i > hz) { log(LOG_ERR, "digi%d: FEP/OS move failed\n", sc->res.unit); sc->hidewin(sc); return (EIO); } digi_delay(sc, "digifep0", 5); } DLOG(DIGIDB_INIT, (sc->dev, "FEP/OS moved after %d iterations\n", i)); /* Clear the confirm word */ ptr = sc->setwin(sc, FEPSTAT); vW(ptr + 0) = 0; /* A BIOS request to execute the FEP/OS */ ptr = sc->setwin(sc, MBOX); vW(ptr + 0) = 0x01; vW(ptr + 2) = FEPCODESEG; vW(ptr + 4) = 0x04; /* Run the BIOS request */ outb(sc->port, FEPREQ); outb(sc->port, FEPCLR); ptr = sc->setwin(sc, FEPSTAT); break; case PCXEM: case PCIEPCX: case PCIXR: DLOG(DIGIDB_INIT, (sc->dev, "Loading FEP/OS\n")); cnt = (sc->fep.size < sc->win_size - BIOSOFFSET) ? 
sc->fep.size : sc->win_size - BIOSOFFSET; ptr = sc->setwin(sc, BIOSOFFSET); digi_bcopy(sc->fep.data, ptr, cnt); if (cnt != sc->fep.size) { ptr = sc->setwin(sc, BIOSOFFSET + cnt); digi_bcopy(sc->fep.data + cnt, ptr, sc->fep.size - cnt); } DLOG(DIGIDB_INIT, (sc->dev, "FEP/OS loaded\n")); ptr = sc->setwin(sc, 0xc30); W(ptr + 4) = 0x1004; W(ptr + 6) = 0xbfc0; W(ptr + 0) = 0x03; W(ptr + 2) = 0x00; /* Clear the confirm word */ ptr = sc->setwin(sc, FEPSTAT); W(ptr + 0) = 0; if (sc->port) outb(sc->port, 0); /* XXX necessary ? */ break; case PCCX: ptr = sc->setwin(sc, 0xd000); digi_bcopy(sc->fep.data, ptr, sc->fep.size); /* A BIOS request to execute the FEP/OS */ ptr = sc->setwin(sc, 0xc40); W(ptr + 0) = 1; W(ptr + 2) = FEPCODE >> 4; W(ptr + 4) = 4; /* Clear the confirm word */ ptr = sc->setwin(sc, FEPSTAT); W(ptr + 0) = 0; /* Run the BIOS request */ outb(sc->port, FEPREQ | FEPMEM); /* send interrupt to BIOS */ outb(sc->port, FEPCLR | FEPMEM); break; } /* Now wait 'till the FEP/OS has booted */ for (i = 0; vW(ptr) != *(u_short *)"OS"; i++) { if (i > 2*hz) { log(LOG_ERR, "digi%d: FEP/OS start failed " "(0x%02x != 0x%02x)\n", sc->res.unit, vW(ptr), *(u_short *)"OS"); sc->hidewin(sc); return (EIO); } digi_delay(sc, "digifep1", 5); } DLOG(DIGIDB_INIT, (sc->dev, "FEP/OS started after %d iterations\n", i)); if (sc->model >= PCXEM) { ptr = sc->setwin(sc, 0xe04); vW(ptr) = 2; ptr = sc->setwin(sc, 0xc02); sc->numports = vW(ptr); } else { ptr = sc->setwin(sc, 0xc22); sc->numports = vW(ptr); } if (sc->numports == 0) { device_printf(sc->dev, "%s, 0 ports found\n", sc->name); sc->hidewin(sc); return (0); } if (sc->numports > 256) { /* Our minor numbering scheme is broken for more than 256 */ device_printf(sc->dev, "%s, 256 ports (%d ports found)\n", sc->name, sc->numports); sc->numports = 256; } else device_printf(sc->dev, "%s, %d ports found\n", sc->name, sc->numports); if (sc->ports) free(sc->ports, M_TTYS); sc->ports = malloc(sizeof(struct digi_p) * sc->numports, M_TTYS, M_WAITOK | 
M_ZERO); if (sc->ttys) free(sc->ttys, M_TTYS); sc->ttys = malloc(sizeof(struct tty) * sc->numports, M_TTYS, M_WAITOK | M_ZERO); /* * XXX Should read port 0xc90 for an array of 2byte values, 1 per * port. If the value is 0, the port is broken.... */ ptr = sc->setwin(sc, 0); /* We should now init per-port structures */ bc = (volatile struct board_chan *)(ptr + CHANSTRUCT); sc->gdata = (volatile struct global_data *)(ptr + FEP_GLOBAL); sc->memcmd = ptr + sc->gdata->cstart; sc->memevent = ptr + sc->gdata->istart; for (i = 0; i < sc->numports; i++, bc++) { port = sc->ports + i; port->pnum = i; port->sc = sc; port->status = ENABLED; port->tp = sc->ttys + i; port->bc = bc; if (sc->model == PCXEVE) { port->txbuf = ptr + (((bc->tseg - sc->mem_seg) << 4) & 0x1fff); port->rxbuf = ptr + (((bc->rseg - sc->mem_seg) << 4) & 0x1fff); port->txwin = FEPWIN | ((bc->tseg - sc->mem_seg) >> 9); port->rxwin = FEPWIN | ((bc->rseg - sc->mem_seg) >> 9); } else if (sc->model == PCXI || sc->model == PCXE) { port->txbuf = ptr + ((bc->tseg - sc->mem_seg) << 4); port->rxbuf = ptr + ((bc->rseg - sc->mem_seg) << 4); port->txwin = port->rxwin = 0; } else { port->txbuf = ptr + (((bc->tseg - sc->mem_seg) << 4) % sc->win_size); port->rxbuf = ptr + (((bc->rseg - sc->mem_seg) << 4) % sc->win_size); port->txwin = FEPWIN | (((bc->tseg - sc->mem_seg) << 4) / sc->win_size); port->rxwin = FEPWIN | (((bc->rseg - sc->mem_seg) << 4) / sc->win_size); } port->txbufsize = bc->tmax + 1; port->rxbufsize = bc->rmax + 1; lowwater = port->txbufsize >> 2; if (lowwater > 1024) lowwater = 1024; sc->setwin(sc, 0); fepcmd_w(port, STXLWATER, lowwater, 10); fepcmd_w(port, SRXLWATER, port->rxbufsize >> 2, 10); fepcmd_w(port, SRXHWATER, (3 * port->rxbufsize) >> 2, 10); bc->edelay = 100; port->dtr_wait = 3 * hz; /* * We don't use all the flags from since * they are only relevant for logins. It's important to have * echo off initially so that the line doesn't start blathering * before the echo flag can be turned off. 
	 */
	/*
	 * NOTE(review): this span continues digi_init(); the function's
	 * opening (and the enclosing per-port loop) is above the visible
	 * region of the file.
	 */
		port->it_in.c_iflag = 0;
		port->it_in.c_oflag = 0;
		port->it_in.c_cflag = TTYDEF_CFLAG;
		port->it_in.c_lflag = 0;
		termioschars(&port->it_in);
		port->it_in.c_ispeed = port->it_in.c_ospeed = digidefaultrate;
		port->it_out = port->it_in;
		port->send_ring = 1;	/* Default action on signal RI */

		/*
		 * Six device nodes per port: tty / tty-initial / tty-lock
		 * plus the callout (cua*) equivalents.  The minor number
		 * encodes unit (<<16), port index and the state/callout bits.
		 */
		port->dev[0] = make_dev(&digi_sw, (sc->res.unit << 16) + i,
		    UID_ROOT, GID_WHEEL, 0600, "ttyD%d.%d", sc->res.unit, i);
		port->dev[1] = make_dev(&digi_sw, ((sc->res.unit << 16) + i) |
		    CONTROL_INIT_STATE, UID_ROOT, GID_WHEEL, 0600,
		    "ttyiD%d.%d", sc->res.unit, i);
		port->dev[2] = make_dev(&digi_sw, ((sc->res.unit << 16) + i) |
		    CONTROL_LOCK_STATE, UID_ROOT, GID_WHEEL, 0600,
		    "ttylD%d.%d", sc->res.unit, i);
		port->dev[3] = make_dev(&digi_sw, ((sc->res.unit << 16) + i) |
		    CALLOUT_MASK, UID_UUCP, GID_DIALER, 0660,
		    "cuaD%d.%d", sc->res.unit, i);
		port->dev[4] = make_dev(&digi_sw, ((sc->res.unit << 16) + i) |
		    CALLOUT_MASK | CONTROL_INIT_STATE, UID_UUCP, GID_DIALER,
		    0660, "cuaiD%d.%d", sc->res.unit, i);
		port->dev[5] = make_dev(&digi_sw, ((sc->res.unit << 16) + i) |
		    CALLOUT_MASK | CONTROL_LOCK_STATE, UID_UUCP, GID_DIALER,
		    0660, "cualD%d.%d", sc->res.unit, i);
	}

	sc->hidewin(sc);
	/* Schedule a one-shot interrupt self-test one second from now. */
	sc->inttest = timeout(digi_int_test, sc, hz);
	/* fepcmd_w(&sc->ports[0], 0xff, 0, 0); */
	sc->status = DIGI_STATUS_ENABLED;

	return (0);
}

/*
 * Get or set the modem control signals of one port.
 *
 * how == DMGET: read the board's mstat register and translate the
 * board-specific signal bits (via sc->csigs and the per-port cd/dsr
 * mapping, which honours ALTPIN) into TIOCM_* bits; 'bits' is ignored
 * on input and the translated set is returned.
 *
 * how == DMSET/DMBIS/DMBIC: translate TIOCM_DTR/TIOCM_RTS from 'bits'
 * into board signal bits and issue a SETMODEM command to the FEP to
 * set/OR-in/clear them.  Returns 0.
 */
static int
digimctl(struct digi_p *port, int bits, int how)
{
	int mstat;

	if (how == DMGET) {
		/* Map the memory window in, snapshot mstat, hide it again. */
		port->sc->setwin(port->sc, 0);
		mstat = port->bc->mstat;
		port->sc->hidewin(port->sc);
		bits = TIOCM_LE;
		if (mstat & port->sc->csigs->rts)
			bits |= TIOCM_RTS;
		if (mstat & port->cd)
			bits |= TIOCM_CD;
		if (mstat & port->dsr)
			bits |= TIOCM_DSR;
		if (mstat & port->sc->csigs->cts)
			bits |= TIOCM_CTS;
		if (mstat & port->sc->csigs->ri)
			bits |= TIOCM_RI;
		if (mstat & port->sc->csigs->dtr)
			bits |= TIOCM_DTR;
		return (bits);
	}

	/* Only DTR and RTS may be set */
	mstat = 0;
	if (bits & TIOCM_DTR)
		mstat |= port->sc->csigs->dtr;
	if (bits & TIOCM_RTS)
		mstat |= port->sc->csigs->rts;

	switch (how) {
	case DMSET:
		/* set 'mstat' bits, clear everything else */
		fepcmd_b(port, SETMODEM, mstat, ~mstat, 0);
		break;
	case DMBIS:
		fepcmd_b(port, SETMODEM, mstat, 0, 0);
		break;
	case DMBIC:
		fepcmd_b(port, SETMODEM, 0, mstat, 0);
		break;
	}

	return (0);
}

/*
 * Decide whether input can bypass the line discipline's l_rint() path.
 * If no input-flag processing, canonical/echo processing or signal
 * handling is configured (and the standard ttyinput is in use), set
 * TS_CAN_BYPASS_L_RINT so the interrupt handler may queue raw data
 * directly; otherwise clear it.
 */
static void
digi_disc_optim(struct tty *tp, struct termios *t, struct digi_p *port)
{
	if (!(t->c_iflag & (ICRNL | IGNCR | IMAXBEL | INLCR | ISTRIP)) &&
	    (!(t->c_iflag & BRKINT) || (t->c_iflag & IGNBRK)) &&
	    (!(t->c_iflag & PARMRK) ||
	    (t->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK)) &&
	    !(t->c_lflag & (ECHO | ICANON | IEXTEN | ISIG | PENDIN)) &&
	    linesw[tp->t_line].l_rint == ttyinput)
		tp->t_state |= TS_CAN_BYPASS_L_RINT;
	else
		tp->t_state &= ~TS_CAN_BYPASS_L_RINT;
}

/*
 * open() entry point.  Handles the control devices (just a refcount),
 * callin/callout arbitration, first-open hardware initialisation and
 * the wait for carrier on blocking callin opens.
 */
static int
digiopen(dev_t dev, int flag, int mode, struct thread *td)
{
	struct digi_softc *sc;
	struct tty *tp;
	int unit;
	int pnum;
	struct digi_p *port;
	int s;
	int error, mynor;
	volatile struct board_chan *bc;

	error = 0;
	mynor = minor(dev);
	unit = MINOR_TO_UNIT(minor(dev));
	pnum = MINOR_TO_PORT(minor(dev));

	sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit);
	if (!sc)
		return (ENXIO);

	if (sc->status != DIGI_STATUS_ENABLED) {
		DLOG(DIGIDB_OPEN, (sc->dev, "Cannot open a disabled card\n"));
		return (ENXIO);
	}
	if (pnum >= sc->numports) {
		DLOG(DIGIDB_OPEN, (sc->dev, "port%d: Doesn't exist\n", pnum));
		return (ENXIO);
	}
	/* Control/init/lock-state nodes need no hardware setup. */
	if (mynor & (CTRL_DEV | CONTROL_MASK)) {
		sc->opencnt++;
		return (0);
	}
	port = &sc->ports[pnum];
	tp = dev->si_tty = port->tp;
	bc = port->bc;

	s = spltty();

open_top:
	/* Wait out a pending DTR-off (close) interval before reopening. */
	while (port->status & DIGI_DTR_OFF) {
		port->wopeners++;
		error = tsleep(&port->dtr_wait, TTIPRI | PCATCH, "digidtr", 0);
		port->wopeners--;
		if (error)
			goto out;
	}

	if (tp->t_state & TS_ISOPEN) {
		/*
		 * The device is open, so everything has been initialized.
		 * Handle conflicts.
		 */
		if (mynor & CALLOUT_MASK) {
			/* Callout reopen only allowed while callout active. */
			if (!port->active_out) {
				error = EBUSY;
				DLOG(DIGIDB_OPEN, (sc->dev, "port %d:"
				    " BUSY error = %d\n", pnum, error));
				goto out;
			}
		} else if (port->active_out) {
			/* Callin open while callout is active: block or fail. */
			if (flag & O_NONBLOCK) {
				error = EBUSY;
				DLOG(DIGIDB_OPEN, (sc->dev,
				    "port %d: BUSY error = %d\n",
				    pnum, error));
				goto out;
			}
			port->wopeners++;
			error = tsleep(&port->active_out, TTIPRI | PCATCH,
			    "digibi", 0);
			port->wopeners--;
			if (error != 0) {
				DLOG(DIGIDB_OPEN, (sc->dev,
				    "port %d: tsleep(digibi) error = %d\n",
				    pnum, error));
				goto out;
			}
			goto open_top;
		}
		if (tp->t_state & TS_XCLUDE && suser(td) != 0) {
			error = EBUSY;
			goto out;
		}
	} else {
		/*
		 * The device isn't open, so there are no conflicts.
		 * Initialize it.  Initialization is done twice in many
		 * cases: to preempt sleeping callin opens if we are callout,
		 * and to complete a callin open after DCD rises.
		 */
		tp->t_oproc = digistart;
		tp->t_param = digiparam;
		tp->t_stop = digistop;
		tp->t_dev = dev;
		tp->t_termios = (mynor & CALLOUT_MASK) ?
		    port->it_out : port->it_in;
		sc->setwin(sc, 0);

		bc->rout = bc->rin;	/* clear input queue */
		bc->idata = 1;
		bc->iempty = 1;
		bc->ilow = 1;
		/* Interrupt on carrier and ring changes. */
		bc->mint = port->cd | port->sc->csigs->ri;
		bc->tin = bc->tout;
		/* ALTPIN swaps the DCD and DSR pins (8-pin RJ45 cabling). */
		if (port->ialtpin) {
			port->cd = sc->csigs->dsr;
			port->dsr = sc->csigs->cd;
		} else {
			port->cd = sc->csigs->cd;
			port->dsr = sc->csigs->dsr;
		}
		port->wopeners++;	/* XXX required ? */
		error = digiparam(tp, &tp->t_termios);
		port->wopeners--;
		if (error != 0) {
			DLOG(DIGIDB_OPEN, (sc->dev,
			    "port %d: cxpparam error = %d\n", pnum, error));
			goto out;
		}
		ttsetwater(tp);

		/* handle fake and initial DCD for callout devices */
		if (bc->mstat & port->cd || mynor & CALLOUT_MASK)
			linesw[tp->t_line].l_modem(tp, 1);
	}

	/* Wait for DCD if necessary */
	if (!(tp->t_state & TS_CARR_ON) && !(mynor & CALLOUT_MASK) &&
	    !(tp->t_cflag & CLOCAL) && !(flag & O_NONBLOCK)) {
		port->wopeners++;
		error = tsleep(TSA_CARR_ON(tp), TTIPRI | PCATCH, "digidcd", 0);
		port->wopeners--;
		if (error != 0) {
			DLOG(DIGIDB_OPEN, (sc->dev,
			    "port %d: tsleep(digidcd) error = %d\n",
			    pnum, error));
			goto out;
		}
		goto open_top;	/* carrier came up; re-run arbitration */
	}
	error = linesw[tp->t_line].l_open(dev, tp);
	DLOG(DIGIDB_OPEN, (sc->dev, "port %d: l_open error = %d\n",
	    pnum, error));

	digi_disc_optim(tp, &tp->t_termios, port);

	if (tp->t_state & TS_ISOPEN && mynor & CALLOUT_MASK)
		port->active_out = TRUE;

	if (tp->t_state & TS_ISOPEN)
		sc->opencnt++;
out:
	splx(s);

	/* If the open ultimately failed, drop DTR/RTS and quiesce. */
	if (!(tp->t_state & TS_ISOPEN))
		digihardclose(port);

	DLOG(DIGIDB_OPEN, (sc->dev, "port %d: open() returns %d\n",
	    pnum, error));

	return (error);
}

/*
 * close() entry point.  Control devices only drop the refcount; real
 * ports run the line discipline close, stop I/O, drop modem signals
 * via digihardclose() and close the tty.
 */
static int
digiclose(dev_t dev, int flag, int mode, struct thread *td)
{
	int mynor;
	struct tty *tp;
	int unit, pnum;
	struct digi_softc *sc;
	struct digi_p *port;
	int s;

	mynor = minor(dev);
	unit = MINOR_TO_UNIT(mynor);
	pnum = MINOR_TO_PORT(mynor);

	sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit);
	KASSERT(sc, ("digi%d: softc not allocated in digiclose\n", unit));

	if (mynor & (CTRL_DEV | CONTROL_MASK)) {
		sc->opencnt--;
		return (0);
	}
	port = sc->ports + pnum;
	tp = port->tp;

	DLOG(DIGIDB_CLOSE, (sc->dev, "port %d: closing\n", pnum));

	s = spltty();
	linesw[tp->t_line].l_close(tp, flag);
	digi_disc_optim(tp, &tp->t_termios, port);
	digistop(tp, FREAD | FWRITE);
	digihardclose(port);
	ttyclose(tp);
	/*
	 * NOTE(review): splx() appears to run only when the last open
	 * reference goes away, which would leave the spl raised on other
	 * closes — looks like an spl leak; confirm against the original
	 * source history.
	 */
	if (--sc->opencnt == 0)
		splx(s);
	return (0);
}

/*
 * timeout(9) callback scheduled by digihardclose(): the DTR-off hold
 * interval has expired, so allow blocked callin opens to proceed.
 */
static void
digidtrwakeup(void *chan)
{
	struct digi_p *port = chan;

	port->status &= ~DIGI_DTR_OFF;
wakeup(&port->dtr_wait); port->wopeners--; } static void digihardclose(struct digi_p *port) { volatile struct board_chan *bc; int s; bc = port->bc; s = spltty(); port->sc->setwin(port->sc, 0); bc->idata = 0; bc->iempty = 0; bc->ilow = 0; bc->mint = 0; if ((port->tp->t_cflag & HUPCL) || (!port->active_out && !(bc->mstat & port->cd) && !(port->it_in.c_cflag & CLOCAL)) || !(port->tp->t_state & TS_ISOPEN)) { digimctl(port, TIOCM_DTR | TIOCM_RTS, DMBIC); if (port->dtr_wait != 0) { /* Schedule a wakeup of any callin devices */ port->wopeners++; timeout(&digidtrwakeup, port, port->dtr_wait); port->status |= DIGI_DTR_OFF; } } port->active_out = FALSE; wakeup(&port->active_out); wakeup(TSA_CARR_ON(port->tp)); splx(s); } static int digiread(dev_t dev, struct uio *uio, int flag) { int mynor; struct tty *tp; int error, unit, pnum; struct digi_softc *sc; mynor = minor(dev); if (mynor & CONTROL_MASK) return (ENODEV); unit = MINOR_TO_UNIT(mynor); pnum = MINOR_TO_PORT(mynor); sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit); KASSERT(sc, ("digi%d: softc not allocated in digiclose\n", unit)); tp = &sc->ttys[pnum]; error = linesw[tp->t_line].l_read(tp, uio, flag); DLOG(DIGIDB_READ, (sc->dev, "port %d: read() returns %d\n", pnum, error)); return (error); } static int digiwrite(dev_t dev, struct uio *uio, int flag) { int mynor; struct tty *tp; int error, unit, pnum; struct digi_softc *sc; mynor = minor(dev); if (mynor & CONTROL_MASK) return (ENODEV); unit = MINOR_TO_UNIT(mynor); pnum = MINOR_TO_PORT(mynor); sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit); KASSERT(sc, ("digi%d: softc not allocated in digiclose\n", unit)); tp = &sc->ttys[pnum]; error = linesw[tp->t_line].l_write(tp, uio, flag); DLOG(DIGIDB_WRITE, (sc->dev, "port %d: write() returns %d\n", pnum, error)); return (error); } /* * Load module "digi_.ko" and look for a symbol called digi_mod_. * * Populate sc->bios, sc->fep, and sc->link from this data. 
* * sc->fep.data, sc->bios.data and sc->link.data are malloc()d according * to their respective sizes. * * The module is unloaded when we're done. */ static int digi_loadmoduledata(struct digi_softc *sc) { struct digi_mod *digi_mod; linker_file_t lf; char *modfile, *sym; caddr_t symptr; int modlen, res; KASSERT(sc->bios.data == NULL, ("Uninitialised BIOS variable")); KASSERT(sc->fep.data == NULL, ("Uninitialised FEP variable")); KASSERT(sc->link.data == NULL, ("Uninitialised LINK variable")); KASSERT(sc->module != NULL, ("Uninitialised module name")); modlen = strlen(sc->module); modfile = malloc(modlen + 6, M_TEMP, M_WAITOK); snprintf(modfile, modlen + 6, "digi_%s", sc->module); if ((res = linker_reference_module(modfile, NULL, &lf)) != 0) { if (res == ENOENT && rootdev == NODEV) printf("%s: Failed to autoload module: No filesystem\n", modfile); else printf("%s: Failed %d to autoload module\n", modfile, res); } free(modfile, M_TEMP); if (res != 0) return (res); sym = malloc(modlen + 10, M_TEMP, M_WAITOK); snprintf(sym, modlen + 10, "digi_mod_%s", sc->module); if ((symptr = linker_file_lookup_symbol(lf, sym, 0)) == NULL) printf("digi_%s.ko: Symbol `%s' not found\n", sc->module, sym); free(sym, M_TEMP); digi_mod = (struct digi_mod *)symptr; if (digi_mod->dm_version != DIGI_MOD_VERSION) { printf("digi_%s.ko: Invalid version %d (need %d)\n", sc->module, digi_mod->dm_version, DIGI_MOD_VERSION); linker_file_unload(lf); return (EINVAL); } sc->bios.size = digi_mod->dm_bios.size; if (sc->bios.size != 0 && digi_mod->dm_bios.data != NULL) { sc->bios.data = malloc(sc->bios.size, M_TTYS, M_WAITOK); bcopy(digi_mod->dm_bios.data, sc->bios.data, sc->bios.size); } sc->fep.size = digi_mod->dm_fep.size; if (sc->fep.size != 0 && digi_mod->dm_fep.data != NULL) { sc->fep.data = malloc(sc->fep.size, M_TTYS, M_WAITOK); bcopy(digi_mod->dm_fep.data, sc->fep.data, sc->fep.size); } sc->link.size = digi_mod->dm_link.size; if (sc->link.size != 0 && digi_mod->dm_link.data != NULL) { 
sc->link.data = malloc(sc->link.size, M_TTYS, M_WAITOK); bcopy(digi_mod->dm_link.data, sc->link.data, sc->link.size); } linker_file_unload(lf); return (0); } static int digiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { int unit, pnum, mynor, error, s; struct digi_softc *sc; struct digi_p *port; struct tty *tp; #if defined(COMPAT_43) || defined(COMPAT_SUNOS) int oldcmd; struct termios term; #endif mynor = minor(dev); unit = MINOR_TO_UNIT(mynor); pnum = MINOR_TO_PORT(mynor); sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit); KASSERT(sc, ("digi%d: softc not allocated in digiioctl\n", unit)); if (sc->status == DIGI_STATUS_DISABLED) return (ENXIO); if (mynor & CTRL_DEV) { switch (cmd) { case DIGIIO_DEBUG: #ifdef DEBUG digi_debug = *(int *)data; return (0); #else device_printf(sc->dev, "DEBUG not defined\n"); return (ENXIO); #endif case DIGIIO_REINIT: digi_loadmoduledata(sc); error = digi_init(sc); digi_freemoduledata(sc); return (error); case DIGIIO_MODEL: *(enum digi_model *)data = sc->model; return (0); case DIGIIO_IDENT: return (copyout(sc->name, *(char **)data, strlen(sc->name) + 1)); } } if (pnum >= sc->numports) return (ENXIO); port = sc->ports + pnum; if (!(port->status & ENABLED)) return (ENXIO); tp = port->tp; if (mynor & CONTROL_MASK) { struct termios *ct; switch (mynor & CONTROL_MASK) { case CONTROL_INIT_STATE: ct = (mynor & CALLOUT_MASK) ? &port->it_out : &port->it_in; break; case CONTROL_LOCK_STATE: ct = (mynor & CALLOUT_MASK) ? 
			    /* continuation of the lock-state selection */
			    &port->lt_out : &port->lt_in;
			break;
		default:
			return (ENODEV);	/* /dev/nodev */
		}
		switch (cmd) {
		case TIOCSETA:
			error = suser(td);
			if (error != 0)
				return (error);
			*ct = *(struct termios *)data;
			return (0);
		case TIOCGETA:
			*(struct termios *)data = *ct;
			return (0);
		case TIOCGETD:
			*(int *)data = TTYDISC;
			return (0);
		case TIOCGWINSZ:
			bzero(data, sizeof(struct winsize));
			return (0);
		case DIGIIO_GETALTPIN:
			switch (mynor & CONTROL_MASK) {
			case CONTROL_INIT_STATE:
				*(int *)data = port->ialtpin;
				break;
			case CONTROL_LOCK_STATE:
				*(int *)data = port->laltpin;
				break;
			default:
				panic("Confusion when re-testing minor");
				return (ENODEV);
			}
			return (0);
		case DIGIIO_SETALTPIN:
			switch (mynor & CONTROL_MASK) {
			case CONTROL_INIT_STATE:
				/* Locked ALTPIN wins over the init state. */
				if (!port->laltpin) {
					port->ialtpin = !!*(int *)data;
					DLOG(DIGIDB_SET, (sc->dev,
					    "port%d: initial ALTPIN %s\n",
					    pnum, port->ialtpin ? "set" :
					    "cleared"));
				}
				break;
			case CONTROL_LOCK_STATE:
				port->laltpin = !!*(int *)data;
				DLOG(DIGIDB_SET, (sc->dev,
				    "port%d: ALTPIN %slocked\n",
				    pnum, port->laltpin ? "" : "un"));
				break;
			default:
				panic("Confusion when re-testing minor");
				return (ENODEV);
			}
			return (0);
		default:
			return (ENOTTY);
		}
	}

	/* Per-port ALTPIN get/set on the real tty device. */
	switch (cmd) {
	case DIGIIO_GETALTPIN:
		*(int *)data = !!(port->dsr == sc->csigs->cd);
		return (0);
	case DIGIIO_SETALTPIN:
		if (!port->laltpin) {
			if (*(int *)data) {
				DLOG(DIGIDB_SET, (sc->dev,
				    "port%d: ALTPIN set\n", pnum));
				port->cd = sc->csigs->dsr;
				port->dsr = sc->csigs->cd;
			} else {
				DLOG(DIGIDB_SET, (sc->dev,
				    "port%d: ALTPIN cleared\n", pnum));
				port->cd = sc->csigs->cd;
				port->dsr = sc->csigs->dsr;
			}
		}
		return (0);
	}

	tp = port->tp;
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	term = tp->t_termios;
	oldcmd = cmd;
	error = ttsetcompat(tp, &cmd, data, &term);
	if (error != 0)
		return (error);
	if (cmd != oldcmd)
		data = (caddr_t) & term;
#endif
	if (cmd == TIOCSETA || cmd == TIOCSETAW || cmd == TIOCSETAF) {
		int cc;
		struct termios *dt;
		struct termios *lt;

		dt = (struct termios *)data;
		/* Locked flag bits keep their current values. */
		lt = (mynor & CALLOUT_MASK) ?
		    &port->lt_out : &port->lt_in;

		dt->c_iflag = (tp->t_iflag & lt->c_iflag) |
		    (dt->c_iflag & ~lt->c_iflag);
		dt->c_oflag = (tp->t_oflag & lt->c_oflag) |
		    (dt->c_oflag & ~lt->c_oflag);
		dt->c_cflag = (tp->t_cflag & lt->c_cflag) |
		    (dt->c_cflag & ~lt->c_cflag);
		dt->c_lflag = (tp->t_lflag & lt->c_lflag) |
		    (dt->c_lflag & ~lt->c_lflag);
		/* Software flow control is done by the board, not the tty. */
		port->c_iflag = dt->c_iflag & (IXOFF | IXON | IXANY);
		dt->c_iflag &= ~(IXOFF | IXON | IXANY);
		for (cc = 0; cc < NCCS; ++cc)
			if (lt->c_cc[cc] != 0)
				dt->c_cc[cc] = tp->t_cc[cc];
		if (lt->c_ispeed != 0)
			dt->c_ispeed = tp->t_ispeed;
		if (lt->c_ospeed != 0)
			dt->c_ospeed = tp->t_ospeed;
	}
	error = linesw[tp->t_line].l_ioctl(tp, cmd, data, flag, td);
	if (error == 0 && cmd == TIOCGETA)
		((struct termios *)data)->c_iflag |= port->c_iflag;

	if (error >= 0 && error != ENOIOCTL)
		return (error);
	s = spltty();
	error = ttioctl(tp, cmd, data, flag);
	if (error == 0 && cmd == TIOCGETA)
		((struct termios *)data)->c_iflag |= port->c_iflag;

	digi_disc_optim(tp, &tp->t_termios, port);
	if (error >= 0 && error != ENOIOCTL) {
		splx(s);
		return (error);
	}
	sc->setwin(sc, 0);
	switch (cmd) {
	case DIGIIO_RING:
		port->send_ring = *(u_char *)data;
		break;
	case TIOCSBRK:
		/*
		 * now it sends 400 millisecond break because I don't know
		 * how to send an infinite break
		 */
		fepcmd_w(port, SENDBREAK, 400, 10);
		break;
	case TIOCCBRK:
		/* now it's empty */
		break;
	case TIOCSDTR:
		digimctl(port, TIOCM_DTR, DMBIS);
		break;
	case TIOCCDTR:
		digimctl(port, TIOCM_DTR, DMBIC);
		break;
	case TIOCMSET:
		digimctl(port, *(int *)data, DMSET);
		break;
	case TIOCMBIS:
		digimctl(port, *(int *)data, DMBIS);
		break;
	case TIOCMBIC:
		digimctl(port, *(int *)data, DMBIC);
		break;
	case TIOCMGET:
		*(int *)data = digimctl(port, 0, DMGET);
		break;
	case TIOCMSDTRWAIT:
		error = suser(td);
		if (error != 0) {
			splx(s);
			return (error);
		}
		/* Value is in 1/100ths of a second; store in ticks. */
		port->dtr_wait = *(int *)data * hz / 100;
		break;
	case TIOCMGDTRWAIT:
		*(int *)data = port->dtr_wait * 100 / hz;
		break;
#ifdef DIGI_INTERRUPT
	case TIOCTIMESTAMP:
		*(struct timeval *)data = sc->intr_timestamp;
		break;
#endif
	default:
		splx(s);
		return (ENOTTY);
	}
	splx(s);
	return (0);
}

/*
 * tty parameter (termios) handler.  Translates the termios settings
 * into FEP SETCFLAGS/SETIFLAGS/SETHFLOW/SONOFFC commands, handling
 * hangup (B0), parity/stop/size bits, software and hardware flow
 * control and the XON/XOFF characters.
 */
static int
digiparam(struct tty *tp, struct termios *t)
{
	int mynor;
	int unit;
	int pnum;
	struct digi_softc *sc;
	struct digi_p *port;
	int cflag;
	int iflag;
	int hflow;
	int s;
	int window;

	mynor = minor(tp->t_dev);
	unit = MINOR_TO_UNIT(mynor);
	pnum = MINOR_TO_PORT(mynor);
	sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit);
	KASSERT(sc, ("digi%d: softc not allocated in digiparam\n", unit));
	port = &sc->ports[pnum];
	DLOG(DIGIDB_SET, (sc->dev, "port%d: setting parameters\n", pnum));

	if (t->c_ispeed == 0)
		t->c_ispeed = t->c_ospeed;

	cflag = ttspeedtab(t->c_ospeed, digispeedtab);

	if (cflag < 0 || (cflag > 0 && t->c_ispeed != t->c_ospeed))
		return (EINVAL);

	s = splclock();

	window = sc->window;
	sc->setwin(sc, 0);

	if (cflag == 0) {	/* hangup */
		DLOG(DIGIDB_SET, (sc->dev, "port%d: hangup\n", pnum));
		digimctl(port, TIOCM_DTR | TIOCM_RTS, DMBIC);
	} else {
		digimctl(port, TIOCM_DTR | TIOCM_RTS, DMBIS);

		DLOG(DIGIDB_SET, (sc->dev, "port%d: CBAUD = %d\n", pnum,
		    cflag));

#if 0
		/* convert flags to sysV-style values */
		if (t->c_cflag & PARODD)
			cflag |= 0x0200;
		if (t->c_cflag & PARENB)
			cflag |= 0x0100;
		if (t->c_cflag & CSTOPB)
			cflag |= 0x0080;
#else
		/* convert flags to sysV-style values */
		if (t->c_cflag & PARODD)
			cflag |= FEP_PARODD;
		if (t->c_cflag & PARENB)
			cflag |= FEP_PARENB;
		if (t->c_cflag & CSTOPB)
			cflag |= FEP_CSTOPB;
		if (t->c_cflag & CLOCAL)
			cflag |= FEP_CLOCAL;
#endif

		cflag |= (t->c_cflag & CSIZE) >> 4;
		DLOG(DIGIDB_SET, (sc->dev, "port%d: CFLAG = 0x%x\n", pnum,
		    cflag));
		fepcmd_w(port, SETCFLAGS, (unsigned)cflag, 0);
	}

	iflag = t->c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | INPCK |
	    ISTRIP);
	/* Board-side software flow control (saved away by digiioctl()). */
	if (port->c_iflag & IXON)
		iflag |= 0x400;
	if (port->c_iflag & IXANY)
		iflag |= 0x800;
	if (port->c_iflag & IXOFF)
		iflag |= 0x1000;

	DLOG(DIGIDB_SET, (sc->dev, "port%d: set iflag = 0x%x\n", pnum, iflag));
	fepcmd_w(port, SETIFLAGS, (unsigned)iflag, 0);

	hflow = 0;
	if (t->c_cflag & CDTR_IFLOW)
		hflow |= sc->csigs->dtr;
	if
(t->c_cflag & CRTS_IFLOW) hflow |= sc->csigs->rts; if (t->c_cflag & CCTS_OFLOW) hflow |= sc->csigs->cts; if (t->c_cflag & CDSR_OFLOW) hflow |= port->dsr; if (t->c_cflag & CCAR_OFLOW) hflow |= port->cd; DLOG(DIGIDB_SET, (sc->dev, "port%d: set hflow = 0x%x\n", pnum, hflow)); fepcmd_w(port, SETHFLOW, 0xff00 | (unsigned)hflow, 0); DLOG(DIGIDB_SET, (sc->dev, "port%d: set startc(0x%x), stopc(0x%x)\n", pnum, t->c_cc[VSTART], t->c_cc[VSTOP])); fepcmd_b(port, SONOFFC, t->c_cc[VSTART], t->c_cc[VSTOP], 0); if (sc->window != 0) sc->towin(sc, 0); if (window != 0) sc->towin(sc, window); splx(s); return (0); } static void digi_intr(void *vp) { struct digi_p *port; char *cxcon; struct digi_softc *sc; int ehead, etail; volatile struct board_chan *bc; struct tty *tp; int head, tail; int wrapmask; int size, window; struct event { u_char pnum; u_char event; u_char mstat; u_char lstat; } event; sc = vp; if (sc->status != DIGI_STATUS_ENABLED) { DLOG(DIGIDB_IRQ, (sc->dev, "interrupt on disabled board !\n")); return; } #ifdef DIGI_INTERRUPT microtime(&sc->intr_timestamp); #endif window = sc->window; sc->setwin(sc, 0); if (sc->model >= PCXEM && W(sc->vmem + 0xd00)) { struct con_bios *con = con_bios_list; register u_char *ptr; ptr = sc->vmem + W(sc->vmem + 0xd00); while (con) { if (ptr[1] && W(ptr + 2) == W(con->bios + 2)) /* Not first block -- exact match */ break; if (W(ptr + 4) >= W(con->bios + 4) && W(ptr + 4) <= W(con->bios + 6)) /* Initial search concetrator BIOS */ break; } if (con == NULL) { log(LOG_ERR, "digi%d: wanted bios LREV = 0x%04x" " not found!\n", sc->res.unit, W(ptr + 4)); W(ptr + 10) = 0; W(sc->vmem + 0xd00) = 0; goto eoi; } cxcon = con->bios; W(ptr + 4) = W(cxcon + 4); W(ptr + 6) = W(cxcon + 6); if (ptr[1] == 0) W(ptr + 2) = W(cxcon + 2); W(ptr + 8) = (ptr[1] << 6) + W(cxcon + 8); size = W(cxcon + 10) - (ptr[1] << 10); if (size <= 0) { W(ptr + 8) = W(cxcon + 8); W(ptr + 10) = 0; } else { if (size > 1024) size = 1024; W(ptr + 10) = size; bcopy(cxcon + (ptr[1] << 10), ptr 
+ 12, size); } W(sc->vmem + 0xd00) = 0; goto eoi; } ehead = sc->gdata->ein; etail = sc->gdata->eout; if (ehead == etail) { #ifdef DEBUG sc->intr_count++; if (sc->intr_count % 6000 == 0) { DLOG(DIGIDB_IRQ, (sc->dev, "6000 useless polls %x %x\n", ehead, etail)); sc->intr_count = 0; } #endif goto eoi; } while (ehead != etail) { event = *(volatile struct event *)(sc->memevent + etail); etail = (etail + 4) & sc->gdata->imax; if (event.pnum >= sc->numports) { log(LOG_ERR, "digi%d: port %d: got event" " on nonexisting port\n", sc->res.unit, event.pnum); continue; } port = &sc->ports[event.pnum]; bc = port->bc; tp = port->tp; if (!(tp->t_state & TS_ISOPEN) && !port->wopeners) { DLOG(DIGIDB_IRQ, (sc->dev, "port %d: event 0x%x on closed port\n", event.pnum, event.event)); bc->rout = bc->rin; bc->idata = 0; bc->iempty = 0; bc->ilow = 0; bc->mint = 0; continue; } if (event.event & ~ALL_IND) log(LOG_ERR, "digi%d: port%d: ? event 0x%x mstat 0x%x" " lstat 0x%x\n", sc->res.unit, event.pnum, event.event, event.mstat, event.lstat); if (event.event & DATA_IND) { DLOG(DIGIDB_IRQ, (sc->dev, "port %d: DATA_IND\n", event.pnum)); wrapmask = port->rxbufsize - 1; head = bc->rin; tail = bc->rout; size = 0; if (!(tp->t_state & TS_ISOPEN)) { bc->rout = head; goto end_of_data; } while (head != tail) { int top; DLOG(DIGIDB_INT, (sc->dev, "port %d: p rx head = %d tail = %d\n", event.pnum, head, tail)); top = (head > tail) ? head : wrapmask + 1; sc->towin(sc, port->rxwin); size = top - tail; if (tp->t_state & TS_CAN_BYPASS_L_RINT) { size = b_to_q((char *)port->rxbuf + tail, size, &tp->t_rawq); tail = top - size; ttwakeup(tp); } else for (; tail < top;) { linesw[tp->t_line]. 
l_rint(port->rxbuf[tail], tp); sc->towin(sc, port->rxwin); size--; tail++; if (tp->t_state & TS_TBLOCK) break; } tail &= wrapmask; sc->setwin(sc, 0); bc->rout = tail; head = bc->rin; if (size) break; } if (bc->orun) { CE_RECORD(port, CE_OVERRUN); log(LOG_ERR, "digi%d: port%d: %s\n", sc->res.unit, event.pnum, digi_errortxt(CE_OVERRUN)); bc->orun = 0; } end_of_data: if (size) { tp->t_state |= TS_TBLOCK; port->status |= PAUSE_RX; DLOG(DIGIDB_RX, (sc->dev, "port %d: pause RX\n", event.pnum)); } else { bc->idata = 1; } } if (event.event & MODEMCHG_IND) { DLOG(DIGIDB_MODEM, (sc->dev, "port %d: MODEMCHG_IND\n", event.pnum)); if ((event.mstat ^ event.lstat) & port->cd) { sc->hidewin(sc); linesw[tp->t_line].l_modem (tp, event.mstat & port->cd); sc->setwin(sc, 0); wakeup(TSA_CARR_ON(tp)); } if (event.mstat & sc->csigs->ri) { DLOG(DIGIDB_RI, (sc->dev, "port %d: RING\n", event.pnum)); if (port->send_ring) { linesw[tp->t_line].l_rint('R', tp); linesw[tp->t_line].l_rint('I', tp); linesw[tp->t_line].l_rint('N', tp); linesw[tp->t_line].l_rint('G', tp); linesw[tp->t_line].l_rint('\r', tp); linesw[tp->t_line].l_rint('\n', tp); } } } if (event.event & BREAK_IND) { DLOG(DIGIDB_MODEM, (sc->dev, "port %d: BREAK_IND\n", event.pnum)); linesw[tp->t_line].l_rint(TTY_BI, tp); } if (event.event & (LOWTX_IND | EMPTYTX_IND)) { DLOG(DIGIDB_IRQ, (sc->dev, "port %d:%s%s\n", event.pnum, event.event & LOWTX_IND ? " LOWTX" : "", event.event & EMPTYTX_IND ? 
" EMPTYTX" : "")); (*linesw[tp->t_line].l_start)(tp); } } sc->gdata->eout = etail; eoi: if (sc->window != 0) sc->towin(sc, 0); if (window != 0) sc->towin(sc, window); } static void digistart(struct tty *tp) { int unit; int pnum; struct digi_p *port; struct digi_softc *sc; volatile struct board_chan *bc; int head, tail; int size, ocount, totcnt = 0; int s; int wmask; unit = MINOR_TO_UNIT(minor(tp->t_dev)); pnum = MINOR_TO_PORT(minor(tp->t_dev)); sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit); KASSERT(sc, ("digi%d: softc not allocated in digistart\n", unit)); port = &sc->ports[pnum]; bc = port->bc; wmask = port->txbufsize - 1; s = spltty(); port->lcc = tp->t_outq.c_cc; sc->setwin(sc, 0); if (!(tp->t_state & TS_TBLOCK)) { if (port->status & PAUSE_RX) { DLOG(DIGIDB_RX, (sc->dev, "port %d: resume RX\n", pnum)); /* * CAREFUL - braces are needed here if the DLOG is * optimised out! */ } port->status &= ~PAUSE_RX; bc->idata = 1; } if (!(tp->t_state & TS_TTSTOP) && port->status & PAUSE_TX) { DLOG(DIGIDB_TX, (sc->dev, "port %d: resume TX\n", pnum)); port->status &= ~PAUSE_TX; fepcmd_w(port, RESUMETX, 0, 10); } if (tp->t_outq.c_cc == 0) tp->t_state &= ~TS_BUSY; else tp->t_state |= TS_BUSY; head = bc->tin; while (tp->t_outq.c_cc != 0) { tail = bc->tout; DLOG(DIGIDB_INT, (sc->dev, "port%d: s tx head = %d tail = %d\n", pnum, head, tail)); if (head < tail) size = tail - head - 1; else { size = port->txbufsize - head; if (tail == 0) size--; } if (size == 0) break; sc->towin(sc, port->txwin); ocount = q_to_b(&tp->t_outq, port->txbuf + head, size); totcnt += ocount; head += ocount; head &= wmask; sc->setwin(sc, 0); bc->tin = head; bc->iempty = 1; bc->ilow = 1; } port->lostcc = tp->t_outq.c_cc; tail = bc->tout; if (head < tail) size = port->txbufsize - tail + head; else size = head - tail; port->lbuf = size; DLOG(DIGIDB_INT, (sc->dev, "port%d: s total cnt = %d\n", pnum, totcnt)); ttwwakeup(tp); splx(s); } static void digistop(struct tty *tp, int rw) { struct 
digi_softc *sc; int unit; int pnum; struct digi_p *port; unit = MINOR_TO_UNIT(minor(tp->t_dev)); pnum = MINOR_TO_PORT(minor(tp->t_dev)); sc = (struct digi_softc *)devclass_get_softc(digi_devclass, unit); KASSERT(sc, ("digi%d: softc not allocated in digistop\n", unit)); port = sc->ports + pnum; DLOG(DIGIDB_TX, (sc->dev, "port %d: pause TX\n", pnum)); port->status |= PAUSE_TX; fepcmd_w(port, PAUSETX, 0, 10); } static void fepcmd(struct digi_p *port, int cmd, int op1, int ncmds) { u_char *mem; unsigned tail, head; int count, n; mem = port->sc->memcmd; port->sc->setwin(port->sc, 0); head = port->sc->gdata->cin; mem[head + 0] = cmd; mem[head + 1] = port->pnum; *(u_short *)(mem + head + 2) = op1; head = (head + 4) & port->sc->gdata->cmax; port->sc->gdata->cin = head; for (count = FEPTIMEOUT; count > 0; count--) { head = port->sc->gdata->cin; tail = port->sc->gdata->cout; n = (head - tail) & port->sc->gdata->cmax; if (n <= ncmds * sizeof(short) * 4) break; } if (count == 0) log(LOG_ERR, "digi%d: port%d: timeout on FEP command\n", port->sc->res.unit, port->pnum); } const char * digi_errortxt(int id) { static const char *error_desc[] = { "silo overflow", "interrupt-level buffer overflow", "tty-level buffer overflow", }; KASSERT(id >= 0 && id < sizeof(error_desc) / sizeof(error_desc[0]), ("Unexpected digi error id %d\n", id)); return (error_desc[id]); } int digi_attach(struct digi_softc *sc) { sc->res.ctldev = make_dev(&digi_sw, (sc->res.unit << 16) | CTRL_DEV, UID_ROOT, GID_WHEEL, 0600, "digi%r.ctl", sc->res.unit); digi_loadmoduledata(sc); digi_init(sc); digi_freemoduledata(sc); return (0); } static int digi_inuse(struct digi_softc *sc) { int i; for (i = 0; i < sc->numports; i++) if (sc->ttys[i].t_state & TS_ISOPEN) { DLOG(DIGIDB_INIT, (sc->dev, "port%d: busy\n", i)); return (1); } else if (sc->ports[i].wopeners || sc->ports[i].opencnt) { DLOG(DIGIDB_INIT, (sc->dev, "port%d: blocked in open\n", i)); return (1); } return (0); } static void digi_free_state(struct digi_softc 
*sc) { int d, i; /* Blow it all away */ for (i = 0; i < sc->numports; i++) for (d = 0; d < 6; d++) destroy_dev(sc->ports[i].dev[d]); untimeout(digi_poll, sc, sc->callout); callout_handle_init(&sc->callout); untimeout(digi_int_test, sc, sc->inttest); callout_handle_init(&sc->inttest); bus_teardown_intr(sc->dev, sc->res.irq, sc->res.irqHandler); #ifdef DIGI_INTERRUPT if (sc->res.irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->res.irqrid, sc->res.irq); sc->res.irq = NULL; } #endif if (sc->numports) { KASSERT(sc->ports, ("digi%d: Lost my ports ?", sc->res.unit)); KASSERT(sc->ttys, ("digi%d: Lost my ttys ?", sc->res.unit)); free(sc->ports, M_TTYS); sc->ports = NULL; free(sc->ttys, M_TTYS); sc->ttys = NULL; sc->numports = 0; } sc->status = DIGI_STATUS_NOTINIT; } int digi_detach(device_t dev) { struct digi_softc *sc = device_get_softc(dev); DLOG(DIGIDB_INIT, (sc->dev, "detaching\n")); /* If we're INIT'd, numports must be 0 */ KASSERT(sc->numports == 0 || sc->status != DIGI_STATUS_NOTINIT, ("digi%d: numports(%d) & status(%d) are out of sync", sc->res.unit, sc->numports, (int)sc->status)); if (digi_inuse(sc)) return (EBUSY); digi_free_state(sc); destroy_dev(sc->res.ctldev); if (sc->res.mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, sc->res.mrid, sc->res.mem); sc->res.mem = NULL; } if (sc->res.io != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, sc->res.iorid, sc->res.io); sc->res.io = NULL; } return (0); } int digi_shutdown(device_t dev) { return (0); } MODULE_VERSION(digi, 1); Index: head/sys/dev/digi/digi_isa.c =================================================================== --- head/sys/dev/digi/digi_isa.c (revision 129878) +++ head/sys/dev/digi/digi_isa.c (revision 129879) @@ -1,472 +1,473 @@ /*- * Copyright (c) 2001 Brian Somers * based on work by Slawa Olhovchenkov * John Prince * Eric Hernes * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /*- * TODO: * Figure out how to make the non-Xi boards use memory addresses other * than 0xd0000 !!! 
 */

/*
 * NOTE(review): the header names of these #include directives (and the
 * stray '+' diff marker) were lost when this file was flattened from a
 * patch; they are preserved as-is.  They originally named the sys/ and
 * dev/digi/ headers this file depends on.
 */
#include
#include
#include
+#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Valid i/o addresses are any of these with either 0 or 4 added */
static u_long digi_validio[] = {
	0x100, 0x110, 0x120, 0x200, 0x220, 0x300, 0x320
};

#define	DIGI_NVALIDIO	(sizeof(digi_validio) / sizeof(digi_validio[0]))
#define	IO_SIZE		0x04

/* Valid board memory windows (ISA hole and high PCI-style addresses). */
static u_long digi_validmem[] = {
	0x80000, 0x88000, 0x90000, 0x98000, 0xa0000, 0xa8000, 0xb0000,
	0xb8000, 0xc0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000, 0xe8000,
	0xf0000, 0xf8000, 0xf0000000, 0xf1000000, 0xf2000000, 0xf3000000,
	0xf4000000, 0xf5000000, 0xf6000000, 0xf7000000, 0xf8000000,
	0xf9000000, 0xfa000000, 0xfb000000, 0xfc000000, 0xfd000000,
	0xfe000000, 0xff000000
};

#define	DIGI_NVALIDMEM	(sizeof(digi_validmem) / sizeof(digi_validmem[0]))

/*
 * Select the memory window containing 'addr' on a windowed (non-Xi)
 * board and return a pointer to the address within the mapped window.
 */
static u_char *
digi_isa_setwin(struct digi_softc *sc, unsigned int addr)
{
	outb(sc->wport, sc->window = FEPWIN | (addr >> sc->win_bits));
	return (sc->vmem + (addr % sc->win_size));
}

/*
 * PC/Xi variant: the whole board memory is mapped at once, so just
 * enable it and offset into it.
 */
static u_char *
digi_xi_setwin(struct digi_softc *sc, unsigned int addr)
{
	outb(sc->wport, sc->window = FEPMEM);
	return (sc->vmem + addr);
}

/* Disable the board memory window. */
static void
digi_isa_hidewin(struct digi_softc *sc)
{
	outb(sc->wport, sc->window = 0);
	/* outb(sc->port, 0); */
}

/* Re-select a previously saved window value. */
static void
digi_isa_towin(struct digi_softc *sc, int win)
{
	outb(sc->wport, sc->window = win);
}

/* PC/Xi variant: only one "window" (all memory); always map it. */
static void
digi_xi_towin(struct digi_softc *sc, int win)
{
	outb(sc->wport, sc->window = FEPMEM);
}

/*
 * sc->port should be set and its resource allocated.
 *
 * Invasively probe and identify the board type, filling in the model,
 * signal table, window geometry, firmware module name and window
 * accessors.  Returns non-zero when a supported board was recognised.
 */
static int
digi_isa_check(struct digi_softc *sc)
{
	int i, ident;

	sc->name = NULL;

	/* Invasive probe - reset the card */
	outb(sc->port, FEPRST);
	for (i = 0; (inb(sc->port) & FEPMASK) != FEPRST; i++) {
		if (i == hz / 10)
			return (0);	/* no response: not our board */
		digi_delay(sc, "digirst", 1);
	}
	DLOG(DIGIDB_INIT, (sc->dev, "got reset after %d iterations\n", i));

	ident = inb(sc->port);

	/*
	 * NOTE, this probe is all wrong.  I haven't got the data sheets !
	 */
	DLOG(DIGIDB_INIT, (sc->dev, "board type is 0x%x\n", ident));
	if (ident & 0x1) {
		switch (ident) {
		case 0x05:
		case 0x15:
		case 0x25:
		case 0x35:
			sc->model = PCXI;
			sc->csigs = &digi_xixe_signals;
			/* Bits 4-5 encode the on-board memory size. */
			switch (ident & 0x30) {
			case 0:
				sc->name = "Digiboard PC/Xi 64K";
				sc->mem_seg = 0xf000;
				sc->win_size = 0x10000;
				sc->win_bits = 16;
				break;
			case 0x10:
				sc->name = "Digiboard PC/Xi 128K";
				sc->mem_seg = 0xE000;
				sc->win_size = 0x20000;
				sc->win_bits = 17;
				break;
			case 0x20:
				sc->name = "Digiboard PC/Xi 256K";
				sc->mem_seg = 0xC000;
				sc->win_size = 0x40000;
				sc->win_bits = 18;
				break;
			case 0x30:
				sc->name = "Digiboard PC/Xi 512K";
				sc->mem_seg = 0x8000;
				sc->win_size = 0x80000;
				sc->win_bits = 19;
				break;
			}
			sc->wport = sc->port;
			sc->module = "Xe";

			sc->setwin = digi_xi_setwin;
			sc->hidewin = digi_isa_hidewin;
			sc->towin = digi_xi_towin;
			break;

		case 0xf5:
			sc->name = "Digiboard PC/Xem";
			sc->model = PCXEM;
			sc->csigs = &digi_normal_signals;
			sc->win_size = 0x8000;
			sc->win_bits = 15;
			sc->wport = sc->port + 1;
			sc->module = "Xem";

			sc->setwin = digi_isa_setwin;
			sc->hidewin = digi_isa_hidewin;
			sc->towin = digi_isa_towin;
			break;
		}
	} else {
		outb(sc->port, 1);
		ident = inb(sc->port);

		if (ident & 0x1) {
			device_printf(sc->dev, "PC/Xm is unsupported\n");
			return (0);
		}

		sc->mem_seg = 0xf000;

		if (!(ident & 0xc0)) {
			sc->name = "Digiboard PC/Xe 64K";
			sc->model = PCXE;
			sc->csigs = &digi_xixe_signals;
			sc->win_size = 0x10000;
			sc->win_bits = 16;
			sc->wport = sc->port;
		} else {
			sc->name = "Digiboard PC/Xe 64/8K (windowed)";
			sc->model = PCXEVE;
			sc->csigs = &digi_normal_signals;
			sc->win_size = 0x2000;
			sc->win_bits = 13;
			sc->wport = sc->port + 1;
		}

		sc->module = "Xe";

		sc->setwin = digi_isa_setwin;
		sc->hidewin = digi_isa_hidewin;
		sc->towin = digi_isa_towin;
	}

	return (sc->name != NULL);
}

/*
 * ISA bus probe entry point.  Validates the configured i/o and memory
 * addresses against the boards' supported sets before touching the
 * hardware.  (Continues beyond the visible region of the file.)
 */
static int
digi_isa_probe(device_t dev)
{
	struct digi_softc *sc = device_get_softc(dev);
	int i;

	KASSERT(sc, ("digi%d: softc not allocated in digi_isa_probe\n",
	    device_get_unit(dev)));
	bzero(sc, sizeof(*sc));
	sc->status = DIGI_STATUS_NOTINIT;
sc->dev = dev; sc->res.unit = device_get_unit(dev); if (sc->res.unit >= 16) { /* Don't overflow our control mask */ device_printf(dev, "At most 16 digiboards may be used\n"); return (ENXIO); } DLOG(DIGIDB_INIT, (sc->dev, "probing on isa bus\n")); /* Check that we've got a valid i/o address */ if ((sc->port = bus_get_resource_start(dev, SYS_RES_IOPORT, 0)) == 0) { DLOG(DIGIDB_INIT, (sc->dev, "io address not given\n")); return (ENXIO); } for (i = 0; i < DIGI_NVALIDIO; i++) if (sc->port == digi_validio[i] || sc->port == digi_validio[i] + 4) break; if (i == DIGI_NVALIDIO) { device_printf(dev, "0x%03x: Invalid i/o address\n", sc->port); return (ENXIO); } /* Ditto for our memory address */ if ((sc->pmem = bus_get_resource_start(dev, SYS_RES_MEMORY, 0)) == 0) return (ENXIO); for (i = 0; i < DIGI_NVALIDMEM; i++) if (sc->pmem == digi_validmem[i]) break; if (i == DIGI_NVALIDMEM) { device_printf(dev, "0x%lx: Invalid memory address\n", sc->pmem); return (ENXIO); } if ((sc->pmem & 0xfffffful) != sc->pmem) { device_printf(dev, "0x%lx: Memory address not supported\n", sc->pmem); return (ENXIO); } sc->vmem = (u_char *)sc->pmem; DLOG(DIGIDB_INIT, (sc->dev, "isa? 
port 0x%03x mem 0x%lx\n", sc->port, sc->pmem)); /* Temporarily map our io ports */ sc->res.iorid = 0; sc->res.io = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->res.iorid, 0ul, ~0ul, IO_SIZE, RF_ACTIVE); if (sc->res.io == NULL) return (ENXIO); /* Check the type of card and get internal memory characteristics */ if (!digi_isa_check(sc)) { bus_release_resource(dev, SYS_RES_IOPORT, sc->res.iorid, sc->res.io); return (ENXIO); } /* Temporarily map our memory */ sc->res.mrid = 0; sc->res.mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->res.mrid, 0ul, ~0ul, sc->win_size, RF_ALLOCATED); if (sc->res.mem == NULL) { device_printf(dev, "0x%lx: Memory range is in use\n", sc->pmem); bus_release_resource(dev, SYS_RES_IOPORT, sc->res.iorid, sc->res.io); return (ENXIO); } outb(sc->port, FEPCLR); /* drop RESET */ sc->hidewin(sc); /* set initial sc->window */ bus_release_resource(dev, SYS_RES_MEMORY, sc->res.mrid, sc->res.mem); bus_release_resource(dev, SYS_RES_IOPORT, sc->res.iorid, sc->res.io); /* Let digi_isa_attach() know what we've found */ bus_set_resource(dev, SYS_RES_IOPORT, 0, sc->port, IO_SIZE); bus_set_resource(dev, SYS_RES_MEMORY, 0, sc->pmem, sc->win_size); DLOG(DIGIDB_INIT, (sc->dev, "Probe returns -10\n")); return (-10); /* Other drivers are preferred for now */ } static int digi_isa_attach(device_t dev) { struct digi_softc *sc = device_get_softc(dev); int i, t, res; u_char *ptr; int reset; u_long msize, iosize; long scport; KASSERT(sc, ("digi%d: softc not allocated in digi_isa_attach\n", device_get_unit(dev))); res = ENXIO; bzero(sc, sizeof(*sc)); sc->status = DIGI_STATUS_NOTINIT; sc->dev = dev; sc->res.unit = device_get_unit(dev); DLOG(DIGIDB_INIT, (sc->dev, "attaching\n")); bus_get_resource(dev, SYS_RES_IOPORT, 0, &scport, &iosize); bus_get_resource(dev, SYS_RES_MEMORY, 0, &sc->pmem, &msize); sc->port = scport; /* sc->altpin = !!(device_get_flags(dev) & DGBFLAG_ALTPIN); */ /* Allocate resources (verified in digi_isa_probe()) */ sc->res.iorid = 0; sc->res.io = 
bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->res.iorid, 0ul, ~0ul, iosize, RF_ACTIVE); if (sc->res.io == NULL) return (ENXIO); /* Check the type of card and get internal memory characteristics */ DLOG(DIGIDB_INIT, (sc->dev, "Checking card type\n")); if (!digi_isa_check(sc)) goto failed; callout_handle_init(&sc->callout); callout_handle_init(&sc->inttest); sc->res.mrid = 0; sc->res.mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->res.mrid, 0ul, ~0ul, msize, RF_ACTIVE); if (sc->res.mem == NULL) { device_printf(dev, "0x%lx: Memory range is in use\n", sc->pmem); sc->hidewin(sc); goto failed; } /* map memory */ sc->vmem = pmap_mapdev(sc->pmem, msize); DLOG(DIGIDB_INIT, (sc->dev, "internal memory segment 0x%x\n", sc->mem_seg)); /* Start by resetting the card */ reset = FEPRST; if (sc->model == PCXI) reset |= FEPMEM; outb(sc->port, reset); for (i = 0; (inb(sc->port) & FEPMASK) != reset; i++) { if (i == hz / 10) { device_printf(dev, "1st reset failed\n"); sc->hidewin(sc); goto failed; } digi_delay(sc, "digirst1", 1); } DLOG(DIGIDB_INIT, (sc->dev, "got reset after %d iterations\n", i)); if (sc->model != PCXI) { t = (sc->pmem >> 8) & 0xffe0; if (sc->model == PCXEVE) t |= 0x10; /* enable windowing */ outb(sc->port + 2, t & 0xff); outb(sc->port + 3, t >> 8); } if (sc->model == PCXI || sc->model == PCXE) { outb(sc->port, FEPRST | FEPMEM); for (i = 0; (inb(sc->port) & FEPMASK) != FEPRST; i++) { if (i == hz / 10) { device_printf(dev, "memory reservation failed (0x%02x)\n", inb(sc->port)); sc->hidewin(sc); goto failed; } digi_delay(sc, "digirst2", 1); } DLOG(DIGIDB_INIT, (sc->dev, "got memory after %d iterations\n", i)); } DLOG(DIGIDB_INIT, (sc->dev, "short memory test\n")); ptr = sc->setwin(sc, BOTWIN); vD(ptr) = 0xA55A3CC3; if (vD(ptr) != 0xA55A3CC3) { device_printf(dev, "1st memory test failed\n"); sc->hidewin(sc); goto failed; } DLOG(DIGIDB_INIT, (sc->dev, "1st memory test ok\n")); ptr = sc->setwin(sc, TOPWIN); vD(ptr) = 0x5AA5C33C; if (vD(ptr) != 0x5AA5C33C) { 
device_printf(dev, "2nd memory test failed\n"); sc->hidewin(sc); goto failed; } DLOG(DIGIDB_INIT, (sc->dev, "2nd memory test ok\n")); ptr = sc->setwin(sc, BIOSCODE + ((0xf000 - sc->mem_seg) << 4)); vD(ptr) = 0x5AA5C33C; if (vD(ptr) != 0x5AA5C33C) { device_printf(dev, "3rd (BIOS) memory test failed\n"); sc->hidewin(sc); goto failed; } DLOG(DIGIDB_INIT, (sc->dev, "3rd memory test ok\n")); if ((res = digi_attach(sc)) == 0) return (0); failed: if (sc->res.mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, sc->res.mrid, sc->res.mem); sc->res.mem = NULL; } if (sc->res.io != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, sc->res.iorid, sc->res.io); sc->res.io = NULL; } return (res); } static device_method_t digi_isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, digi_isa_probe), DEVMETHOD(device_attach, digi_isa_attach), DEVMETHOD(device_detach, digi_detach), DEVMETHOD(device_shutdown, digi_shutdown), {0, 0} }; static driver_t digi_isa_drv = { "digi", digi_isa_methods, sizeof(struct digi_softc), }; DRIVER_MODULE(digi, isa, digi_isa_drv, digi_devclass, 0, 0); Index: head/sys/dev/digi/digi_pci.c =================================================================== --- head/sys/dev/digi/digi_pci.c (revision 129878) +++ head/sys/dev/digi/digi_pci.c (revision 129879) @@ -1,229 +1,230 @@ /*- * Copyright (c) 2001 Brian Somers * based on work by Slawa Olhovchenkov * John Prince * Eric Hernes * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include static u_char * digi_pci_setwin(struct digi_softc *sc, unsigned int addr) { return (sc->vmem + addr); } static void digi_pci_hidewin(struct digi_softc *sc) { return; } static void digi_pci_towin(struct digi_softc *sc, int win) { return; } static int digi_pci_probe(device_t dev) { unsigned int device_id = pci_get_devid(dev); if (device_get_unit(dev) >= 16) { /* Don't overflow our control mask */ device_printf(dev, "At most 16 digiboards may be used\n"); return (ENXIO); } if ((device_id & 0xffff) != PCI_VENDOR_DIGI) return (ENXIO); switch (device_id >> 16) { case PCI_DEVICE_EPC: case PCI_DEVICE_XEM: case PCI_DEVICE_XR: case PCI_DEVICE_CX: case PCI_DEVICE_XRJ: case PCI_DEVICE_EPCJ: case PCI_DEVICE_920_4: case PCI_DEVICE_920_8: case PCI_DEVICE_920_2: return (0); } return (ENXIO); } static int digi_pci_attach(device_t dev) { struct digi_softc *sc; u_int32_t device_id; #ifdef DIGI_INTERRUPT int retVal = 0; #endif sc = device_get_softc(dev); KASSERT(sc, ("digi%d: softc not allocated in digi_pci_attach\n", device_get_unit(dev))); bzero(sc, 
sizeof(*sc)); sc->dev = dev; sc->res.unit = device_get_unit(dev); device_id = pci_get_devid(dev); switch (device_id >> 16) { case PCI_DEVICE_EPC: sc->name = "Digiboard PCI EPC/X ASIC"; sc->res.mrid = 0x10; sc->model = PCIEPCX; sc->module = "EPCX_PCI"; break; case PCI_DEVICE_XEM: sc->name = "Digiboard PCI PC/Xem ASIC"; sc->res.mrid = 0x10; sc->model = PCXEM; sc->module = "Xem"; break; case PCI_DEVICE_XR: sc->name = "Digiboard PCI PC/Xr ASIC"; sc->res.mrid = 0x10; sc->model = PCIXR; sc->module = "Xr"; break; case PCI_DEVICE_CX: sc->name = "Digiboard PCI C/X ASIC"; sc->res.mrid = 0x10; sc->model = PCCX; sc->module = "CX_PCI"; break; case PCI_DEVICE_XRJ: sc->name = "Digiboard PCI PC/Xr PLX"; sc->res.mrid = 0x18; sc->model = PCIXR; sc->module = "Xr"; break; case PCI_DEVICE_EPCJ: sc->name = "Digiboard PCI EPC/X PLX"; sc->res.mrid = 0x18; sc->model = PCIEPCX; sc->module = "EPCX_PCI"; break; case PCI_DEVICE_920_4: /* Digi PCI4r 920 */ sc->name = "Digiboard PCI4r 920"; sc->res.mrid = 0x10; sc->model = PCIXR; sc->module = "Xr"; break; case PCI_DEVICE_920_8: /* Digi PCI8r 920 */ sc->name = "Digiboard PCI8r 920"; sc->res.mrid = 0x10; sc->model = PCIXR; sc->module = "Xr"; break; case PCI_DEVICE_920_2: /* Digi PCI2r 920 */ sc->name = "Digiboard PCI2r 920"; sc->res.mrid = 0x10; sc->model = PCIXR; sc->module = "Xr"; break; default: device_printf(dev, "Unknown device id = %08x\n", device_id); return (ENXIO); } pci_write_config(dev, 0x40, 0, 4); pci_write_config(dev, 0x46, 0, 4); sc->res.mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->res.mrid, RF_ACTIVE); #ifdef DIGI_INTERRUPT sc->res.irqrid = 0; sc->res.irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->res.irqrid, RF_SHAREABLE | RF_ACTIVE); if (sc->res.irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); return (ENXIO); } retVal = bus_setup_intr(dev, sc->res.irq, INTR_TYPE_TTY, digiintr, sc, &sc->res.irqHandler); #else DLOG(DIGIDB_IRQ, (sc->dev, "Interrupt support compiled out\n")); #endif sc->vmem = 
rman_get_virtual(sc->res.mem); sc->pmem = vtophys(sc->vmem); sc->pcibus = 1; sc->win_size = 0x200000; sc->win_bits = 21; sc->csigs = &digi_normal_signals; sc->status = DIGI_STATUS_NOTINIT; callout_handle_init(&sc->callout); callout_handle_init(&sc->inttest); sc->setwin = digi_pci_setwin; sc->hidewin = digi_pci_hidewin; sc->towin = digi_pci_towin; PCIPORT = FEPRST; return (digi_attach(sc)); } static device_method_t digi_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, digi_pci_probe), DEVMETHOD(device_attach, digi_pci_attach), DEVMETHOD(device_detach, digi_detach), DEVMETHOD(device_shutdown, digi_shutdown), {0, 0} }; static driver_t digi_pci_drv = { "digi", digi_pci_methods, sizeof(struct digi_softc), }; DRIVER_MODULE(digi, pci, digi_pci_drv, digi_devclass, 0, 0); Index: head/sys/dev/em/if_em.h =================================================================== --- head/sys/dev/em/if_em.h (revision 129878) +++ head/sys/dev/em/if_em.h (revision 129879) @@ -1,436 +1,437 @@ /************************************************************************** Copyright (c) 2001-2003, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ /*$FreeBSD$*/ #ifndef _EM_H_DEFINED_ #define _EM_H_DEFINED_ #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_bdg.h" #include /* Tunables */ /* * EM_MAX_TXD: Maximum number of Transmit Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. */ #define EM_MAX_TXD 256 /* * EM_MAX_RXD - Maximum number of receive Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. 
The maximum MTU size is 16110. * */ #define EM_MAX_RXD 256 /* * EM_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors. */ #define EM_TIDV 64 /* * EM_TADV - Transmit Absolute Interrupt Delay Value (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if EM_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with EM_TIDV, may improve traffic throughput in specific * network conditions. */ #define EM_TADV 64 /* * EM_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting EM_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system event log. * In addition, the controller is automatically reset, restoring the * network connection. 
To eliminate the potential for the hang * ensure that EM_RDTR is set to 0. */ #define EM_RDTR 0 /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if EM_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with EM_RDTR, may improve traffic throughput in specific network * conditions. */ #define EM_RADV 64 /* * This parameter controls the maximum no of times the driver will loop * in the isr. * Minimum Value = 1 */ #define EM_MAX_INTR 3 /* * Inform the stack about transmit checksum offload capabilities. */ #define EM_CHECKSUM_FEATURES (CSUM_TCP | CSUM_UDP) /* * This parameter controls the duration of transmit watchdog timer. */ #define EM_TX_TIMEOUT 5 /* set to 5 seconds */ /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. */ #define EM_TX_CLEANUP_THRESHOLD EM_MAX_TXD / 8 /* * This parameter controls whether or not autonegotation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter control whether or not the driver will wait for * autonegotiation to complete. * 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* * EM_MASTER_SLAVE is only defined to enable a workaround for a known compatibility issue * with 82541/82547 devices and some switches. See the "Known Limitations" section of * the README file for a complete description and a list of affected switches. 
* * 0 = Hardware default * 1 = Master mode * 2 = Slave mode * 3 = Auto master/slave */ /* #define EM_MASTER_SLAVE 2 */ /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define EM_VENDOR_ID 0x8086 #define EM_MMBA 0x0010 /* Mem base address */ #define EM_ROUNDUP(size, unit) (((size) + (unit) - 1) & ~((unit) - 1)) #define EM_JUMBO_PBA 0x00000028 #define EM_DEFAULT_PBA 0x00000030 #define EM_SMARTSPEED_DOWNSHIFT 3 #define EM_SMARTSPEED_MAX 15 #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) /* Supported RX Buffer Sizes */ #define EM_RXBUFFER_2048 2048 #define EM_RXBUFFER_4096 4096 #define EM_RXBUFFER_8192 8192 #define EM_RXBUFFER_16384 16384 #define EM_MAX_SCATTER 64 /* ****************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. 
* * ******************************************************************************/ typedef struct _em_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } em_vendor_info_t; struct em_buffer { struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; struct em_q { bus_dmamap_t map; /* bus_dma map for packet */ int nsegs; /* # of segments/descriptors */ bus_dma_segment_t segs[EM_MAX_SCATTER]; }; /* * Bus dma allocation structure used by * em_dma_malloc and em_dma_free. */ struct em_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; int dma_nseg; }; typedef enum _XSUM_CONTEXT_T { OFFLOAD_NONE, OFFLOAD_TCP_IP, OFFLOAD_UDP_IP } XSUM_CONTEXT_T; struct adapter; struct em_int_delay_info { struct adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* For 82544 PCIX Workaround */ typedef struct _ADDRESS_LENGTH_PAIR { u_int64_t address; u_int32_t length; } ADDRESS_LENGTH_PAIR, *PADDRESS_LENGTH_PAIR; typedef struct _DESCRIPTOR_PAIR { ADDRESS_LENGTH_PAIR descriptor[4]; u_int32_t elements; } DESC_ARRAY, *PDESC_ARRAY; /* Our adapter structure */ struct adapter { struct arpcom interface_data; struct adapter *next; struct adapter *prev; struct em_hw hw; /* FreeBSD operating-system-specific structures */ struct em_osdep osdep; struct device *dev; struct resource *res_memory; struct resource *res_ioport; struct resource *res_interrupt; void *int_handler_tag; struct ifmedia media; struct callout timer; struct callout tx_fifo_timer; int io_rid; u_int8_t unit; struct mtx mtx; /* Info about the board itself */ u_int32_t part_num; u_int8_t link_active; u_int16_t link_speed; u_int16_t link_duplex; u_int32_t smartspeed; struct em_int_delay_info tx_int_delay; struct em_int_delay_info tx_abs_int_delay; 
struct em_int_delay_info rx_int_delay; struct em_int_delay_info rx_abs_int_delay; XSUM_CONTEXT_T active_checksum_context; /* * Transmit definitions * * We have an array of num_tx_desc descriptors (handled * by the controller) paired with an array of tx_buffers * (at tx_buffer_area). * The index of the next available descriptor is next_avail_tx_desc. * The number of remaining tx_desc is num_tx_desc_avail. */ struct em_dma_alloc txdma; /* bus_dma glue for tx desc */ struct em_tx_desc *tx_desc_base; u_int32_t next_avail_tx_desc; u_int32_t oldest_used_tx_desc; volatile u_int16_t num_tx_desc_avail; u_int16_t num_tx_desc; u_int32_t txd_cmd; struct em_buffer *tx_buffer_area; bus_dma_tag_t txtag; /* dma tag for tx */ /* * Receive definitions * * we have an array of num_rx_desc rx_desc (handled by the * controller), and paired with an array of rx_buffers * (at rx_buffer_area). * The next pair to check on receive is at offset next_rx_desc_to_check */ struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */ struct em_rx_desc *rx_desc_base; u_int32_t next_rx_desc_to_check; u_int16_t num_rx_desc; u_int32_t rx_buffer_len; struct em_buffer *rx_buffer_area; bus_dma_tag_t rxtag; /* Jumbo frame */ struct mbuf *fmp; struct mbuf *lmp; u_int16_t tx_fifo_head; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long mbuf_alloc_failed; unsigned long mbuf_cluster_failed; unsigned long no_tx_desc_avail1; unsigned long no_tx_desc_avail2; unsigned long no_tx_map_avail; unsigned long no_tx_dma_setup; u_int64_t tx_fifo_reset; u_int64_t tx_fifo_wrk; /* For 82544 PCIX Workaround */ boolean_t pcix_82544; boolean_t in_detach; #ifdef DBG_STATS unsigned long no_pkts_avail; unsigned long clean_tx_interrupts; #endif struct em_hw_stats stats; }; #define EM_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->mtx, _name, MTX_NETWORK_LOCK, MTX_DEF) #define EM_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) #define EM_LOCK(_sc) 
mtx_lock(&(_sc)->mtx) #define EM_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define EM_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) #endif /* _EM_H_DEFINED_ */ Index: head/sys/dev/en/if_en_pci.c =================================================================== --- head/sys/dev/en/if_en_pci.c (revision 129878) +++ head/sys/dev/en/if_en_pci.c (revision 129879) @@ -1,489 +1,490 @@ /* $NetBSD: if_en_pci.c,v 1.1 1996/06/22 02:00:31 chuck Exp $ */ /* * * Copyright (c) 1996 Charles D. Cranor and Washington University. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor and * Washington University. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * i f _ e n _ p c i . c * * author: Chuck Cranor * started: spring, 1996. * * FreeBSD PCI glue for the eni155p card. * thanks to Matt Thomas for figuring out FreeBSD vs NetBSD vs etc.. diffs. */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(en, pci, 1, 1, 1); MODULE_DEPEND(en, atm, 1, 1, 1); MODULE_DEPEND(en, utopia, 1, 1, 1); /* * local structures */ struct en_pci_softc { /* bus independent stuff */ struct en_softc esc; /* includes "device" structure */ /* freebsd newbus glue */ struct resource *res; /* resource descriptor for registers */ struct resource *irq; /* resource descriptor for interrupt */ void *ih; /* interrupt handle */ }; static void eni_get_macaddr(device_t, struct en_pci_softc *); static void adp_get_macaddr(struct en_pci_softc *); /* * address of config base memory address register in PCI config space * (this is card specific) */ #define PCI_CBMA 0x10 /* * tonga (pci bridge). ENI cards only! */ #define EN_TONGA 0x60 /* PCI config addr of tonga reg */ #define TONGA_SWAP_DMA 0x80 /* endian swap control */ #define TONGA_SWAP_BYTE 0x40 #define TONGA_SWAP_WORD 0x20 #define TONGA_READ_MULT 0x00 #define TONGA_READ_MEM 0x04 #define TONGA_READ_IVAN 0x08 #define TONGA_READ_KEN 0x0C /* * adaptec pci bridge. ADP cards only! 
*/ #define ADP_PCIREG 0x050040 /* PCI control register */ #define ADP_PCIREG_RESET 0x1 /* reset card */ #define ADP_PCIREG_IENABLE 0x2 /* interrupt enable */ #define ADP_PCIREG_SWAP_WORD 0x4 /* swap byte on slave access */ #define ADP_PCIREG_SWAP_DMA 0x8 /* swap byte on DMA */ #define PCI_VENDOR_EFFICIENTNETS 0x111a /* Efficent Networks */ #define PCI_PRODUCT_EFFICIENTNETS_ENI155PF 0x0000 /* ENI-155P ATM */ #define PCI_PRODUCT_EFFICIENTNETS_ENI155PA 0x0002 /* ENI-155P ATM */ #define PCI_VENDOR_ADP 0x9004 /* adaptec */ #define PCI_PRODUCT_ADP_AIC5900 0x5900 #define PCI_PRODUCT_ADP_AIC5905 0x5905 #define PCI_VENDOR(x) ((x) & 0xFFFF) #define PCI_CHIPID(x) (((x) >> 16) & 0xFFFF) /* * bus specific reset function [ADP only!] */ static void adp_busreset(void *v) { struct en_softc *sc = (struct en_softc *)v; uint32_t dummy; bus_space_write_4(sc->en_memt, sc->en_base, ADP_PCIREG, ADP_PCIREG_RESET); DELAY(1000); /* let it reset */ dummy = bus_space_read_4(sc->en_memt, sc->en_base, ADP_PCIREG); bus_space_write_4(sc->en_memt, sc->en_base, ADP_PCIREG, (ADP_PCIREG_SWAP_DMA | ADP_PCIREG_IENABLE)); dummy = bus_space_read_4(sc->en_memt, sc->en_base, ADP_PCIREG); if ((dummy & (ADP_PCIREG_SWAP_WORD | ADP_PCIREG_SWAP_DMA)) != ADP_PCIREG_SWAP_DMA) if_printf(&sc->ifatm.ifnet, "adp_busreset: Adaptec ATM did " "NOT reset!\n"); } /***********************************************************************/ /* * autoconfig stuff */ static int en_pci_probe(device_t dev) { switch (pci_get_vendor(dev)) { case PCI_VENDOR_EFFICIENTNETS: switch (pci_get_device(dev)) { case PCI_PRODUCT_EFFICIENTNETS_ENI155PF: case PCI_PRODUCT_EFFICIENTNETS_ENI155PA: device_set_desc(dev, "Efficient Networks ENI-155p"); return (0); } break; case PCI_VENDOR_ADP: switch (pci_get_device(dev)) { case PCI_PRODUCT_ADP_AIC5900: case PCI_PRODUCT_ADP_AIC5905: device_set_desc(dev, "Adaptec 155 ATM"); return (0); } break; } return (ENXIO); } static int en_pci_attach(device_t dev) { struct en_softc *sc; struct en_pci_softc *scp; 
u_long val; int rid, error = 0; sc = device_get_softc(dev); scp = (struct en_pci_softc *)sc; if_initname(&(sc->ifatm.ifnet), device_get_name(dev), device_get_unit(dev)); /* * Enable bus mastering. */ val = pci_read_config(dev, PCIR_COMMAND, 2); val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, val, 2); /* * Map control/status registers. */ rid = PCI_CBMA; scp->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (scp->res == NULL) { device_printf(dev, "could not map memory\n"); error = ENXIO; goto fail; } sc->dev = dev; sc->en_memt = rman_get_bustag(scp->res); sc->en_base = rman_get_bushandle(scp->res); /* * Allocate our interrupt. */ rid = 0; scp->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (scp->irq == NULL) { device_printf(dev, "could not map interrupt\n"); bus_release_resource(dev, SYS_RES_MEMORY, PCI_CBMA, scp->res); error = ENXIO; goto fail; } sc->ipl = 1; /* XXX (required to enable interrupt on midway) */ /* figure out if we are an adaptec card or not */ sc->is_adaptec = (pci_get_vendor(dev) == PCI_VENDOR_ADP) ? 
1 : 0; /* * set up pci bridge */ if (sc->is_adaptec) { adp_get_macaddr(scp); sc->en_busreset = adp_busreset; adp_busreset(sc); } else { eni_get_macaddr(dev, scp); sc->en_busreset = NULL; pci_write_config(dev, EN_TONGA, TONGA_SWAP_DMA | TONGA_READ_IVAN, 4); } /* * Common attach stuff */ if ((error = en_attach(sc)) != 0) { device_printf(dev, "attach failed\n"); bus_teardown_intr(dev, scp->irq, scp->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, scp->irq); bus_release_resource(dev, SYS_RES_MEMORY, PCI_CBMA, scp->res); goto fail; } /* * Do the interrupt SETUP last just before returning */ error = bus_setup_intr(dev, scp->irq, INTR_TYPE_NET, en_intr, sc, &scp->ih); if (error) { en_reset(sc); atm_ifdetach(&sc->ifatm.ifnet); device_printf(dev, "could not setup irq\n"); bus_release_resource(dev, SYS_RES_IRQ, 0, scp->irq); bus_release_resource(dev, SYS_RES_MEMORY, PCI_CBMA, scp->res); en_destroy(sc); goto fail; } return (0); fail: return (error); } /* * Detach the adapter */ static int en_pci_detach(device_t dev) { struct en_softc *sc = device_get_softc(dev); struct en_pci_softc *scp = (struct en_pci_softc *)sc; /* * Stop DMA and drop transmit queue. */ if ((sc->ifatm.ifnet.if_flags & IFF_RUNNING)) { if_printf(&sc->ifatm.ifnet, "still running\n"); sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING; } /* * Close down routes etc. */ en_reset(sc); atm_ifdetach(&sc->ifatm.ifnet); /* * Deallocate resources. */ bus_teardown_intr(dev, scp->irq, scp->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, scp->irq); bus_release_resource(dev, SYS_RES_MEMORY, PCI_CBMA, scp->res); /* * Free all the driver internal resources */ en_destroy(sc); return (0); } static int en_pci_shutdown(device_t dev) { struct en_pci_softc *psc = device_get_softc(dev); en_reset(&psc->esc); DELAY(10); /* is this necessary? */ return (0); } /* * Get the MAC address from an Adaptec board. No idea how to get * serial number or other stuff, because I have no documentation for that * card. 
*/ static void adp_get_macaddr(struct en_pci_softc *scp) { struct en_softc * sc = (struct en_softc *)scp; int lcv; for (lcv = 0; lcv < sizeof(sc->ifatm.mib.esi); lcv++) sc->ifatm.mib.esi[lcv] = bus_space_read_1(sc->en_memt, sc->en_base, MID_ADPMACOFF + lcv); } /* * Read station (MAC) address from serial EEPROM. * derived from linux drivers/atm/eni.c by Werner Almesberger, EPFL LRC. */ #define EN_PROM_MAGIC 0x0c #define EN_PROM_DATA 0x02 #define EN_PROM_CLK 0x01 #define EN_ESI 64 #define EN_SERIAL 112 /* * Read a byte from the given address in the EEPROM */ static uint8_t eni_get_byte(device_t dev, uint32_t *data, u_int address) { int j; uint8_t tmp; address = (address << 1) + 1; /* start operation */ *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data &= ~EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); *data &= ~EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); /* send address with serial line */ for ( j = 7 ; j >= 0 ; j --) { *data = ((address >> j) & 1) ? 
(*data | EN_PROM_DATA) : (*data & ~EN_PROM_DATA); pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data &= ~EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); } /* get ack */ *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data = pci_read_config(dev, EN_TONGA, 4); *data &= ~EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); tmp = 0; for ( j = 7 ; j >= 0 ; j --) { tmp <<= 1; *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data = pci_read_config(dev, EN_TONGA, 4); if(*data & EN_PROM_DATA) tmp |= 1; *data &= ~EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); } /* get ack */ *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data = pci_read_config(dev, EN_TONGA, 4); *data &= ~EN_PROM_CLK ; pci_write_config(dev, EN_TONGA, *data, 4); *data |= EN_PROM_DATA ; pci_write_config(dev, EN_TONGA, *data, 4); return (tmp); } /* * Get MAC address and other stuff from the EEPROM */ static void eni_get_macaddr(device_t dev, struct en_pci_softc *scp) { struct en_softc * sc = (struct en_softc *)scp; int i; uint32_t data, t_data; t_data = pci_read_config(dev, EN_TONGA, 4) & 0xffffff00; data = EN_PROM_MAGIC | EN_PROM_DATA | EN_PROM_CLK; pci_write_config(dev, EN_TONGA, data, 4); for (i = 0; i < sizeof(sc->ifatm.mib.esi); i ++) sc->ifatm.mib.esi[i] = eni_get_byte(dev, &data, i + EN_ESI); sc->ifatm.mib.serial = 0; for (i = 0; i < 4; i++) { sc->ifatm.mib.serial <<= 8; sc->ifatm.mib.serial |= eni_get_byte(dev, &data, i + EN_SERIAL); } /* stop operation */ data &= ~EN_PROM_DATA; pci_write_config(dev, EN_TONGA, data, 4); data |= 
EN_PROM_CLK; pci_write_config(dev, EN_TONGA, data, 4); data |= EN_PROM_DATA; pci_write_config(dev, EN_TONGA, data, 4); pci_write_config(dev, EN_TONGA, t_data, 4); } /* * Driver infrastructure */ static device_method_t en_methods[] = { /* Device interface */ DEVMETHOD(device_probe, en_pci_probe), DEVMETHOD(device_attach, en_pci_attach), DEVMETHOD(device_detach, en_pci_detach), DEVMETHOD(device_shutdown, en_pci_shutdown), { 0, 0 } }; static driver_t en_driver = { "en", en_methods, sizeof(struct en_pci_softc), }; static devclass_t en_devclass; DRIVER_MODULE(en, pci, en_driver, en_devclass, en_modevent, 0); Index: head/sys/dev/fb/splash.c =================================================================== --- head/sys/dev/fb/splash.c (revision 129878) +++ head/sys/dev/fb/splash.c (revision 129879) @@ -1,212 +1,213 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_splash.h" #include #include #include #include #include #include +#include #include #include MODULE_VERSION(splash, 1); /* video adapter and image decoder */ static video_adapter_t *splash_adp; static splash_decoder_t *splash_decoder; /* decoder candidates */ static int decoders; static splash_decoder_t **decoder_set; #define DECODER_ARRAY_DELTA 4 /* console driver callback */ static int (*splash_callback)(int, void *); static void *splash_arg; static int splash_find_data(splash_decoder_t *decoder) { caddr_t image_module; caddr_t p; if (decoder->data_type == NULL) return 0; image_module = preload_search_by_type(decoder->data_type); if (image_module == NULL) return ENOENT; p = preload_search_info(image_module, MODINFO_ADDR); if (p == NULL) return ENOENT; decoder->data = *(void **)p; p = preload_search_info(image_module, MODINFO_SIZE); if (p == NULL) return ENOENT; decoder->data_size = *(size_t *)p; if (bootverbose) printf("splash: image@%p, size:%lu\n", (void *)decoder->data, (long)decoder->data_size); return 0; } static int splash_test(splash_decoder_t *decoder) { if (splash_find_data(decoder)) return ENOENT; /* XXX */ if (*decoder->init && (*decoder->init)(splash_adp)) { decoder->data = NULL; decoder->data_size = 0; return ENODEV; /* XXX */ } if (bootverbose) printf("splash: image decoder found: %s\n", decoder->name); return 0; } static void splash_new(splash_decoder_t *decoder) { splash_decoder = decoder; if 
(splash_callback != NULL) (*splash_callback)(SPLASH_INIT, splash_arg); } int splash_register(splash_decoder_t *decoder) { splash_decoder_t **p; int error; int i; if (splash_adp != NULL) { /* * If the video card has aleady been initialized, test * this decoder immediately. */ error = splash_test(decoder); if (error == 0) { /* replace the current decoder with new one */ if (splash_decoder != NULL) error = splash_term(splash_adp); if (error == 0) splash_new(decoder); } return error; } else { /* register the decoder for later use */ for (i = 0; i < decoders; ++i) { if (decoder_set[i] == NULL) break; } if ((i >= decoders) && (decoders % DECODER_ARRAY_DELTA) == 0) { p = malloc(sizeof(*p)*(decoders + DECODER_ARRAY_DELTA), M_DEVBUF, M_NOWAIT); if (p == NULL) return ENOMEM; if (decoder_set != NULL) { bcopy(decoder_set, p, sizeof(*p)*decoders); free(decoder_set, M_DEVBUF); } decoder_set = p; i = decoders++; } decoder_set[i] = decoder; } return 0; } int splash_unregister(splash_decoder_t *decoder) { int error; if (splash_decoder == decoder) { if ((error = splash_term(splash_adp)) != 0) return error; } return 0; } int splash_init(video_adapter_t *adp, int (*callback)(int, void *), void *arg) { int i; splash_adp = adp; splash_callback = callback; splash_arg = arg; splash_decoder = NULL; for (i = 0; i < decoders; ++i) { if (decoder_set[i] == NULL) continue; if (splash_test(decoder_set[i]) == 0) { splash_new(decoder_set[i]); break; } decoder_set[i] = NULL; } for (++i; i < decoders; ++i) { decoder_set[i] = NULL; } return 0; } int splash_term(video_adapter_t *adp) { int error = 0; if (splash_adp != adp) return EINVAL; if (splash_decoder != NULL) { if (splash_callback != NULL) error = (*splash_callback)(SPLASH_TERM, splash_arg); if (error == 0 && splash_decoder->term) error = (*splash_decoder->term)(adp); if (error == 0) splash_decoder = NULL; } return error; } int splash(video_adapter_t *adp, int on) { if (splash_decoder != NULL) return (*splash_decoder->splash)(adp, on); return 
ENODEV; } Index: head/sys/dev/firewire/firewire.c =================================================================== --- head/sys/dev/firewire/firewire.c (revision 129878) +++ head/sys/dev/firewire/firewire.c (revision 129879) @@ -1,2283 +1,2284 @@ /* * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ * */ #include #include #include #include +#include #include #include #include #if defined(__DragonFly__) || __FreeBSD_version < 500000 #include /* for DELAY() */ #endif #include /* used by smbus and newbus */ #include #ifdef __DragonFly__ #include "firewire.h" #include "firewirereg.h" #include "fwmem.h" #include "iec13213.h" #include "iec68113.h" #else #include #include #include #include #include #endif struct crom_src_buf { struct crom_src src; struct crom_chunk root; struct crom_chunk vendor; struct crom_chunk hw; }; int firewire_debug=0, try_bmr=1, hold_count=3; SYSCTL_INT(_debug, OID_AUTO, firewire_debug, CTLFLAG_RW, &firewire_debug, 0, "FireWire driver debug flag"); SYSCTL_NODE(_hw, OID_AUTO, firewire, CTLFLAG_RD, 0, "FireWire Subsystem"); SYSCTL_INT(_hw_firewire, OID_AUTO, try_bmr, CTLFLAG_RW, &try_bmr, 0, "Try to be a bus manager"); SYSCTL_INT(_hw_firewire, OID_AUTO, hold_count, CTLFLAG_RW, &hold_count, 0, "Number of count of bus resets for removing lost device information"); MALLOC_DEFINE(M_FW, "firewire", "FireWire"); MALLOC_DEFINE(M_FWXFER, "fw_xfer", "XFER/FireWire"); #define FW_MAXASYRTY 4 devclass_t firewire_devclass; static void firewire_identify (driver_t *, device_t); static int firewire_probe (device_t); static int firewire_attach (device_t); static int firewire_detach (device_t); static int firewire_resume (device_t); #if 0 static int firewire_shutdown (device_t); #endif static device_t firewire_add_child (device_t, int, const char *, int); static void fw_try_bmr (void *); static void fw_try_bmr_callback (struct fw_xfer *); static void fw_asystart (struct fw_xfer *); static int fw_get_tlabel (struct firewire_comm *, struct fw_xfer *); static void fw_bus_probe (struct firewire_comm *); static void fw_bus_explore (struct firewire_comm *); static void fw_bus_explore_callback (struct fw_xfer *); static void fw_attach_dev (struct firewire_comm *); #ifdef FW_VMACCESS static void fw_vmaccess (struct fw_xfer *); #endif struct fw_xfer 
*asyreqq (struct firewire_comm *, uint8_t, uint8_t, uint8_t, uint32_t, uint32_t, void (*)(struct fw_xfer *)); static int fw_bmr (struct firewire_comm *); static device_method_t firewire_methods[] = { /* Device interface */ DEVMETHOD(device_identify, firewire_identify), DEVMETHOD(device_probe, firewire_probe), DEVMETHOD(device_attach, firewire_attach), DEVMETHOD(device_detach, firewire_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, firewire_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_add_child, firewire_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), { 0, 0 } }; char *linkspeed[] = { "S100", "S200", "S400", "S800", "S1600", "S3200", "undef", "undef" }; static char *tcode_str[] = { "WREQQ", "WREQB", "WRES", "undef", "RREQQ", "RREQB", "RRESQ", "RRESB", "CYCS", "LREQ", "STREAM", "LRES", "undef", "undef", "PHY", "undef" }; /* IEEE-1394a Table C-2 Gap count as a function of hops*/ #define MAX_GAPHOP 15 u_int gap_cnt[] = { 5, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40}; static driver_t firewire_driver = { "firewire", firewire_methods, sizeof(struct firewire_softc), }; /* * Lookup fwdev by node id. */ struct fw_device * fw_noderesolve_nodeid(struct firewire_comm *fc, int dst) { struct fw_device *fwdev; int s; s = splfw(); STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->dst == dst && fwdev->status != FWDEVINVAL) break; splx(s); return fwdev; } /* * Lookup fwdev by EUI64. */ struct fw_device * fw_noderesolve_eui64(struct firewire_comm *fc, struct fw_eui64 *eui) { struct fw_device *fwdev; int s; s = splfw(); STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, *eui)) break; splx(s); if(fwdev == NULL) return NULL; if(fwdev->status == FWDEVINVAL) return NULL; return fwdev; } /* * Async. request procedure for userland application. 
*/ int fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer) { int err = 0; struct fw_xferq *xferq; int tl = 0, len; struct fw_pkt *fp; int tcode; struct tcode_info *info; if(xfer == NULL) return EINVAL; if(xfer->act.hand == NULL){ printf("act.hand == NULL\n"); return EINVAL; } fp = &xfer->send.hdr; tcode = fp->mode.common.tcode & 0xf; info = &fc->tcode[tcode]; if (info->flag == 0) { printf("invalid tcode=%x\n", tcode); return EINVAL; } if (info->flag & FWTI_REQ) xferq = fc->atq; else xferq = fc->ats; len = info->hdr_len; if (xfer->send.pay_len > MAXREC(fc->maxrec)) { printf("send.pay_len > maxrec\n"); return EINVAL; } if (info->flag & FWTI_BLOCK_STR) len = fp->mode.stream.len; else if (info->flag & FWTI_BLOCK_ASY) len = fp->mode.rresb.len; else len = 0; if (len != xfer->send.pay_len){ printf("len(%d) != send.pay_len(%d) %s(%x)\n", len, xfer->send.pay_len, tcode_str[tcode], tcode); return EINVAL; } if(xferq->start == NULL){ printf("xferq->start == NULL\n"); return EINVAL; } if(!(xferq->queued < xferq->maxq)){ device_printf(fc->bdev, "Discard a packet (queued=%d)\n", xferq->queued); return EINVAL; } if (info->flag & FWTI_TLABEL) { if((tl = fw_get_tlabel(fc, xfer)) == -1 ) return EIO; fp->mode.hdr.tlrt = tl << 2; } xfer->tl = tl; xfer->resp = 0; xfer->fc = fc; xfer->q = xferq; xfer->retry_req = fw_asybusy; fw_asystart(xfer); return err; } /* * Wakeup blocked process. */ void fw_asy_callback(struct fw_xfer *xfer){ wakeup(xfer); return; } /* * Postpone to later retry. */ void fw_asybusy(struct fw_xfer *xfer){ printf("fw_asybusy\n"); /* xfer->ch = timeout((timeout_t *)fw_asystart, (void *)xfer, 20000); */ #if 0 DELAY(20000); #endif fw_asystart(xfer); return; } /* * Async. request with given xfer structure. 
*/ static void fw_asystart(struct fw_xfer *xfer) { struct firewire_comm *fc = xfer->fc; int s; if(xfer->retry++ >= fc->max_asyretry){ device_printf(fc->bdev, "max_asyretry exceeded\n"); xfer->resp = EBUSY; xfer->state = FWXF_BUSY; xfer->act.hand(xfer); return; } #if 0 /* XXX allow bus explore packets only after bus rest */ if (fc->status < FWBUSEXPLORE) { xfer->resp = EAGAIN; xfer->state = FWXF_BUSY; if (xfer->act.hand != NULL) xfer->act.hand(xfer); return; } #endif microtime(&xfer->tv); s = splfw(); xfer->state = FWXF_INQ; STAILQ_INSERT_TAIL(&xfer->q->q, xfer, link); xfer->q->queued ++; splx(s); /* XXX just queue for mbuf */ if (xfer->mbuf == NULL) xfer->q->start(fc); return; } static void firewire_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "firewire", -1); } static int firewire_probe(device_t dev) { device_set_desc(dev, "IEEE1394(FireWire) bus"); return (0); } static void firewire_xfer_timeout(struct firewire_comm *fc) { struct fw_xfer *xfer; struct tlabel *tl; struct timeval tv; struct timeval split_timeout; int i, s; split_timeout.tv_sec = 0; split_timeout.tv_usec = 200 * 1000; /* 200 msec */ microtime(&tv); timevalsub(&tv, &split_timeout); s = splfw(); for (i = 0; i < 0x40; i ++) { while ((tl = STAILQ_FIRST(&fc->tlabels[i])) != NULL) { xfer = tl->xfer; if (timevalcmp(&xfer->tv, &tv, >)) /* the rests are newer than this */ break; if (xfer->state == FWXF_START) /* not sent yet */ break; device_printf(fc->bdev, "split transaction timeout dst=0x%x tl=0x%x state=%d\n", xfer->send.hdr.mode.hdr.dst, i, xfer->state); xfer->resp = ETIMEDOUT; STAILQ_REMOVE_HEAD(&fc->tlabels[i], link); fw_xfer_done(xfer); } } splx(s); } #define WATCHDOC_HZ 10 static void firewire_watchdog(void *arg) { struct firewire_comm *fc; static int watchdoc_clock = 0; fc = (struct firewire_comm *)arg; /* * At boot stage, the device interrupt is disabled and * We encounter a timeout easily. To avoid this, * ignore clock interrupt for a while. 
*/ if (watchdoc_clock > WATCHDOC_HZ * 15) { firewire_xfer_timeout(fc); fc->timeout(fc); } else watchdoc_clock ++; callout_reset(&fc->timeout_callout, hz / WATCHDOC_HZ, (void *)firewire_watchdog, (void *)fc); } /* * The attach routine. */ static int firewire_attach(device_t dev) { int unit; struct firewire_softc *sc = device_get_softc(dev); device_t pa = device_get_parent(dev); struct firewire_comm *fc; fc = (struct firewire_comm *)device_get_softc(pa); sc->fc = fc; fc->status = FWBUSNOTREADY; unit = device_get_unit(dev); if( fc->nisodma > FWMAXNDMA) fc->nisodma = FWMAXNDMA; fwdev_makedev(sc); CALLOUT_INIT(&sc->fc->timeout_callout); CALLOUT_INIT(&sc->fc->bmr_callout); CALLOUT_INIT(&sc->fc->retry_probe_callout); CALLOUT_INIT(&sc->fc->busprobe_callout); callout_reset(&sc->fc->timeout_callout, hz, (void *)firewire_watchdog, (void *)sc->fc); /* Locate our children */ bus_generic_probe(dev); /* launch attachement of the added children */ bus_generic_attach(dev); /* bus_reset */ fw_busreset(fc); fc->ibr(fc); return 0; } /* * Attach it as child. */ static device_t firewire_add_child(device_t dev, int order, const char *name, int unit) { device_t child; struct firewire_softc *sc; sc = (struct firewire_softc *)device_get_softc(dev); child = device_add_child(dev, name, unit); if (child) { device_set_ivars(child, sc->fc); device_probe_and_attach(child); } return child; } static int firewire_resume(device_t dev) { struct firewire_softc *sc; sc = (struct firewire_softc *)device_get_softc(dev); sc->fc->status = FWBUSNOTREADY; bus_generic_resume(dev); return(0); } /* * Dettach it. 
*/ static int firewire_detach(device_t dev) { struct firewire_softc *sc; struct csrdir *csrd, *next; struct fw_device *fwdev, *fwdev_next; int err; sc = (struct firewire_softc *)device_get_softc(dev); if ((err = fwdev_destroydev(sc)) != 0) return err; if ((err = bus_generic_detach(dev)) != 0) return err; callout_stop(&sc->fc->timeout_callout); callout_stop(&sc->fc->bmr_callout); callout_stop(&sc->fc->retry_probe_callout); callout_stop(&sc->fc->busprobe_callout); /* XXX xfree_free and untimeout on all xfers */ for (fwdev = STAILQ_FIRST(&sc->fc->devices); fwdev != NULL; fwdev = fwdev_next) { fwdev_next = STAILQ_NEXT(fwdev, link); free(fwdev, M_FW); } for (csrd = SLIST_FIRST(&sc->fc->csrfree); csrd != NULL; csrd = next) { next = SLIST_NEXT(csrd, link); free(csrd, M_FW); } free(sc->fc->topology_map, M_FW); free(sc->fc->speed_map, M_FW); free(sc->fc->crom_src_buf, M_FW); return(0); } #if 0 static int firewire_shutdown( device_t dev ) { return 0; } #endif static void fw_xferq_drain(struct fw_xferq *xferq) { struct fw_xfer *xfer; while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) { STAILQ_REMOVE_HEAD(&xferq->q, link); xferq->queued --; xfer->resp = EAGAIN; fw_xfer_done(xfer); } } void fw_drain_txq(struct firewire_comm *fc) { int i; fw_xferq_drain(fc->atq); fw_xferq_drain(fc->ats); for(i = 0; i < fc->nisodma; i++) fw_xferq_drain(fc->it[i]); } static void fw_reset_csr(struct firewire_comm *fc) { int i; CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14 ; CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); CSRARC(fc, NODE_IDS) = 0x3f; CSRARC(fc, TOPO_MAP + 8) = 0; fc->irm = -1; fc->max_node = -1; for(i = 2; i < 0x100/4 - 2 ; i++){ CSRARC(fc, SPED_MAP + i * 4) = 0; } CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14 ; CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); CSRARC(fc, RESET_START) = 0; CSRARC(fc, SPLIT_TIMEOUT_HI) = 0; CSRARC(fc, SPLIT_TIMEOUT_LO) = 800 << 19; CSRARC(fc, CYCLE_TIME) = 0x0; CSRARC(fc, BUS_TIME) = 0x0; 
CSRARC(fc, BUS_MGR_ID) = 0x3f; CSRARC(fc, BANDWIDTH_AV) = 4915; CSRARC(fc, CHANNELS_AV_HI) = 0xffffffff; CSRARC(fc, CHANNELS_AV_LO) = 0xffffffff; CSRARC(fc, IP_CHANNELS) = (1 << 31); CSRARC(fc, CONF_ROM) = 0x04 << 24; CSRARC(fc, CONF_ROM + 4) = 0x31333934; /* means strings 1394 */ CSRARC(fc, CONF_ROM + 8) = 1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 0xff << 16 | 0x09 << 8; CSRARC(fc, CONF_ROM + 0xc) = 0; /* DV depend CSRs see blue book */ CSRARC(fc, oPCR) &= ~DV_BROADCAST_ON; CSRARC(fc, iPCR) &= ~DV_BROADCAST_ON; CSRARC(fc, STATE_CLEAR) &= ~(1 << 23 | 1 << 15 | 1 << 14 ); CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); } static void fw_init_crom(struct firewire_comm *fc) { struct crom_src *src; fc->crom_src_buf = (struct crom_src_buf *) malloc(sizeof(struct crom_src_buf), M_FW, M_WAITOK | M_ZERO); if (fc->crom_src_buf == NULL) return; src = &fc->crom_src_buf->src; bzero(src, sizeof(struct crom_src)); /* BUS info sample */ src->hdr.info_len = 4; src->businfo.bus_name = CSR_BUS_NAME_IEEE1394; src->businfo.irmc = 1; src->businfo.cmc = 1; src->businfo.isc = 1; src->businfo.bmc = 1; src->businfo.pmc = 0; src->businfo.cyc_clk_acc = 100; src->businfo.max_rec = fc->maxrec; src->businfo.max_rom = MAXROM_4; src->businfo.generation = 1; src->businfo.link_spd = fc->speed; src->businfo.eui64.hi = fc->eui.hi; src->businfo.eui64.lo = fc->eui.lo; STAILQ_INIT(&src->chunk_list); fc->crom_src = src; fc->crom_root = &fc->crom_src_buf->root; } static void fw_reset_crom(struct firewire_comm *fc) { struct crom_src_buf *buf; struct crom_src *src; struct crom_chunk *root; if (fc->crom_src_buf == NULL) fw_init_crom(fc); buf = fc->crom_src_buf; src = fc->crom_src; root = fc->crom_root; STAILQ_INIT(&src->chunk_list); bzero(root, sizeof(struct crom_chunk)); crom_add_chunk(src, NULL, root, 0); crom_add_entry(root, CSRKEY_NCAP, 0x0083c0); /* XXX */ /* private company_id */ crom_add_entry(root, CSRKEY_VENDOR, CSRVAL_VENDOR_PRIVATE); #ifdef __DragonFly__ crom_add_simple_text(src, root, &buf->vendor, 
"DragonFly Project"); crom_add_entry(root, CSRKEY_HW, __DragonFly_cc_version); #else crom_add_simple_text(src, root, &buf->vendor, "FreeBSD Project"); crom_add_entry(root, CSRKEY_HW, __FreeBSD_version); #endif crom_add_simple_text(src, root, &buf->hw, hostname); } /* * Called after bus reset. */ void fw_busreset(struct firewire_comm *fc) { struct firewire_dev_comm *fdc; struct crom_src *src; device_t *devlistp; void *newrom; int i, devcnt; switch(fc->status){ case FWBUSMGRELECT: callout_stop(&fc->bmr_callout); break; default: break; } fc->status = FWBUSRESET; fw_reset_csr(fc); fw_reset_crom(fc); if (device_get_children(fc->bdev, &devlistp, &devcnt) == 0) { for( i = 0 ; i < devcnt ; i++) if (device_get_state(devlistp[i]) >= DS_ATTACHED) { fdc = device_get_softc(devlistp[i]); if (fdc->post_busreset != NULL) fdc->post_busreset(fdc); } free(devlistp, M_TEMP); } newrom = malloc(CROMSIZE, M_FW, M_NOWAIT | M_ZERO); src = &fc->crom_src_buf->src; crom_load(src, (uint32_t *)newrom, CROMSIZE); if (bcmp(newrom, fc->config_rom, CROMSIZE) != 0) { /* bump generation and reload */ src->businfo.generation ++; /* generation must be between 0x2 and 0xF */ if (src->businfo.generation < 2) src->businfo.generation ++; crom_load(src, (uint32_t *)newrom, CROMSIZE); bcopy(newrom, (void *)fc->config_rom, CROMSIZE); } free(newrom, M_FW); } /* Call once after reboot */ void fw_init(struct firewire_comm *fc) { int i; struct csrdir *csrd; #ifdef FW_VMACCESS struct fw_xfer *xfer; struct fw_bind *fwb; #endif fc->max_asyretry = FW_MAXASYRTY; fc->arq->queued = 0; fc->ars->queued = 0; fc->atq->queued = 0; fc->ats->queued = 0; fc->arq->buf = NULL; fc->ars->buf = NULL; fc->atq->buf = NULL; fc->ats->buf = NULL; fc->arq->flag = 0; fc->ars->flag = 0; fc->atq->flag = 0; fc->ats->flag = 0; STAILQ_INIT(&fc->atq->q); STAILQ_INIT(&fc->ats->q); for( i = 0 ; i < fc->nisodma ; i ++ ){ fc->it[i]->queued = 0; fc->ir[i]->queued = 0; fc->it[i]->start = NULL; fc->ir[i]->start = NULL; fc->it[i]->buf = NULL; 
fc->ir[i]->buf = NULL; fc->it[i]->flag = FWXFERQ_STREAM; fc->ir[i]->flag = FWXFERQ_STREAM; STAILQ_INIT(&fc->it[i]->q); STAILQ_INIT(&fc->ir[i]->q); STAILQ_INIT(&fc->it[i]->binds); STAILQ_INIT(&fc->ir[i]->binds); } fc->arq->maxq = FWMAXQUEUE; fc->ars->maxq = FWMAXQUEUE; fc->atq->maxq = FWMAXQUEUE; fc->ats->maxq = FWMAXQUEUE; for( i = 0 ; i < fc->nisodma ; i++){ fc->ir[i]->maxq = FWMAXQUEUE; fc->it[i]->maxq = FWMAXQUEUE; } /* Initialize csr registers */ fc->topology_map = (struct fw_topology_map *)malloc( sizeof(struct fw_topology_map), M_FW, M_NOWAIT | M_ZERO); fc->speed_map = (struct fw_speed_map *)malloc( sizeof(struct fw_speed_map), M_FW, M_NOWAIT | M_ZERO); CSRARC(fc, TOPO_MAP) = 0x3f1 << 16; CSRARC(fc, TOPO_MAP + 4) = 1; CSRARC(fc, SPED_MAP) = 0x3f1 << 16; CSRARC(fc, SPED_MAP + 4) = 1; STAILQ_INIT(&fc->devices); /* Initialize csr ROM work space */ SLIST_INIT(&fc->ongocsr); SLIST_INIT(&fc->csrfree); for( i = 0 ; i < FWMAXCSRDIR ; i++){ csrd = (struct csrdir *) malloc(sizeof(struct csrdir), M_FW,M_NOWAIT); if(csrd == NULL) break; SLIST_INSERT_HEAD(&fc->csrfree, csrd, link); } /* Initialize Async handlers */ STAILQ_INIT(&fc->binds); for( i = 0 ; i < 0x40 ; i++){ STAILQ_INIT(&fc->tlabels[i]); } /* DV depend CSRs see blue book */ #if 0 CSRARC(fc, oMPR) = 0x3fff0001; /* # output channel = 1 */ CSRARC(fc, oPCR) = 0x8000007a; for(i = 4 ; i < 0x7c/4 ; i+=4){ CSRARC(fc, i + oPCR) = 0x8000007a; } CSRARC(fc, iMPR) = 0x00ff0001; /* # input channel = 1 */ CSRARC(fc, iPCR) = 0x803f0000; for(i = 4 ; i < 0x7c/4 ; i+=4){ CSRARC(fc, i + iPCR) = 0x0; } #endif fc->crom_src_buf = NULL; #ifdef FW_VMACCESS xfer = fw_xfer_alloc(); if(xfer == NULL) return; fwb = (struct fw_bind *)malloc(sizeof (struct fw_bind), M_FW, M_NOWAIT); if(fwb == NULL){ fw_xfer_free(xfer); } xfer->act.hand = fw_vmaccess; xfer->fc = fc; xfer->sc = NULL; fwb->start_hi = 0x2; fwb->start_lo = 0; fwb->addrlen = 0xffffffff; fwb->xfer = xfer; fw_bindadd(fc, fwb); #endif } #define BIND_CMP(addr, fwb) (((addr) < 
(fwb)->start)?-1:\ ((fwb)->end < (addr))?1:0) /* * To lookup bound process from IEEE1394 address. */ struct fw_bind * fw_bindlookup(struct firewire_comm *fc, uint16_t dest_hi, uint32_t dest_lo) { u_int64_t addr; struct fw_bind *tfw; addr = ((u_int64_t)dest_hi << 32) | dest_lo; STAILQ_FOREACH(tfw, &fc->binds, fclist) if (tfw->act_type != FWACT_NULL && BIND_CMP(addr, tfw) == 0) return(tfw); return(NULL); } /* * To bind IEEE1394 address block to process. */ int fw_bindadd(struct firewire_comm *fc, struct fw_bind *fwb) { struct fw_bind *tfw, *prev = NULL; if (fwb->start > fwb->end) { printf("%s: invalid range\n", __func__); return EINVAL; } STAILQ_FOREACH(tfw, &fc->binds, fclist) { if (fwb->end < tfw->start) break; prev = tfw; } if (prev == NULL) { STAILQ_INSERT_HEAD(&fc->binds, fwb, fclist); goto out; } if (prev->end < fwb->start) { STAILQ_INSERT_AFTER(&fc->binds, prev, fwb, fclist); goto out; } printf("%s: bind failed\n", __func__); return (EBUSY); out: if (fwb->act_type == FWACT_CH) STAILQ_INSERT_HEAD(&fc->ir[fwb->sub]->binds, fwb, chlist); return (0); } /* * To free IEEE1394 address block. */ int fw_bindremove(struct firewire_comm *fc, struct fw_bind *fwb) { #if 0 struct fw_xfer *xfer, *next; #endif struct fw_bind *tfw; int s; s = splfw(); STAILQ_FOREACH(tfw, &fc->binds, fclist) if (tfw == fwb) { STAILQ_REMOVE(&fc->binds, fwb, fw_bind, fclist); goto found; } printf("%s: no such binding\n", __func__); splx(s); return (1); found: #if 0 /* shall we do this? */ for (xfer = STAILQ_FIRST(&fwb->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } STAILQ_INIT(&fwb->xferlist); #endif splx(s); return 0; } /* * To free transaction label. 
*/ static void fw_tl_free(struct firewire_comm *fc, struct fw_xfer *xfer) { struct tlabel *tl; int s = splfw(); for( tl = STAILQ_FIRST(&fc->tlabels[xfer->tl]); tl != NULL; tl = STAILQ_NEXT(tl, link)){ if(tl->xfer == xfer){ STAILQ_REMOVE(&fc->tlabels[xfer->tl], tl, tlabel, link); free(tl, M_FW); splx(s); return; } } splx(s); return; } /* * To obtain XFER structure by transaction label. */ static struct fw_xfer * fw_tl2xfer(struct firewire_comm *fc, int node, int tlabel) { struct fw_xfer *xfer; struct tlabel *tl; int s = splfw(); for( tl = STAILQ_FIRST(&fc->tlabels[tlabel]); tl != NULL; tl = STAILQ_NEXT(tl, link)){ if(tl->xfer->send.hdr.mode.hdr.dst == node){ xfer = tl->xfer; splx(s); if (firewire_debug > 2) printf("fw_tl2xfer: found tl=%d\n", tlabel); return(xfer); } } if (firewire_debug > 1) printf("fw_tl2xfer: not found tl=%d\n", tlabel); splx(s); return(NULL); } /* * To allocate IEEE1394 XFER structure. */ struct fw_xfer * fw_xfer_alloc(struct malloc_type *type) { struct fw_xfer *xfer; xfer = malloc(sizeof(struct fw_xfer), type, M_NOWAIT | M_ZERO); if (xfer == NULL) return xfer; xfer->malloc = type; return xfer; } struct fw_xfer * fw_xfer_alloc_buf(struct malloc_type *type, int send_len, int recv_len) { struct fw_xfer *xfer; xfer = fw_xfer_alloc(type); if (xfer == NULL) return(NULL); xfer->send.pay_len = send_len; xfer->recv.pay_len = recv_len; if (send_len > 0) { xfer->send.payload = malloc(send_len, type, M_NOWAIT | M_ZERO); if (xfer->send.payload == NULL) { fw_xfer_free(xfer); return(NULL); } } if (recv_len > 0) { xfer->recv.payload = malloc(recv_len, type, M_NOWAIT); if (xfer->recv.payload == NULL) { if (xfer->send.payload != NULL) free(xfer->send.payload, type); fw_xfer_free(xfer); return(NULL); } } return(xfer); } /* * IEEE1394 XFER post process. 
 */
/*
 * Complete an asynchronous transfer: invoke its completion handler.
 * The handler must have been set by the submitter before the transfer
 * was started.
 */
void
fw_xfer_done(struct fw_xfer *xfer)
{
	if (xfer->act.hand == NULL) {
		printf("act.hand == NULL\n");
		return;
	}

	if (xfer->fc == NULL)
		panic("fw_xfer_done: why xfer->fc is NULL?");

	xfer->act.hand(xfer);
}

/*
 * Detach a transfer from driver bookkeeping before it is freed or
 * reused: pull it off its queue if still queued, release its
 * transaction label, and reset its state fields.
 */
void
fw_xfer_unload(struct fw_xfer* xfer)
{
	int s;

	if(xfer == NULL ) return;
	if(xfer->state == FWXF_INQ){
		/* Still sitting on a transmit/receive queue; remove it. */
		printf("fw_xfer_free FWXF_INQ\n");
		s = splfw();
		STAILQ_REMOVE(&xfer->q->q, xfer, fw_xfer, link);
		xfer->q->queued --;
		splx(s);
	}
	if (xfer->fc != NULL) {
#if 1
		if(xfer->state == FWXF_START)
			/*
			 * This could happen if:
			 * 1. We call fwohci_arcv() before fwohci_txd().
			 * 2. firewire_watch() is called.
			 */
			printf("fw_xfer_free FWXF_START\n");
#endif
		/* Give back the transaction label, if one was assigned. */
		fw_tl_free(xfer->fc, xfer);
	}
	xfer->state = FWXF_INIT;
	xfer->resp = 0;
	xfer->retry = 0;
}
/*
 * To free IEEE1394 XFER structure.
 */
/*
 * Free a transfer together with its send and receive payload buffers.
 * All memory is returned via the malloc type recorded at allocation.
 */
void
fw_xfer_free_buf( struct fw_xfer* xfer)
{
	if (xfer == NULL) {
		printf("%s: xfer == NULL\n", __func__);
		return;
	}
	fw_xfer_unload(xfer);
	if(xfer->send.payload != NULL){
		free(xfer->send.payload, xfer->malloc);
	}
	if(xfer->recv.payload != NULL){
		free(xfer->recv.payload, xfer->malloc);
	}
	free(xfer, xfer->malloc);
}

/*
 * Free a transfer structure only; payload buffers, if any, are not
 * released here (see fw_xfer_free_buf() for that variant).
 */
void
fw_xfer_free( struct fw_xfer* xfer)
{
	if (xfer == NULL) {
		printf("%s: xfer == NULL\n", __func__);
		return;
	}
	fw_xfer_unload(xfer);
	free(xfer, xfer->malloc);
}

/*
 * Completion handler that simply frees the finished transfer.
 */
void
fw_asy_callback_free(struct fw_xfer *xfer)
{
#if 0
	printf("asyreq done state=%d resp=%d\n",
					xfer->state, xfer->resp);
#endif
	fw_xfer_free(xfer);
}

/*
 * To configure PHY.
*/ static void fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count) { struct fw_xfer *xfer; struct fw_pkt *fp; fc->status = FWBUSPHYCONF; xfer = fw_xfer_alloc(M_FWXFER); if (xfer == NULL) return; xfer->fc = fc; xfer->retry_req = fw_asybusy; xfer->act.hand = fw_asy_callback_free; fp = &xfer->send.hdr; fp->mode.ld[1] = 0; if (root_node >= 0) fp->mode.ld[1] |= (root_node & 0x3f) << 24 | 1 << 23; if (gap_count >= 0) fp->mode.ld[1] |= 1 << 22 | (gap_count & 0x3f) << 16; fp->mode.ld[2] = ~fp->mode.ld[1]; /* XXX Dangerous, how to pass PHY packet to device driver */ fp->mode.common.tcode |= FWTCODE_PHY; if (firewire_debug) printf("send phy_config root_node=%d gap_count=%d\n", root_node, gap_count); fw_asyreq(fc, -1, xfer); } #if 0 /* * Dump self ID. */ static void fw_print_sid(uint32_t sid) { union fw_self_id *s; s = (union fw_self_id *) &sid; printf("node:%d link:%d gap:%d spd:%d del:%d con:%d pwr:%d" " p0:%d p1:%d p2:%d i:%d m:%d\n", s->p0.phy_id, s->p0.link_active, s->p0.gap_count, s->p0.phy_speed, s->p0.phy_delay, s->p0.contender, s->p0.power_class, s->p0.port0, s->p0.port1, s->p0.port2, s->p0.initiated_reset, s->p0.more_packets); } #endif /* * To receive self ID. 
*/ void fw_sidrcv(struct firewire_comm* fc, uint32_t *sid, u_int len) { uint32_t *p; union fw_self_id *self_id; u_int i, j, node, c_port = 0, i_branch = 0; fc->sid_cnt = len /(sizeof(uint32_t) * 2); fc->status = FWBUSINIT; fc->max_node = fc->nodeid & 0x3f; CSRARC(fc, NODE_IDS) = ((uint32_t)fc->nodeid) << 16; fc->status = FWBUSCYMELECT; fc->topology_map->crc_len = 2; fc->topology_map->generation ++; fc->topology_map->self_id_count = 0; fc->topology_map->node_count = 0; fc->speed_map->generation ++; fc->speed_map->crc_len = 1 + (64*64 + 3) / 4; self_id = &fc->topology_map->self_id[0]; for(i = 0; i < fc->sid_cnt; i ++){ if (sid[1] != ~sid[0]) { printf("fw_sidrcv: invalid self-id packet\n"); sid += 2; continue; } *self_id = *((union fw_self_id *)sid); fc->topology_map->crc_len++; if(self_id->p0.sequel == 0){ fc->topology_map->node_count ++; c_port = 0; #if 0 fw_print_sid(sid[0]); #endif node = self_id->p0.phy_id; if(fc->max_node < node){ fc->max_node = self_id->p0.phy_id; } /* XXX I'm not sure this is the right speed_map */ fc->speed_map->speed[node][node] = self_id->p0.phy_speed; for (j = 0; j < node; j ++) { fc->speed_map->speed[j][node] = fc->speed_map->speed[node][j] = min(fc->speed_map->speed[j][j], self_id->p0.phy_speed); } if ((fc->irm == -1 || self_id->p0.phy_id > fc->irm) && (self_id->p0.link_active && self_id->p0.contender)) { fc->irm = self_id->p0.phy_id; } if(self_id->p0.port0 >= 0x2){ c_port++; } if(self_id->p0.port1 >= 0x2){ c_port++; } if(self_id->p0.port2 >= 0x2){ c_port++; } } if(c_port > 2){ i_branch += (c_port - 2); } sid += 2; self_id++; fc->topology_map->self_id_count ++; } device_printf(fc->bdev, "%d nodes", fc->max_node + 1); /* CRC */ fc->topology_map->crc = fw_crc16( (uint32_t *)&fc->topology_map->generation, fc->topology_map->crc_len * 4); fc->speed_map->crc = fw_crc16( (uint32_t *)&fc->speed_map->generation, fc->speed_map->crc_len * 4); /* byteswap and copy to CSR */ p = (uint32_t *)fc->topology_map; for (i = 0; i <= 
fc->topology_map->crc_len; i++) CSRARC(fc, TOPO_MAP + i * 4) = htonl(*p++); p = (uint32_t *)fc->speed_map; CSRARC(fc, SPED_MAP) = htonl(*p++); CSRARC(fc, SPED_MAP + 4) = htonl(*p++); /* don't byte-swap uint8_t array */ bcopy(p, &CSRARC(fc, SPED_MAP + 8), (fc->speed_map->crc_len - 1)*4); fc->max_hop = fc->max_node - i_branch; printf(", maxhop <= %d", fc->max_hop); if(fc->irm == -1 ){ printf(", Not found IRM capable node"); }else{ printf(", cable IRM = %d", fc->irm); if (fc->irm == fc->nodeid) printf(" (me)"); } printf("\n"); if (try_bmr && (fc->irm != -1) && (CSRARC(fc, BUS_MGR_ID) == 0x3f)) { if (fc->irm == fc->nodeid) { fc->status = FWBUSMGRDONE; CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, fc->irm); fw_bmr(fc); } else { fc->status = FWBUSMGRELECT; callout_reset(&fc->bmr_callout, hz/8, (void *)fw_try_bmr, (void *)fc); } } else fc->status = FWBUSMGRDONE; callout_reset(&fc->busprobe_callout, hz/4, (void *)fw_bus_probe, (void *)fc); } /* * To probe devices on the IEEE1394 bus. */ static void fw_bus_probe(struct firewire_comm *fc) { int s; struct fw_device *fwdev; s = splfw(); fc->status = FWBUSEXPLORE; fc->retry_count = 0; /* Invalidate all devices, just after bus reset. */ STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->status != FWDEVINVAL) { fwdev->status = FWDEVINVAL; fwdev->rcnt = 0; } fc->ongonode = 0; fc->ongoaddr = CSRROMOFF; fc->ongodev = NULL; fc->ongoeui.hi = 0xffffffff; fc->ongoeui.lo = 0xffffffff; fw_bus_explore(fc); splx(s); } /* * Find the self_id packet for a node, ignoring sequels. */ static union fw_self_id * fw_find_self_id(struct firewire_comm *fc, int node) { uint32_t i; union fw_self_id *s; for (i = 0; i < fc->topology_map->self_id_count; i++) { s = &fc->topology_map->self_id[i]; if (s->p0.sequel) continue; if (s->p0.phy_id == node) return s; } return 0; } /* * To collect device informations on the IEEE1394 bus. 
*/ static void fw_bus_explore(struct firewire_comm *fc ) { int err = 0; struct fw_device *fwdev, *pfwdev, *tfwdev; uint32_t addr; struct fw_xfer *xfer; struct fw_pkt *fp; if(fc->status != FWBUSEXPLORE) return; loop: if(fc->ongonode == fc->nodeid) fc->ongonode++; if(fc->ongonode > fc->max_node) goto done; if(fc->ongonode >= 0x3f) goto done; /* check link */ /* XXX we need to check phy_id first */ if (!fw_find_self_id(fc, fc->ongonode)->p0.link_active) { if (firewire_debug) printf("node%d: link down\n", fc->ongonode); fc->ongonode++; goto loop; } if(fc->ongoaddr <= CSRROMOFF && fc->ongoeui.hi == 0xffffffff && fc->ongoeui.lo == 0xffffffff ){ fc->ongoaddr = CSRROMOFF; addr = 0xf0000000 | fc->ongoaddr; }else if(fc->ongoeui.hi == 0xffffffff ){ fc->ongoaddr = CSRROMOFF + 0xc; addr = 0xf0000000 | fc->ongoaddr; }else if(fc->ongoeui.lo == 0xffffffff ){ fc->ongoaddr = CSRROMOFF + 0x10; addr = 0xf0000000 | fc->ongoaddr; }else if(fc->ongodev == NULL){ STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, fc->ongoeui)) break; if(fwdev != NULL){ fwdev->dst = fc->ongonode; fwdev->status = FWDEVINIT; fc->ongodev = fwdev; fc->ongoaddr = CSRROMOFF; addr = 0xf0000000 | fc->ongoaddr; goto dorequest; } fwdev = malloc(sizeof(struct fw_device), M_FW, M_NOWAIT | M_ZERO); if(fwdev == NULL) return; fwdev->fc = fc; fwdev->rommax = 0; fwdev->dst = fc->ongonode; fwdev->eui.hi = fc->ongoeui.hi; fwdev->eui.lo = fc->ongoeui.lo; fwdev->status = FWDEVINIT; fwdev->speed = fc->speed_map->speed[fc->nodeid][fc->ongonode]; pfwdev = NULL; STAILQ_FOREACH(tfwdev, &fc->devices, link) { if (tfwdev->eui.hi > fwdev->eui.hi || (tfwdev->eui.hi == fwdev->eui.hi && tfwdev->eui.lo > fwdev->eui.lo)) break; pfwdev = tfwdev; } if (pfwdev == NULL) STAILQ_INSERT_HEAD(&fc->devices, fwdev, link); else STAILQ_INSERT_AFTER(&fc->devices, pfwdev, fwdev, link); device_printf(fc->bdev, "New %s device ID:%08x%08x\n", linkspeed[fwdev->speed], fc->ongoeui.hi, fc->ongoeui.lo); fc->ongodev = fwdev; fc->ongoaddr = 
CSRROMOFF; addr = 0xf0000000 | fc->ongoaddr; }else{ addr = 0xf0000000 | fc->ongoaddr; } dorequest: #if 0 xfer = asyreqq(fc, FWSPD_S100, 0, 0, ((FWLOCALBUS | fc->ongonode) << 16) | 0xffff , addr, fw_bus_explore_callback); if(xfer == NULL) goto done; #else xfer = fw_xfer_alloc(M_FWXFER); if(xfer == NULL){ goto done; } xfer->send.spd = 0; fp = &xfer->send.hdr; fp->mode.rreqq.dest_hi = 0xffff; fp->mode.rreqq.tlrt = 0; fp->mode.rreqq.tcode = FWTCODE_RREQQ; fp->mode.rreqq.pri = 0; fp->mode.rreqq.src = 0; fp->mode.rreqq.dst = FWLOCALBUS | fc->ongonode; fp->mode.rreqq.dest_lo = addr; xfer->act.hand = fw_bus_explore_callback; if (firewire_debug) printf("node%d: explore addr=0x%x\n", fc->ongonode, fc->ongoaddr); err = fw_asyreq(fc, -1, xfer); if(err){ fw_xfer_free( xfer); return; } #endif return; done: /* fw_attach_devs */ fc->status = FWBUSEXPDONE; if (firewire_debug) printf("bus_explore done\n"); fw_attach_dev(fc); return; } /* Portable Async. request read quad */ struct fw_xfer * asyreqq(struct firewire_comm *fc, uint8_t spd, uint8_t tl, uint8_t rt, uint32_t addr_hi, uint32_t addr_lo, void (*hand) (struct fw_xfer*)) { struct fw_xfer *xfer; struct fw_pkt *fp; int err; xfer = fw_xfer_alloc(M_FWXFER); if (xfer == NULL) return NULL; xfer->send.spd = spd; /* XXX:min(spd, fc->spd) */ fp = &xfer->send.hdr; fp->mode.rreqq.dest_hi = addr_hi & 0xffff; if(tl & FWP_TL_VALID){ fp->mode.rreqq.tlrt = (tl & 0x3f) << 2; }else{ fp->mode.rreqq.tlrt = 0; } fp->mode.rreqq.tlrt |= rt & 0x3; fp->mode.rreqq.tcode = FWTCODE_RREQQ; fp->mode.rreqq.pri = 0; fp->mode.rreqq.src = 0; fp->mode.rreqq.dst = addr_hi >> 16; fp->mode.rreqq.dest_lo = addr_lo; xfer->act.hand = hand; err = fw_asyreq(fc, -1, xfer); if(err){ fw_xfer_free( xfer); return NULL; } return xfer; } /* * Callback for the IEEE1394 bus information collection. 
*/ static void fw_bus_explore_callback(struct fw_xfer *xfer) { struct firewire_comm *fc; struct fw_pkt *sfp,*rfp; struct csrhdr *chdr; struct csrdir *csrd; struct csrreg *csrreg; uint32_t offset; if(xfer == NULL) { printf("xfer == NULL\n"); return; } fc = xfer->fc; if (firewire_debug) printf("node%d: callback addr=0x%x\n", fc->ongonode, fc->ongoaddr); if(xfer->resp != 0){ device_printf(fc->bdev, "bus_explore node=%d addr=0x%x resp=%d retry=%d\n", fc->ongonode, fc->ongoaddr, xfer->resp, xfer->retry); if (xfer->retry < fc->max_asyretry) { fw_asystart(xfer); return; } goto errnode; } sfp = &xfer->send.hdr; rfp = &xfer->recv.hdr; #if 0 { uint32_t *qld; int i; qld = (uint32_t *)xfer->recv.buf; printf("len:%d\n", xfer->recv.len); for( i = 0 ; i <= xfer->recv.len && i < 32; i+= 4){ printf("0x%08x ", rfp->mode.ld[i/4]); if((i % 16) == 15) printf("\n"); } if((i % 16) != 15) printf("\n"); } #endif if(fc->ongodev == NULL){ if(sfp->mode.rreqq.dest_lo == (0xf0000000 | CSRROMOFF)){ rfp->mode.rresq.data = ntohl(rfp->mode.rresq.data); chdr = (struct csrhdr *)(&rfp->mode.rresq.data); /* If CSR is minimal confinguration, more investigation is not needed. 
*/ if(chdr->info_len == 1){ if (firewire_debug) printf("node%d: minimal config\n", fc->ongonode); goto nextnode; }else{ fc->ongoaddr = CSRROMOFF + 0xc; } }else if(sfp->mode.rreqq.dest_lo == (0xf0000000 |(CSRROMOFF + 0xc))){ fc->ongoeui.hi = ntohl(rfp->mode.rresq.data); fc->ongoaddr = CSRROMOFF + 0x10; }else if(sfp->mode.rreqq.dest_lo == (0xf0000000 |(CSRROMOFF + 0x10))){ fc->ongoeui.lo = ntohl(rfp->mode.rresq.data); if (fc->ongoeui.hi == 0 && fc->ongoeui.lo == 0) { if (firewire_debug) printf("node%d: eui64 is zero.\n", fc->ongonode); goto nextnode; } fc->ongoaddr = CSRROMOFF; } }else{ if (fc->ongoaddr == CSRROMOFF && fc->ongodev->csrrom[0] == ntohl(rfp->mode.rresq.data)) { fc->ongodev->status = FWDEVATTACHED; goto nextnode; } fc->ongodev->csrrom[(fc->ongoaddr - CSRROMOFF)/4] = ntohl(rfp->mode.rresq.data); if(fc->ongoaddr > fc->ongodev->rommax){ fc->ongodev->rommax = fc->ongoaddr; } csrd = SLIST_FIRST(&fc->ongocsr); if((csrd = SLIST_FIRST(&fc->ongocsr)) == NULL){ chdr = (struct csrhdr *)(fc->ongodev->csrrom); offset = CSRROMOFF; }else{ chdr = (struct csrhdr *)&fc->ongodev->csrrom[(csrd->off - CSRROMOFF)/4]; offset = csrd->off; } if(fc->ongoaddr > (CSRROMOFF + 0x14) && fc->ongoaddr != offset){ csrreg = (struct csrreg *)&fc->ongodev->csrrom[(fc->ongoaddr - CSRROMOFF)/4]; if( csrreg->key == 0x81 || csrreg->key == 0xd1){ csrd = SLIST_FIRST(&fc->csrfree); if(csrd == NULL){ goto nextnode; }else{ csrd->ongoaddr = fc->ongoaddr; fc->ongoaddr += csrreg->val * 4; csrd->off = fc->ongoaddr; SLIST_REMOVE_HEAD(&fc->csrfree, link); SLIST_INSERT_HEAD(&fc->ongocsr, csrd, link); goto nextaddr; } } } fc->ongoaddr += 4; if(((fc->ongoaddr - offset)/4 > chdr->crc_len) && (fc->ongodev->rommax < 0x414)){ if(fc->ongodev->rommax <= 0x414){ csrd = SLIST_FIRST(&fc->csrfree); if(csrd == NULL) goto nextnode; csrd->off = fc->ongoaddr; csrd->ongoaddr = fc->ongoaddr; SLIST_REMOVE_HEAD(&fc->csrfree, link); SLIST_INSERT_HEAD(&fc->ongocsr, csrd, link); } goto nextaddr; } while(((fc->ongoaddr - 
offset)/4 > chdr->crc_len)){ if(csrd == NULL){ goto nextnode; }; fc->ongoaddr = csrd->ongoaddr + 4; SLIST_REMOVE_HEAD(&fc->ongocsr, link); SLIST_INSERT_HEAD(&fc->csrfree, csrd, link); csrd = SLIST_FIRST(&fc->ongocsr); if((csrd = SLIST_FIRST(&fc->ongocsr)) == NULL){ chdr = (struct csrhdr *)(fc->ongodev->csrrom); offset = CSRROMOFF; }else{ chdr = (struct csrhdr *)&(fc->ongodev->csrrom[(csrd->off - CSRROMOFF)/4]); offset = csrd->off; } } if((fc->ongoaddr - CSRROMOFF) > CSRROMSIZE){ goto nextnode; } } nextaddr: fw_xfer_free( xfer); fw_bus_explore(fc); return; errnode: fc->retry_count++; if (fc->ongodev != NULL) { fc->ongodev->status = FWDEVINVAL; /* Invalidate ROM */ fc->ongodev->csrrom[0] = 0; } nextnode: fw_xfer_free( xfer); fc->ongonode++; /* housekeeping work space */ fc->ongoaddr = CSRROMOFF; fc->ongodev = NULL; fc->ongoeui.hi = 0xffffffff; fc->ongoeui.lo = 0xffffffff; while((csrd = SLIST_FIRST(&fc->ongocsr)) != NULL){ SLIST_REMOVE_HEAD(&fc->ongocsr, link); SLIST_INSERT_HEAD(&fc->csrfree, csrd, link); } fw_bus_explore(fc); return; } /* * To attach sub-devices layer onto IEEE1394 bus. */ static void fw_attach_dev(struct firewire_comm *fc) { struct fw_device *fwdev, *next; int i, err; device_t *devlistp; int devcnt; struct firewire_dev_comm *fdc; for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL; fwdev = next) { next = STAILQ_NEXT(fwdev, link); if (fwdev->status == FWDEVINIT) { fwdev->status = FWDEVATTACHED; } else if (fwdev->status == FWDEVINVAL) { fwdev->rcnt ++; if (fwdev->rcnt > hold_count) { /* * Remove devices which have not been seen * for a while. 
*/ STAILQ_REMOVE(&fc->devices, fwdev, fw_device, link); free(fwdev, M_FW); } } } err = device_get_children(fc->bdev, &devlistp, &devcnt); if( err != 0 ) return; for( i = 0 ; i < devcnt ; i++){ if (device_get_state(devlistp[i]) >= DS_ATTACHED) { fdc = device_get_softc(devlistp[i]); if (fdc->post_explore != NULL) fdc->post_explore(fdc); } } free(devlistp, M_TEMP); if (fc->retry_count > 0) { device_printf(fc->bdev, "bus_explore failed for %d nodes\n", fc->retry_count); #if 0 callout_reset(&fc->retry_probe_callout, hz*2, (void *)fc->ibr, (void *)fc); #endif } return; } /* * To allocate unique transaction label. */ static int fw_get_tlabel(struct firewire_comm *fc, struct fw_xfer *xfer) { u_int i; struct tlabel *tl, *tmptl; int s; static uint32_t label = 0; s = splfw(); for( i = 0 ; i < 0x40 ; i ++){ label = (label + 1) & 0x3f; for(tmptl = STAILQ_FIRST(&fc->tlabels[label]); tmptl != NULL; tmptl = STAILQ_NEXT(tmptl, link)){ if (tmptl->xfer->send.hdr.mode.hdr.dst == xfer->send.hdr.mode.hdr.dst) break; } if(tmptl == NULL) { tl = malloc(sizeof(struct tlabel),M_FW,M_NOWAIT); if (tl == NULL) { splx(s); return (-1); } tl->xfer = xfer; STAILQ_INSERT_TAIL(&fc->tlabels[label], tl, link); splx(s); if (firewire_debug > 1) printf("fw_get_tlabel: dst=%d tl=%d\n", xfer->send.hdr.mode.hdr.dst, label); return(label); } } splx(s); printf("fw_get_tlabel: no free tlabel\n"); return(-1); } static void fw_rcv_copy(struct fw_rcv_buf *rb) { struct fw_pkt *pkt; u_char *p; struct tcode_info *tinfo; u_int res, i, len, plen; rb->xfer->recv.spd -= rb->spd; pkt = (struct fw_pkt *)rb->vec->iov_base; tinfo = &rb->fc->tcode[pkt->mode.hdr.tcode]; /* Copy header */ p = (u_char *)&rb->xfer->recv.hdr; bcopy(rb->vec->iov_base, p, tinfo->hdr_len); (u_char *)rb->vec->iov_base += tinfo->hdr_len; rb->vec->iov_len -= tinfo->hdr_len; /* Copy payload */ p = (u_char *)rb->xfer->recv.payload; res = rb->xfer->recv.pay_len; /* special handling for RRESQ */ if (pkt->mode.hdr.tcode == FWTCODE_RRESQ && p != NULL && res 
>= sizeof(uint32_t)) { *(uint32_t *)p = pkt->mode.rresq.data; rb->xfer->recv.pay_len = sizeof(uint32_t); return; } if ((tinfo->flag & FWTI_BLOCK_ASY) == 0) return; plen = pkt->mode.rresb.len; for (i = 0; i < rb->nvec; i++, rb->vec++) { len = MIN(rb->vec->iov_len, plen); if (res < len) { printf("rcv buffer(%d) is %d bytes short.\n", rb->xfer->recv.pay_len, len - res); len = res; } bcopy(rb->vec->iov_base, p, len); p += len; res -= len; plen -= len; if (res == 0 || plen == 0) break; } rb->xfer->recv.pay_len -= res; } /* * Generic packet receiving process. */ void fw_rcv(struct fw_rcv_buf *rb) { struct fw_pkt *fp, *resfp; struct fw_bind *bind; int tcode, s; int i, len, oldstate; #if 0 { uint32_t *qld; int i; qld = (uint32_t *)buf; printf("spd %d len:%d\n", spd, len); for( i = 0 ; i <= len && i < 32; i+= 4){ printf("0x%08x ", ntohl(qld[i/4])); if((i % 16) == 15) printf("\n"); } if((i % 16) != 15) printf("\n"); } #endif fp = (struct fw_pkt *)rb->vec[0].iov_base; tcode = fp->mode.common.tcode; switch (tcode) { case FWTCODE_WRES: case FWTCODE_RRESQ: case FWTCODE_RRESB: case FWTCODE_LRES: rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src, fp->mode.hdr.tlrt >> 2); if(rb->xfer == NULL) { printf("fw_rcv: unknown response " "%s(%x) src=0x%x tl=0x%x rt=%d data=0x%x\n", tcode_str[tcode], tcode, fp->mode.hdr.src, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tlrt & 3, fp->mode.rresq.data); #if 1 printf("try ad-hoc work around!!\n"); rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src, (fp->mode.hdr.tlrt >> 2)^3); if (rb->xfer == NULL) { printf("no use...\n"); goto err; } #else goto err; #endif } fw_rcv_copy(rb); if (rb->xfer->recv.hdr.mode.wres.rtcode != RESP_CMP) rb->xfer->resp = EIO; else rb->xfer->resp = 0; /* make sure the packet is drained in AT queue */ oldstate = rb->xfer->state; rb->xfer->state = FWXF_RCVD; switch (oldstate) { case FWXF_SENT: fw_xfer_done(rb->xfer); break; case FWXF_START: #if 0 if (firewire_debug) printf("not sent yet tl=%x\n", rb->xfer->tl); #endif break; default: 
printf("unexpected state %d\n", rb->xfer->state); } return; case FWTCODE_WREQQ: case FWTCODE_WREQB: case FWTCODE_RREQQ: case FWTCODE_RREQB: case FWTCODE_LREQ: bind = fw_bindlookup(rb->fc, fp->mode.rreqq.dest_hi, fp->mode.rreqq.dest_lo); if(bind == NULL){ printf("Unknown service addr 0x%04x:0x%08x %s(%x)" #if defined(__DragonFly__) || __FreeBSD_version < 500000 " src=0x%x data=%lx\n", #else " src=0x%x data=%x\n", #endif fp->mode.wreqq.dest_hi, fp->mode.wreqq.dest_lo, tcode_str[tcode], tcode, fp->mode.hdr.src, ntohl(fp->mode.wreqq.data)); if (rb->fc->status == FWBUSRESET) { printf("fw_rcv: cannot respond(bus reset)!\n"); goto err; } rb->xfer = fw_xfer_alloc(M_FWXFER); if(rb->xfer == NULL){ return; } rb->xfer->send.spd = rb->spd; rb->xfer->send.pay_len = 0; resfp = &rb->xfer->send.hdr; switch (tcode) { case FWTCODE_WREQQ: case FWTCODE_WREQB: resfp->mode.hdr.tcode = FWTCODE_WRES; break; case FWTCODE_RREQQ: resfp->mode.hdr.tcode = FWTCODE_RRESQ; break; case FWTCODE_RREQB: resfp->mode.hdr.tcode = FWTCODE_RRESB; break; case FWTCODE_LREQ: resfp->mode.hdr.tcode = FWTCODE_LRES; break; } resfp->mode.hdr.dst = fp->mode.hdr.src; resfp->mode.hdr.tlrt = fp->mode.hdr.tlrt; resfp->mode.hdr.pri = fp->mode.hdr.pri; resfp->mode.rresb.rtcode = RESP_ADDRESS_ERROR; resfp->mode.rresb.extcode = 0; resfp->mode.rresb.len = 0; /* rb->xfer->act.hand = fw_asy_callback; */ rb->xfer->act.hand = fw_xfer_free; if(fw_asyreq(rb->fc, -1, rb->xfer)){ fw_xfer_free(rb->xfer); return; } goto err; } len = 0; for (i = 0; i < rb->nvec; i ++) len += rb->vec[i].iov_len; switch(bind->act_type){ case FWACT_XFER: /* splfw()?? 
*/ rb->xfer = STAILQ_FIRST(&bind->xferlist); if (rb->xfer == NULL) { printf("Discard a packet for this bind.\n"); goto err; } STAILQ_REMOVE_HEAD(&bind->xferlist, link); fw_rcv_copy(rb); rb->xfer->act.hand(rb->xfer); return; break; case FWACT_CH: if(rb->fc->ir[bind->sub]->queued >= rb->fc->ir[bind->sub]->maxq){ device_printf(rb->fc->bdev, "Discard a packet %x %d\n", bind->sub, rb->fc->ir[bind->sub]->queued); goto err; } rb->xfer = STAILQ_FIRST(&bind->xferlist); if (rb->xfer == NULL) { printf("Discard packet for this bind\n"); goto err; } STAILQ_REMOVE_HEAD(&bind->xferlist, link); fw_rcv_copy(rb); s = splfw(); rb->fc->ir[bind->sub]->queued++; STAILQ_INSERT_TAIL(&rb->fc->ir[bind->sub]->q, rb->xfer, link); splx(s); wakeup((caddr_t)rb->fc->ir[bind->sub]); return; break; default: goto err; break; } break; #if 0 /* shouldn't happen ?? or for GASP */ case FWTCODE_STREAM: { struct fw_xferq *xferq; xferq = rb->fc->ir[sub]; #if 0 printf("stream rcv dma %d len %d off %d spd %d\n", sub, len, off, spd); #endif if(xferq->queued >= xferq->maxq) { printf("receive queue is full\n"); goto err; } /* XXX get xfer from xfer queue, we don't need copy for per packet mode */ rb->xfer = fw_xfer_alloc_buf(M_FWXFER, 0, /* XXX */ vec[0].iov_len); if (rb->xfer == NULL) goto err; fw_rcv_copy(rb) s = splfw(); xferq->queued++; STAILQ_INSERT_TAIL(&xferq->q, rb->xfer, link); splx(s); sc = device_get_softc(rb->fc->bdev); #if defined(__DragonFly__) || __FreeBSD_version < 500000 if (&xferq->rsel.si_pid != 0) #else if (SEL_WAITING(&xferq->rsel)) #endif selwakeuppri(&xferq->rsel, FWPRI); if (xferq->flag & FWXFERQ_WAKEUP) { xferq->flag &= ~FWXFERQ_WAKEUP; wakeup((caddr_t)xferq); } if (xferq->flag & FWXFERQ_HANDLER) { xferq->hand(xferq); } return; break; } #endif default: printf("fw_rcv: unknow tcode %d\n", tcode); break; } err: return; } /* * Post process for Bus Manager election process. 
*/ static void fw_try_bmr_callback(struct fw_xfer *xfer) { struct firewire_comm *fc; int bmr; if (xfer == NULL) return; fc = xfer->fc; if (xfer->resp != 0) goto error; if (xfer->recv.payload == NULL) goto error; if (xfer->recv.hdr.mode.lres.rtcode != FWRCODE_COMPLETE) goto error; bmr = ntohl(xfer->recv.payload[0]); if (bmr == 0x3f) bmr = fc->nodeid; CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, bmr & 0x3f); fw_xfer_free_buf(xfer); fw_bmr(fc); return; error: device_printf(fc->bdev, "bus manager election failed\n"); fw_xfer_free_buf(xfer); } /* * To candidate Bus Manager election process. */ static void fw_try_bmr(void *arg) { struct fw_xfer *xfer; struct firewire_comm *fc = (struct firewire_comm *)arg; struct fw_pkt *fp; int err = 0; xfer = fw_xfer_alloc_buf(M_FWXFER, 8, 4); if(xfer == NULL){ return; } xfer->send.spd = 0; fc->status = FWBUSMGRELECT; fp = &xfer->send.hdr; fp->mode.lreq.dest_hi = 0xffff; fp->mode.lreq.tlrt = 0; fp->mode.lreq.tcode = FWTCODE_LREQ; fp->mode.lreq.pri = 0; fp->mode.lreq.src = 0; fp->mode.lreq.len = 8; fp->mode.lreq.extcode = EXTCODE_CMP_SWAP; fp->mode.lreq.dst = FWLOCALBUS | fc->irm; fp->mode.lreq.dest_lo = 0xf0000000 | BUS_MGR_ID; xfer->send.payload[0] = htonl(0x3f); xfer->send.payload[1] = htonl(fc->nodeid); xfer->act.hand = fw_try_bmr_callback; err = fw_asyreq(fc, -1, xfer); if(err){ fw_xfer_free_buf(xfer); return; } return; } #ifdef FW_VMACCESS /* * Software implementation for physical memory block access. * XXX:Too slow, usef for debug purpose only. 
*/ static void fw_vmaccess(struct fw_xfer *xfer){ struct fw_pkt *rfp, *sfp = NULL; uint32_t *ld = (uint32_t *)xfer->recv.buf; printf("vmaccess spd:%2x len:%03x data:%08x %08x %08x %08x\n", xfer->spd, xfer->recv.len, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); printf("vmaccess data:%08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); if(xfer->resp != 0){ fw_xfer_free( xfer); return; } if(xfer->recv.buf == NULL){ fw_xfer_free( xfer); return; } rfp = (struct fw_pkt *)xfer->recv.buf; switch(rfp->mode.hdr.tcode){ /* XXX need fix for 64bit arch */ case FWTCODE_WREQB: xfer->send.buf = malloc(12, M_FW, M_NOWAIT); xfer->send.len = 12; sfp = (struct fw_pkt *)xfer->send.buf; bcopy(rfp->mode.wreqb.payload, (caddr_t)ntohl(rfp->mode.wreqb.dest_lo), ntohs(rfp->mode.wreqb.len)); sfp->mode.wres.tcode = FWTCODE_WRES; sfp->mode.wres.rtcode = 0; break; case FWTCODE_WREQQ: xfer->send.buf = malloc(12, M_FW, M_NOWAIT); xfer->send.len = 12; sfp->mode.wres.tcode = FWTCODE_WRES; *((uint32_t *)(ntohl(rfp->mode.wreqb.dest_lo))) = rfp->mode.wreqq.data; sfp->mode.wres.rtcode = 0; break; case FWTCODE_RREQB: xfer->send.buf = malloc(16 + rfp->mode.rreqb.len, M_FW, M_NOWAIT); xfer->send.len = 16 + ntohs(rfp->mode.rreqb.len); sfp = (struct fw_pkt *)xfer->send.buf; bcopy((caddr_t)ntohl(rfp->mode.rreqb.dest_lo), sfp->mode.rresb.payload, (uint16_t)ntohs(rfp->mode.rreqb.len)); sfp->mode.rresb.tcode = FWTCODE_RRESB; sfp->mode.rresb.len = rfp->mode.rreqb.len; sfp->mode.rresb.rtcode = 0; sfp->mode.rresb.extcode = 0; break; case FWTCODE_RREQQ: xfer->send.buf = malloc(16, M_FW, M_NOWAIT); xfer->send.len = 16; sfp = (struct fw_pkt *)xfer->send.buf; sfp->mode.rresq.data = *(uint32_t *)(ntohl(rfp->mode.rreqq.dest_lo)); sfp->mode.wres.tcode = FWTCODE_RRESQ; sfp->mode.rresb.rtcode = 0; break; default: fw_xfer_free( xfer); return; } sfp->mode.hdr.dst = rfp->mode.hdr.src; xfer->dst = ntohs(rfp->mode.hdr.src); xfer->act.hand = fw_xfer_free; xfer->retry_req = fw_asybusy; 
sfp->mode.hdr.tlrt = rfp->mode.hdr.tlrt; sfp->mode.hdr.pri = 0; fw_asyreq(xfer->fc, -1, xfer); /**/ return; } #endif /* * CRC16 check-sum for IEEE1394 register blocks. */ uint16_t fw_crc16(uint32_t *ptr, uint32_t len){ uint32_t i, sum, crc = 0; int shift; len = (len + 3) & ~3; for(i = 0 ; i < len ; i+= 4){ for( shift = 28 ; shift >= 0 ; shift -= 4){ sum = ((crc >> 12) ^ (ptr[i/4] >> shift)) & 0xf; crc = (crc << 4) ^ ( sum << 12 ) ^ ( sum << 5) ^ sum; } crc &= 0xffff; } return((uint16_t) crc); } static int fw_bmr(struct firewire_comm *fc) { struct fw_device fwdev; union fw_self_id *self_id; int cmstr; uint32_t quad; /* Check to see if the current root node is cycle master capable */ self_id = fw_find_self_id(fc, fc->max_node); if (fc->max_node > 0) { /* XXX check cmc bit of businfo block rather than contender */ if (self_id->p0.link_active && self_id->p0.contender) cmstr = fc->max_node; else { device_printf(fc->bdev, "root node is not cycle master capable\n"); /* XXX shall we be the cycle master? 
*/ cmstr = fc->nodeid; /* XXX need bus reset */ } } else cmstr = -1; device_printf(fc->bdev, "bus manager %d ", CSRARC(fc, BUS_MGR_ID)); if(CSRARC(fc, BUS_MGR_ID) != fc->nodeid) { /* We are not the bus manager */ printf("\n"); return(0); } printf("(me)\n"); /* Optimize gapcount */ if(fc->max_hop <= MAX_GAPHOP ) fw_phy_config(fc, cmstr, gap_cnt[fc->max_hop]); /* If we are the cycle master, nothing to do */ if (cmstr == fc->nodeid || cmstr == -1) return 0; /* Bus probe has not finished, make dummy fwdev for cmstr */ bzero(&fwdev, sizeof(fwdev)); fwdev.fc = fc; fwdev.dst = cmstr; fwdev.speed = 0; fwdev.maxrec = 8; /* 512 */ fwdev.status = FWDEVINIT; /* Set cmstr bit on the cycle master */ quad = htonl(1 << 8); fwmem_write_quad(&fwdev, NULL, 0/*spd*/, 0xffff, 0xf0000000 | STATE_SET, &quad, fw_asy_callback_free); return 0; } static int fw_modevent(module_t mode, int type, void *data) { int err = 0; #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 static eventhandler_tag fwdev_ehtag = NULL; #endif switch (type) { case MOD_LOAD: #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 fwdev_ehtag = EVENTHANDLER_REGISTER(dev_clone, fwdev_clone, 0, 1000); #endif break; case MOD_UNLOAD: #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 if (fwdev_ehtag != NULL) EVENTHANDLER_DEREGISTER(dev_clone, fwdev_ehtag); #endif break; case MOD_SHUTDOWN: break; } return (err); } #ifdef __DragonFly__ DECLARE_DUMMY_MODULE(firewire); #endif DRIVER_MODULE(firewire,fwohci,firewire_driver,firewire_devclass,fw_modevent,0); MODULE_VERSION(firewire, 1); Index: head/sys/dev/fxp/if_fxp.c =================================================================== --- head/sys/dev/fxp/if_fxp.c (revision 129878) +++ head/sys/dev/fxp/if_fxp.c (revision 129879) @@ -1,2772 +1,2773 @@ /*- * Copyright (c) 1995, David Greenman * Copyright (c) 2001 Jonathan Lemon * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/100B PCI Fast Ethernet driver */ #include #include #include #include /* #include */ #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #ifdef FXP_IP_CSUM_WAR #include #include #include #include #endif #include #include /* for PCIM_CMD_xxx */ #include #include #include #include #include MODULE_DEPEND(fxp, pci, 1, 1, 1); MODULE_DEPEND(fxp, ether, 1, 1, 1); MODULE_DEPEND(fxp, miibus, 1, 1, 1); #include "miibus_if.h" /* * NOTE! On the Alpha, we have an alignment constraint. The * card DMAs the packet immediately following the RFA. 
However,
 * the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned. To compensate,
 * we actually offset the RFA 2 bytes into the cluster. This
 * alignes the packet after the Ethernet header at a 32-bit
 * boundary. HOWEVER! This means that the RFA is misaligned!
 */
#define	RFA_ALIGNMENT_FUDGE	2

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * The configuration byte map has several undefined fields which
 * must be one or must be zero.  Set up a template for these bits
 * only, (assuming a 82557 chip) leaving the actual configuration
 * to fxp_init.
 *
 * See struct fxp_cb_config for the bit definitions.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0x0, 0x0, 0x0, 0x0,	/* link_addr */
	0x0,	/*  0 */
	0x0,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x0,	/*  5 */
	0x32,	/*  6 */
	0x0,	/*  7 */
	0x0,	/*  8 */
	0x0,	/*  9 */
	0x6,	/* 10 */
	0x0,	/* 11 */
	0x0,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf0,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

/* One probe-table entry: PCI device id plus an optional revision match. */
struct fxp_ident {
	u_int16_t	devid;
	int16_t		revid;		/* -1 matches anything */
	char 		*name;
};

/*
 * Claim various Intel PCI device identifiers for this driver.  The
 * sub-vendor and sub-device field are extensively used to identify
 * particular variants, but we don't currently differentiate between
 * them.
 */
static struct fxp_ident fxp_ident_table[] = {
    { 0x1029,	-1,	"Intel 82559 PCI/CardBus Pro/100" },
    { 0x1030,	-1,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1031,	-1,	"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x1032,	-1,	"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x1033,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1034,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1035,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1036,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1037,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1038,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1039,	-1,	"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x103A,	-1,	"Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x103B,	-1,	"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x103C,	-1,	"Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x103D,	-1,	"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x103E,	-1,	"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x1050,	-1,	"Intel 82801BA (D865) Pro/100 VE Ethernet" },
    { 0x1051,	-1,	"Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
    { 0x1059,	-1,	"Intel 82551QM Pro/100 M Mobile Connection" },
    { 0x1209,	-1,	"Intel 82559ER Embedded 10/100 Ethernet" },
    { 0x1229,	0x01,	"Intel 82557 Pro/100 Ethernet" },
    { 0x1229,	0x02,	"Intel 82557 Pro/100 Ethernet" },
    { 0x1229,	0x03,	"Intel 82557 Pro/100 Ethernet" },
    { 0x1229,	0x04,	"Intel 82558 Pro/100 Ethernet" },
    { 0x1229,	0x05,	"Intel 82558 Pro/100 Ethernet" },
    { 0x1229,	0x06,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1229,	0x07,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1229,	0x08,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1229,	0x09,	"Intel 82559ER Pro/100 Ethernet" },
    { 0x1229,	0x0c,	"Intel 82550 Pro/100 Ethernet" },
    { 0x1229,	0x0d,	"Intel 82550 Pro/100 Ethernet" },
    { 0x1229,	0x0e,	"Intel 82550 Pro/100 Ethernet" },
    { 0x1229,	0x0f,	"Intel 82551 Pro/100 Ethernet" },
    { 0x1229,	0x10,	"Intel 82551 Pro/100 Ethernet" },
    { 0x1229,	-1,	"Intel 82557/8/9 Pro/100 Ethernet" },
    { 0x2449,	-1,	"Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
    { 0,	-1,	NULL },
};

/*
 * Checksum-offload feature set advertised via if_hwassist.  The IP-header
 * checksum is only claimed when the FXP_IP_CSUM_WAR workaround is compiled
 * in (see the small-fragment discussion in fxp_start_body).
 */
#ifdef FXP_IP_CSUM_WAR
#define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
#else
#define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
#endif

static int		fxp_probe(device_t dev);
static int		fxp_attach(device_t dev);
static int		fxp_detach(device_t dev);
static int		fxp_shutdown(device_t dev);
static int		fxp_suspend(device_t dev);
static int		fxp_resume(device_t dev);

static void		fxp_intr(void *xsc);
static void		fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
			    u_int8_t statack, int count);
static void 		fxp_init(void *xsc);
static void 		fxp_init_body(struct fxp_softc *sc);
static void 		fxp_tick(void *xsc);
#ifndef BURN_BRIDGES
static void		fxp_powerstate_d0(device_t dev);
#endif
static void 		fxp_start(struct ifnet *ifp);
static void 		fxp_start_body(struct ifnet *ifp);
static void		fxp_stop(struct fxp_softc *sc);
static void 		fxp_release(struct fxp_softc *sc);
static int		fxp_ioctl(struct ifnet *ifp, u_long command,
			    caddr_t data);
static void 		fxp_watchdog(struct ifnet *ifp);
static int		fxp_add_rfabuf(struct fxp_softc *sc,
    			    struct fxp_rx *rxp);
static int		fxp_mc_addrs(struct fxp_softc *sc);
static void		fxp_mc_setup(struct fxp_softc *sc);
static u_int16_t	fxp_eeprom_getword(struct fxp_softc *sc, int offset,
			    int autosize);
static void 		fxp_eeprom_putword(struct fxp_softc *sc, int offset,
			    u_int16_t data);
static void		fxp_autosize_eeprom(struct fxp_softc *sc);
static void		fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
			    int offset, int words);
static void		fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
			    int offset, int words);
static int		fxp_ifmedia_upd(struct ifnet *ifp);
static void		fxp_ifmedia_sts(struct ifnet *ifp,
			    struct ifmediareq *ifmr);
static int		fxp_serial_ifmedia_upd(struct ifnet *ifp);
static void		fxp_serial_ifmedia_sts(struct ifnet *ifp,
			    struct ifmediareq *ifmr);
static volatile int	fxp_miibus_readreg(device_t dev, int phy, int reg);
static void		fxp_miibus_writereg(device_t dev, int phy, int reg,
			    int value);
static void		fxp_load_ucode(struct fxp_softc *sc);
static int		sysctl_int_range(SYSCTL_HANDLER_ARGS,
			    int low, int high);
static int		sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
static int		sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
static void 		fxp_scb_wait(struct fxp_softc *sc);
static void		fxp_scb_cmd(struct fxp_softc *sc, int cmd);
static void		fxp_dma_wait(struct fxp_softc *sc,
    			    volatile u_int16_t *status, bus_dma_tag_t dmat,
			    bus_dmamap_t map);

static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	fxp_miibus_writereg),

	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

/* The same driver binds to both plain PCI and CardBus attachments. */
DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);

/* Count of receiver-not-ready events, exported read-write for debugging. */
static int fxp_rnr;
SYSCTL_INT(_hw, OID_AUTO, fxp_rnr, CTLFLAG_RW, &fxp_rnr, 0, "fxp rnr events");

/* Non-zero disables hardware flow control; also a loader tunable. */
static int fxp_noflow;
SYSCTL_INT(_hw, OID_AUTO, fxp_noflow, CTLFLAG_RW, &fxp_noflow, 0,
    "fxp flow control disabled");
TUNABLE_INT("hw.fxp_noflow", &fxp_noflow);

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).
 */
static void
fxp_scb_wait(struct fxp_softc *sc)
{
	int i = 10000;

	/* Spin (up to ~20ms) until the SCB command byte reads back zero. */
	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
		DELAY(2);
	if (i == 0)
		device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
		    CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
		    CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
		    CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
		    CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
}

/*
 * Issue an SCB command.  On chips with the CU-resume erratum
 * (FXP_FLAG_CU_RESUME_BUG, see fxp_attach), a NOP is written and waited
 * for first so that CU_RESUME is never the command the chip sees while
 * entering IDLE.
 */
static void
fxp_scb_cmd(struct fxp_softc *sc, int cmd)
{
	if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
		fxp_scb_wait(sc);
	}
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
}

/*
 * Poll a DMA-visible status word (re-syncing the map for each read)
 * until the chip sets the Complete bit, or ~20ms elapse.
 */
static void
fxp_dma_wait(struct fxp_softc *sc, volatile u_int16_t *status,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int i = 10000;

	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
		DELAY(2);
		bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	}
	if (i == 0)
		device_printf(sc->dev, "DMA timeout\n");
}

/*
 * Return identification string if this device is ours.
 */
static int
fxp_probe(device_t dev)
{
	u_int16_t devid;
	u_int8_t revid;
	struct fxp_ident *ident;

	if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
		devid = pci_get_device(dev);
		revid = pci_get_revid(dev);
		/*
		 * Scan fxp_ident_table in order; revid-specific entries
		 * precede the -1 wildcard, so the most specific match wins.
		 */
		for (ident = fxp_ident_table; ident->name != NULL; ident++) {
			if (ident->devid == devid &&
			    (ident->revid == revid || ident->revid == -1)) {
				device_set_desc(dev, ident->name);
				return (0);
			}
		}
	}
	return (ENXIO);
}

#ifndef BURN_BRIDGES
/*
 * If the BIOS left the chip in a low-power state, save the BARs and
 * interrupt routing, force the device back to D0, then restore them
 * (the registers are lost on the D3->D0 transition).
 */
static void
fxp_powerstate_d0(device_t dev)
{
#if __FreeBSD_version >= 430002
	u_int32_t iobase, membase, irq;

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		/* Save important PCI config data. */
		iobase = pci_read_config(dev, FXP_PCI_IOBA, 4);
		membase = pci_read_config(dev, FXP_PCI_MMBA, 4);
		/*
		 * NOTE(review): a 4-byte access at PCIR_INTLINE covers the
		 * INTLINE/INTPIN/MIN_GNT/MAX_LAT dword; the write below is
		 * symmetric, so the round-trip is presumably intentional.
		 */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, FXP_PCI_IOBA, iobase, 4);
		pci_write_config(dev, FXP_PCI_MMBA, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif
}
#endif

/*
 * busdma callback: record the single segment's bus address through the
 * u_int32_t pointer passed as the callback argument.
 */
static void
fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

/*
 * Attach routine: map registers, size and sanity-check the EEPROM,
 * apply chip-revision workarounds, create all DMA tags/maps and
 * pre-allocate TX/RX resources, read the MAC address, probe the PHY,
 * and finally attach the ifnet and hook the interrupt.  On any failure
 * control falls through to "fail" and fxp_release() unwinds whatever
 * was allocated.
 */
static int
fxp_attach(device_t dev)
{
	int error = 0;
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	struct fxp_rx *rxp;
	u_int32_t val;
	u_int16_t data, myea[ETHER_ADDR_LEN / 2];
	int i, rid, m1, m2, prefer_iomap, maxtxseg;
	int s, ipcbxmit_disable;

	sc->dev = dev;
	callout_init(&sc->stat_ch, CALLOUT_MPSAFE);
	sysctl_ctx_init(&sc->sysctl_ctx);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
	    fxp_serial_ifmedia_sts);

	s = splimp();

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);
	/* Saved now, consulted later for the MWI check (PCIM_CMD_MWRICEN). */
	val = pci_read_config(dev, PCIR_COMMAND, 2);
#ifndef BURN_BRIDGES
	fxp_powerstate_d0(dev);
#endif
	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 * We default to memory mapping. Then we accept an override from the
	 * command line. Then we check to see which one is enabled.
	 */
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
	prefer_iomap = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}

	sc->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
	sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
	sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd, RF_ACTIVE);
	if (sc->mem == NULL) {
		/* First choice failed; fall back to the other mapping. */
		sc->rtp =
		    (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
		sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd,
		    RF_ACTIVE);
	}

	if (!sc->mem) {
		error = ENXIO;
		goto fail;
        }
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
	}

	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	/*
	 * Allocate our interrupt.
	 */
	rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Determine whether we must use the 503 serial interface.
	 */
	fxp_read_eeprom(sc, &data, 6, 1);
	if ((data & FXP_PHY_DEVICE_MASK) != 0 &&
	    (data & FXP_PHY_SERIAL_ONLY))
		sc->flags |= FXP_FLAG_SERIAL_MEDIA;

	/*
	 * Create the sysctl tree
	 */
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
	    "FXP driver receive interrupt microcode bundling delay");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
	    "FXP driver receive interrupt microcode bundle size limit");

	/*
	 * Pull in device tunables.
	 */
	sc->tunable_int_delay = TUNABLE_INT_DELAY;
	sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_delay", &sc->tunable_int_delay);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "bundle_max", &sc->tunable_bundle_max);

	/*
	 * Find out the chip revision; lump all 82557 revs together.
	 */
	fxp_read_eeprom(sc, &data, 5, 1);
	if ((data >> 8) == 1)
		sc->revision = FXP_REV_82557;
	else
		sc->revision = pci_get_revid(dev);

	/*
	 * Enable workarounds for certain chip revision deficiencies.
	 *
	 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
	 * some systems based a normal 82559 design, have a defect where
	 * the chip can cause a PCI protocol violation if it receives
	 * a CU_RESUME command when it is entering the IDLE state.  The
	 * workaround is to disable Dynamic Standby Mode, so the chip never
	 * deasserts CLKRUN#, and always remains in an active state.
	 *
	 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
	 */
	i = pci_get_device(dev);
	if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
	    sc->revision >= FXP_REV_82559_A0) {
		fxp_read_eeprom(sc, &data, 10, 1);
		if (data & 0x02) {			/* STB enable */
			u_int16_t cksum;
			int i;	/* shadows outer i (held pci device id) */

			device_printf(dev,
			    "Disabling dynamic standby mode in EEPROM\n");
			data &= ~0x02;
			fxp_write_eeprom(sc, &data, 10, 1);
			device_printf(dev, "New EEPROM ID: 0x%x\n", data);
			/*
			 * Recompute the EEPROM checksum (last word) so the
			 * chip still passes its self-check: sum of all other
			 * words plus the checksum must equal 0xBABA.
			 */
			cksum = 0;
			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
				fxp_read_eeprom(sc, &data, i, 1);
				cksum += data;
			}
			i = (1 << sc->eeprom_size) - 1;
			cksum = 0xBABA - cksum;
			fxp_read_eeprom(sc, &data, i, 1);
			fxp_write_eeprom(sc, &cksum, i, 1);
			device_printf(dev,
			    "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
			    i, data, cksum);
#if 1
			/*
			 * If the user elects to continue, try the software
			 * workaround, as it is better than nothing.
			 */
			sc->flags |= FXP_FLAG_CU_RESUME_BUG;
#endif
		}
	}

	/*
	 * If we are not a 82557 chip, we can enable extended features.
	 */
	if (sc->revision != FXP_REV_82557) {
		/*
		 * If MWI is enabled in the PCI configuration, and there
		 * is a valid cacheline size (8 or 16 dwords), then tell
		 * the board to turn on MWI.
		 */
		if (val & PCIM_CMD_MWRICEN &&
		    pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
			sc->flags |= FXP_FLAG_MWI_ENABLE;

		/* turn on the extended TxCB feature */
		sc->flags |= FXP_FLAG_EXT_TXCB;

		/* enable reception of long frames for VLAN */
		sc->flags |= FXP_FLAG_LONG_PKT_EN;
	} else {
		/* a hack to get long VLAN frames on a 82557 */
		sc->flags |= FXP_FLAG_SAVE_BAD;
	}

	/*
	 * Enable use of extended RFDs and TCBs for 82550
	 * and later chips. Note: we need extended TXCB support
	 * too, but that's already enabled by the code above.
	 * Be careful to do this only on the right devices.
	 *
	 * At least some 82550 cards probed as "chip=0x12298086 rev=0x0d"
	 * truncate packets that end with an mbuf containing 1 to 3 bytes
	 * when used with this feature enabled in the previous version of the
	 * driver.  This problem appears to be fixed now that the driver
	 * always sets the hardware parse bit in the IPCB structure, which
	 * the "Intel 8255x 10/100 Mbps Ethernet Controller Family Open
	 * Source Software Developer Manual" says is necessary in the
	 * cases where packet truncation was observed.
	 *
	 * The device hint "hint.fxp.UNIT_NUMBER.ipcbxmit_disable"
	 * allows this feature to be disabled at boot time.
	 *
	 * If fxp is not compiled into the kernel, this feature may also
	 * be disabled at run time:
	 *    # kldunload fxp
	 *    # kenv hint.fxp.0.ipcbxmit_disable=1
	 *    # kldload fxp
	 */
	if (resource_int_value("fxp", device_get_unit(dev), "ipcbxmit_disable",
	    &ipcbxmit_disable) != 0)
		ipcbxmit_disable = 0;
	if (ipcbxmit_disable == 0 && (sc->revision == FXP_REV_82550 ||
	    sc->revision == FXP_REV_82550_C)) {
		sc->rfa_size = sizeof (struct fxp_rfa);
		sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
		sc->flags |= FXP_FLAG_EXT_RFA;
	} else {
		sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
		sc->tx_cmd = FXP_CB_COMMAND_XMIT;
	}

	/*
	 * Allocate DMA tags and DMA safe memory.
	 */
	/* With IPCBs one TBD slot is consumed by the checksum entry. */
	maxtxseg = sc->flags & FXP_FLAG_EXT_RFA ? FXP_NTXSEG - 1 : FXP_NTXSEG;
	error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * maxtxseg, maxtxseg,
	    MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->fxp_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
	    sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant,
	    &sc->fxp_stag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
	if (error)
		goto fail;
	error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
	    sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
	if (error) {
		device_printf(dev, "could not map the stats buffer\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
	    FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
	if (error)
		goto fail;

	error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
	    sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
	    &sc->fxp_desc.cbl_addr, 0);
	if (error) {
		device_printf(dev, "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
	    sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant,
	    &sc->mcs_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
	    BUS_DMA_NOWAIT, &sc->mcs_map);
	if (error)
		goto fail;
	error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
	    sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
	if (error) {
		device_printf(dev,
		    "can't map the multicast setup command\n");
		goto fail;
	}

	/*
	 * Pre-allocate the TX DMA maps.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		error = bus_dmamap_create(sc->fxp_mtag, 0,
		    &sc->fxp_desc.tx_list[i].tx_map);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			goto fail;
		}
	}
	error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
	if (error) {
		device_printf(dev, "can't create spare DMA map\n");
		goto fail;
	}

	/*
	 * Pre-allocate our receive buffers.
	 */
	sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
	for (i = 0; i < FXP_NRFABUFS; i++) {
		rxp = &sc->fxp_desc.rx_list[i];
		error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			goto fail;
		}
		if (fxp_add_rfabuf(sc, rxp) != 0) {
			error = ENOMEM;
			goto fail;
		}
	}

	/*
	 * Read MAC address (stored little-endian, one 16-bit word per
	 * EEPROM location).
	 */
	fxp_read_eeprom(sc, myea, 0, 3);
	sc->arpcom.ac_enaddr[0] = myea[0] & 0xff;
	sc->arpcom.ac_enaddr[1] = myea[0] >> 8;
	sc->arpcom.ac_enaddr[2] = myea[1] & 0xff;
	sc->arpcom.ac_enaddr[3] = myea[1] >> 8;
	sc->arpcom.ac_enaddr[4] = myea[2] & 0xff;
	sc->arpcom.ac_enaddr[5] = myea[2] >> 8;
	if (bootverbose) {
		device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    pci_get_subvendor(dev), pci_get_subdevice(dev),
		    pci_get_revid(dev));
		fxp_read_eeprom(sc, &data, 10, 1);
		device_printf(dev, "Dynamic Standby mode is %s\n",
		    data & 0x02 ? "enabled" : "disabled");
	}

	/*
	 * If this is only a 10Mbps device, then there is no MII, and
	 * the PHY will use a serial interface instead.
	 *
	 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
	 * doesn't have a programming interface of any sort. The
	 * media is sensed automatically based on how the link partner
	 * is configured. This is, in essence, manual configuration.
	 */
	if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
		    fxp_ifmedia_sts)) {
	                device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 100000000;
	ifp->if_init = fxp_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Enable checksum offload for 82550 or better chips */
	if (sc->flags & FXP_FLAG_EXT_RFA) {
		ifp->if_hwassist = FXP_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

#ifdef DEVICE_POLLING
	/* Inform the world we support polling. */
	ifp->if_capabilities |= IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_POLLING;
#endif

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, sc->arpcom.ac_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */

	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;

	/*
	 * Hook our interrupt after all initialization is complete.
	 * XXX This driver has been tested with the INTR_MPSAFFE flag set
	 * however, ifp and its functions are not fully locked so MPSAFE
	 * should not be used unless you can handle potential data loss.
	 */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
			       fxp_intr, sc, &sc->ih);
	if (error) {
		device_printf(dev, "could not setup irq\n");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}

fail:
	splx(s);
	if (error)
		fxp_release(sc);
	return (error);
}

/*
 * Release all resources.  The softc lock should not be held and the
 * interrupt should already be torn down.
 */
static void
fxp_release(struct fxp_softc *sc)
{
	struct fxp_rx *rxp;
	struct fxp_tx *txp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_NOTOWNED);
	KASSERT(sc->ih == NULL,
	    ("fxp_release() called with intr handle still active"));
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	bus_generic_detach(sc->dev);
	ifmedia_removeall(&sc->sc_media);
	if (sc->fxp_desc.cbl_list) {
		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
		    sc->cbl_map);
	}
	if (sc->fxp_stats) {
		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
	}
	if (sc->mcsp) {
		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
	}
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->mem)
		bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem);
	if (sc->fxp_mtag) {
		for (i = 0; i < FXP_NRFABUFS; i++) {
			rxp = &sc->fxp_desc.rx_list[i];
			if (rxp->rx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
				m_freem(rxp->rx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
		}
		bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
		bus_dma_tag_destroy(sc->fxp_mtag);
	}
	/*
	 * NOTE(review): the TX maps below were created from fxp_mtag (see
	 * fxp_attach), yet they are destroyed under the fxp_stag guard and
	 * only after fxp_mtag itself was destroyed above.  This ordering
	 * looks wrong per bus_dma(9) (maps should be destroyed before their
	 * parent tag) — confirm against a later revision of this driver.
	 */
	if (sc->fxp_stag) {
		for (i = 0; i < FXP_NTXCB; i++) {
			txp = &sc->fxp_desc.tx_list[i];
			if (txp->tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
				m_freem(txp->tx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_stag);
	}
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);

	sysctl_ctx_free(&sc->sysctl_ctx);

	mtx_destroy(&sc->sc_mtx);
}

/*
 * Detach interface.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	FXP_LOCK(sc);
	s = splimp();

	sc->suspended = 1;	/* Do same thing as we do for suspend */
	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(&sc->arpcom.ac_if);

	/*
	 * Stop DMA and drop transmit queue, but disable interrupts first.
	 */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	fxp_stop(sc);
	FXP_UNLOCK(sc);

	/*
	 * Unhook interrupt before dropping lock. This is to prevent
	 * races with fxp_intr().
	 */
	bus_teardown_intr(sc->dev, sc->irq, sc->ih);
	sc->ih = NULL;

	splx(s);

	/* Release our allocated resources. */
	fxp_release(sc);
	return (0);
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{
	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * do could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	fxp_stop((struct fxp_softc *) device_get_softc(dev));
	return (0);
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int i, s;

	FXP_LOCK(sc);
	s = splimp();

	fxp_stop(sc);

	/* Save the five BARs plus ROM, interrupt and timing registers. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	FXP_UNLOCK(sc);
	splx(s);
	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->sc_if;
	u_int16_t pci_command;
	int i, s;

	FXP_LOCK(sc);
	s = splimp();
#ifndef BURN_BRIDGES
	fxp_powerstate_d0(dev);
#endif
	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);

	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		fxp_init_body(sc);

	sc->suspended = 0;

	FXP_UNLOCK(sc);
	splx(s);
	return (0);
}

/*
 * Bit-bang 'length' bits of 'data' into the serial EEPROM, MSB first,
 * toggling EESK around each data bit per the 93Cxx protocol.
 */
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	u_int16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static u_int16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	u_int16_t reg, data;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.  When autosizing, 'data' counts address bits
	 * shifted so far; the EEPROM pulls EEDO low (the "dummy zero")
	 * once it has seen a full address, which reveals its size.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}

/*
 * Write one 16-bit word to the serial EEPROM: enable writes (EWEN),
 * shift in the write opcode/address/data, poll EEDO for completion,
 * then disable writes again (EWDS).
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 */
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	/*
	 * guess maximum size of 256 words
	 * (eeprom_size holds the address width in bits: 2^8 = 256 words)
	 */
	sc->eeprom_size = 8;
	/* autosize: the getword probe (autosize flag = 1) corrects eeprom_size */
	(void) fxp_eeprom_getword(sc, 0, 1);
}

/*
 * Read 'words' 16-bit words from the EEPROM starting at word 'offset'
 * into 'data'.
 */
static void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		data[i] = fxp_eeprom_getword(sc, offset + i, 0);
}

/*
 * Write 'words' 16-bit words from 'data' to the EEPROM starting at
 * word 'offset'.
 */
static void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*
 * bus_dma(9) callback: fill the TBD (transmit buffer descriptor) array
 * of the next free TxCB with the physical segments of the mbuf chain
 * being loaded.  Invoked synchronously from bus_dmamap_load_mbuf() in
 * fxp_start_body(); 'arg' is the softc.
 */
static void
fxp_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct fxp_softc *sc;
	struct fxp_cb_tx *txp;
	int i;

	if (error)
		return;

	KASSERT(nseg <= FXP_NTXSEG, ("too many DMA segments"));

	sc = arg;
	/* The TxCB being filled is the one after tx_last (see fxp_start_body). */
	txp = sc->fxp_desc.tx_last->tx_next->tx_cb;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		/*
		 * If this is an 82550/82551, then we're using extended
		 * TxCBs _and_ we're using checksum offload. This means
		 * that the TxCB is really an IPCB. One major difference
		 * between the two is that with plain extended TxCBs,
		 * the bottom half of the TxCB contains two entries from
		 * the TBD array, whereas IPCBs contain just one entry:
		 * one entry (8 bytes) has been sacrificed for the TCP/IP
		 * checksum offload control bits. So to make things work
		 * right, we have to start filling in the TBD array
		 * starting from a different place depending on whether
		 * the chip is an 82550/82551 or not.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA) {
			txp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
			txp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
		} else {
			txp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
			txp->tbd[i].tb_size = htole32(segs[i].ds_len);
		}
	}
	txp->tbd_number = nseg;
}

/*
 * Grab the softc lock and call the real fxp_start_body() routine
 */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	fxp_start_body(ifp);
	FXP_UNLOCK(sc);
}

/*
 * Start packet transmission on the interface.
 * This routine must be called with the softc lock held, and is an
 * internal entry point only.
 */
static void
fxp_start_body(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_tx *txp;
	struct mbuf *mb_head;
	int error;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup) {
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;

		/*
		 * A note in Appendix B of the Intel 8255x 10/100 Mbps
		 * Ethernet Controller Family Open Source Software
		 * Developer Manual says:
		 *    Using software parsing is only allowed with legal
		 *    TCP/IP or UDP/IP packets.
		 *    ...
		 *    For all other datagrams, hardware parsing must
		 *    be used.
		 * Software parsing appears to truncate ICMP and
		 * fragmented UDP packets that contain one to three
		 * bytes in the second (and final) mbuf of the packet.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA)
			txp->tx_cb->ipcb_ip_activation_high =
			    FXP_IPCB_HARDWAREPARSING_ENABLE;

		/*
		 * Deal with TCP/IP checksum offload. Note that
		 * in order for TCP checksum offload to work,
		 * the pseudo header checksum must have already
		 * been computed and stored in the checksum field
		 * in the TCP header. The stack should have
		 * already done this for us.
		 */

		if (mb_head->m_pkthdr.csum_flags) {
			if (mb_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				txp->tx_cb->ipcb_ip_schedule =
				    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
				if (mb_head->m_pkthdr.csum_flags & CSUM_TCP)
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_TCP_PACKET;
			}
#ifdef FXP_IP_CSUM_WAR
		/*
		 * XXX The 82550 chip appears to have trouble
		 * dealing with IP header checksums in very small
		 * datagrams, namely fragments from 1 to 3 bytes
		 * in size. For example, say you want to transmit
		 * a UDP packet of 1473 bytes. The packet will be
		 * fragmented over two IP datagrams, the latter
		 * containing only one byte of data. The 82550 will
		 * botch the header checksum on the 1-byte fragment.
		 * As long as the datagram contains 4 or more bytes
		 * of data, you're ok.
		 *
		 * The following code attempts to work around this
		 * problem: if the datagram is less than 38 bytes
		 * in size (14 bytes ether header, 20 bytes IP header,
		 * plus 4 bytes of data), we punt and compute the IP
		 * header checksum by hand. This workaround doesn't
		 * work very well, however, since it can be fooled
		 * by things like VLAN tags and IP options that make
		 * the header sizes/offsets vary.
		 */

			if (mb_head->m_pkthdr.csum_flags & CSUM_IP) {
				if (mb_head->m_pkthdr.len < 38) {
					struct ip *ip;
					mb_head->m_data += ETHER_HDR_LEN;
					ip = mtod(mb_head, struct ip *);
					ip->ip_sum = in_cksum(mb_head,
					    ip->ip_hl << 2);
					mb_head->m_data -= ETHER_HDR_LEN;
				} else {
					txp->tx_cb->ipcb_ip_activation_high =
					    FXP_IPCB_HARDWAREPARSING_ENABLE;
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_IP_CHECKSUM_ENABLE;
				}
			}
#endif
		}

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
		error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
		    mb_head, fxp_dma_map_txbuf, sc, 0);

		if (error && error != EFBIG) {
			/* Unrecoverable mapping failure: drop this packet. */
			device_printf(sc->dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(mb_head);
			break;
		}

		if (error) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			mn = m_defrag(mb_head, M_DONTWAIT);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			} else {
				mb_head = mn;
			}
			error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
			    mb_head, fxp_dma_map_txbuf, sc, 0);
			if (error) {
				device_printf(sc->dev,
				    "can't map mbuf (error %d)\n", error);
				m_freem(mb_head);
				break;
			}
		}

		bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
		    BUS_DMASYNC_PREWRITE);

		txp->tx_mbuf = mb_head;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->byte_count = 0;
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->tx_cb->cb_command =
			    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S);
		} else {
			/*
			 * Request a completion interrupt (FXP_CB_COMMAND_I)
			 * once per FXP_CXINT_THRESH queued packets.
			 */
			txp->tx_cb->cb_command =
			    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
			/*
			 * Set a 5 second timer just in case we don't hear
			 * from the card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_cb->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */

#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 * XXX This is probably bogus and we're _not_ looking
		 * for atomicity here.
		 */
		atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
		    htole16(FXP_CB_COMMAND_S));
#else
		/* Clear the suspend bit on the previous tail of the ring. */
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
#endif /*__alpha__*/
		sc->fxp_desc.tx_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, tx_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->fxp_desc.tx_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		BPF_MTAP(ifp, mb_head);
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
	}
}

#ifdef DEVICE_POLLING
static poll_handler_t fxp_poll;

/*
 * Polling entry point: acknowledge whatever status bits we can and run
 * the shared interrupt body for up to 'count' received packets.
 */
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	u_int8_t statack;

	FXP_LOCK(sc);
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
		FXP_UNLOCK(sc);
		return;
	}
	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		/* 0xff may mean the card has been ejected; 0 means idle. */
		if (tmp == 0xff || tmp == 0) {
			FXP_UNLOCK(sc);
			return; /* nothing to do */
		}
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	fxp_intr_body(sc, ifp, statack, count);
	FXP_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Process interface interrupts.
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;

	FXP_LOCK(sc);
	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING) {
		FXP_UNLOCK(sc);
		return;
	}
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(fxp_poll, ifp)) {
		/* disable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
		FXP_UNLOCK(sc);
		/* Kick off the first poll pass by hand. */
		fxp_poll(ifp, 0, 1);
		return;
	}
#endif
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff) {
			FXP_UNLOCK(sc);
			return;
		}

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		fxp_intr_body(sc, ifp, statack, -1);
	}
	FXP_UNLOCK(sc);
}

/*
 * Reclaim TxCBs (and their mbufs) that the chip has marked complete.
 * Walks from tx_first while the C (complete) status bit is set; leaves
 * tx_first on the first still-pending TxCB.  Called with the softc
 * lock held.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct fxp_tx *txp;

	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
}

/*
 * Shared interrupt/poll work: reap completed transmits, then process
 * received frames.  'count' limits rx processing when polling (-1 means
 * unlimited, i.e. the interrupt path).  Called with the softc lock
 * held; the lock is dropped around if_input().
 */
static void
fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, u_int8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (rnr)
		fxp_rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event likt a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
		fxp_txeof(sc);

		ifp->if_timer = 0;
		if (sc->tx_queued == 0) {
			if (sc->need_mcsetup)
				fxp_mc_setup(sc);
		}
		/*
		 * Try to start more packets transmitting.
		 */
		if (ifp->if_snd.ifq_head != NULL)
			fxp_start_body(ifp);
	}

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the front of the mbuf cluster. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_add_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
			    sc->rfa_size ||
			    le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

			/* Do IP checksum checking. */
			if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if ((rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
				    (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			FXP_LOCK(sc);
		}
	}
	if (rnr) {
		/* Restart the receiver at the current ring head. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}

/*
 * Update packet in/out/collision statistics.
 The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	int s;

	FXP_LOCK(sc);
	s = splimp();
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occured, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantagous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being defered for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the syncronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
	FXP_UNLOCK(sc);
	splx(s);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int i;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/*
	 * Cancel stats updater.
	 */
	callout_stop(&sc->stat_ch);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag,
				    txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	device_printf(sc->dev, "device timeout\n");
	ifp->if_oerrors++;

	/* Reinitialize the whole device to recover the wedged chip. */
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}

/*
 * Acquire locks and then call the real initialization function.  This
 * is necessary because ether_ioctl() calls if_init() and this would
 * result in mutex recursion if the mutex was held.
 */
static void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;

	FXP_LOCK(sc);
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}

/*
 * Perform device initialization. This routine must be called with the
 * softc lock held.
 */
static void
fxp_init_body(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *tcbp;
	struct fxp_tx *txp;
	struct fxp_cb_mcs *mcsp;
	int i, prm, s;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	s = splimp();
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * Attempt to load microcode if requested.
	 */
	if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
		fxp_load_ucode(sc);

	/*
	 * Initialize the multicast address list.
	 */
	if (fxp_mc_addrs(sc)) {
		mcsp = sc->mcsp;
		mcsp->cb_status = 0;
		mcsp->cb_command =
		    htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
		mcsp->link_addr = 0xffffffff;
		/*
		 * Start the multicast setup command.
		 */
		fxp_scb_wait(sc);
		bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
		/* ...and wait for it to complete. */
		fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
		bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
		    BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));

	cbp->cb_status =	0;
	cbp->cb_command =	htole16(FXP_CB_COMMAND_CONFIG |
	    FXP_CB_COMMAND_EL);
	cbp->link_addr =	0xffffffff;	/* (no) next command */
	cbp->byte_count =	sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->mwi_enable =	sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
	cbp->type_enable =	0;	/* actually reserved */
	cbp->read_align_en =	sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
	cbp->end_wr_on_cl =	sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_mbce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->direct_dma_dis =	1;	/* disable direct rcv dma mode */
	cbp->tno_int_or_tco_en =0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->ext_txcb_dis = 	sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
	cbp->ext_stats_dis = 	1;	/* disable extended counters */
	cbp->keep_overrun_rx = 	0;	/* don't pass overrun frames to host */
	cbp->save_bf =		sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (once) on DMA underrun */
	cbp->two_frames =	0;	/* do not limit FIFO to 2 frames */
	cbp->dyn_tbd =		0;	/* (no) dynamic TBD mode */
	cbp->ext_rfa =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->mediatype =	sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
	cbp->csma_dis =		0;	/* (don't) disable link */
	cbp->tcp_udp_cksum =	0;	/* (don't) enable checksum */
	cbp->vlan_tco =		0;	/* (don't) enable vlan wakeup */
	cbp->link_wake_en =	0;	/* (don't) assert PME# on link change */
	cbp->arp_wake_en =	0;	/* (don't) assert PME# on arp */
	cbp->mc_wake_en =	0;	/* (don't) enable PME# on mcmatch */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->wait_after_win =	0;	/* (don't) enable modified backoff alg*/
	cbp->ignore_ul =	0;	/* consider U/L bit in IA matching */
	cbp->crc16_en =		0;	/* (don't) enable crc-16 algorithm */
	cbp->crscdt =		sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->long_rx_en =	sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
	cbp->ia_wake_en =	0;	/* (don't) wake up on address match */
	cbp->magic_pkt_dis =	0;	/* (don't) disable magic packet */
					/* must set wake_en in PMCSR also */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
	cbp->gamla_rx =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;

	if (fxp_noflow || sc->revision == FXP_REV_82557) {
		/*
		 * The 82557 has no hardware flow control, the values
		 * below are the defaults for the chip.
		 */
		cbp->fc_delay_lsb =	0;
		cbp->fc_delay_msb =	0x40;
		cbp->pri_fc_thresh =	3;
		cbp->tx_fc_dis =	0;
		cbp->rx_fc_restop =	0;
		cbp->rx_fc_restart =	0;
		cbp->fc_filter =	0;
		cbp->pri_fc_loc =	1;
	} else {
		cbp->fc_delay_lsb =	0x1f;
		cbp->fc_delay_msb =	0x01;
		cbp->pri_fc_thresh =	3;
		cbp->tx_fc_dis =	0;	/* enable transmit FC */
		cbp->rx_fc_restop =	1;	/* enable FC restop frames */
		cbp->rx_fc_restart =	1;	/* enable FC restart frames */
		cbp->fc_filter =	!prm;	/* drop FC frames to host */
		cbp->pri_fc_loc =	1;	/* FC pri location (byte31) */
	}

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = 0xffffffff;
	bcopy(sc->arpcom.ac_enaddr, cb_ias->macaddr,
	    sizeof(sc->arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);

	/*
	 * Initialize transmit control block (TxCB) list.
	 */
	txp = sc->fxp_desc.tx_list;
	tcbp = sc->fxp_desc.cbl_list;
	bzero(tcbp, FXP_TXCB_SZ);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].tx_cb = tcbp + i;
		txp[i].tx_mbuf = NULL;
		tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
		tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
		    (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
		if (sc->flags & FXP_FLAG_EXT_TXCB)
			tcbp[i].tbd_array_addr =
			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
		else
			tcbp[i].tbd_array_addr =
			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
		/* The ring is circular: the last entry links back to index 0. */
		txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

	/*
	 * Set current media.
	 */
	if (sc->miibus != NULL)
		mii_mediachg(device_get_softc(sc->miibus));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only do that if we are not polling. And because (presumably)
	 * the default is interrupts on, we need to disable them explicitly!
	 */
	if ( ifp->if_flags & IFF_POLLING )
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	else
#endif /* DEVICE_POLLING */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);

	/*
	 * Start stats updater.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
	splx(s);
}

/* Media change for serial (non-MII) media: nothing to do. */
static int
fxp_serial_ifmedia_upd(struct ifnet *ifp)
{

	return (0);
}

/* Media status for serial (non-MII) media: always manual Ethernet. */
static void
fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*
 * Change media according to request.
 */
static int
fxp_ifmedia_upd(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	/* CU resume bug only matters at 10Mbps on affected chips. */
	if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG)
		sc->cu_resume_bug = 1;
	else
		sc->cu_resume_bug = 0;
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
static int
fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa, *p_rfa;
	struct fxp_rx *p_rx;
	bus_dmamap_t tmp_map;
	int error;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	rfa->rfa_status = 0;
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  We're thus
	 * using the le32enc() function which handles endianness and
	 * is also alignment-safe.
	 */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);

	/* Map the RFA into DMA memory. */
	error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
	    MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
	    &rxp->rx_addr, 0);
	if (error) {
		m_freem(m);
		return (error);
	}

	/* Swap the freshly-loaded spare map in for the old one. */
	bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
	tmp_map = sc->spare_map;
	sc->spare_map = rxp->rx_map;
	rxp->rx_map = tmp_map;
	rxp->rx_mbuf = m;

	bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->fxp_desc.rx_head != NULL) {
		p_rx = sc->fxp_desc.rx_tail;
		p_rfa = (struct fxp_rfa *)
		    (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
		p_rx->rx_next = rxp;
		le32enc(&p_rfa->link_addr, rxp->rx_addr);
		p_rfa->rfa_control = 0;
		bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		rxp->rx_next = NULL;
		sc->fxp_desc.rx_head = rxp;
	}
	sc->fxp_desc.rx_tail = rxp;
	return (0);
}

/*
 * Read a PHY register over MDI.
 * NOTE(review): the `volatile' qualifier on the return type has no
 * effect in C; presumably a historical artifact -- confirm before
 * removing, since the miibus method table expects a plain int.
 */
static volatile int
fxp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int count = 10000;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		device_printf(dev, "fxp_miibus_readreg: timed out\n");

	return (value & 0xffff);
}

/* Write a PHY register over MDI, spinning until the chip latches it. */
static void
fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int count = 10000;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		device_printf(dev, "fxp_miibus_writereg: timed out\n");
}

/*
 * Handle interface ioctls: flags, multicast membership, media and
 * capability changes; everything else falls through to ether_ioctl().
 */
static int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int flag, mask, s, error = 0;

	/*
	 * Detaching causes us to call ioctl with the mutex owned.  Preclude
	 * that by saying we're busy if the lock is already held.
	 */
	if (mtx_owned(&sc->sc_mtx))
		return (EBUSY);

	FXP_LOCK(sc);
	s = splimp();

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_ALLMULTI)
			sc->flags |= FXP_FLAG_ALL_MCAST;
		else
			sc->flags &= ~FXP_FLAG_ALL_MCAST;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			fxp_init_body(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_ALLMULTI)
			sc->flags |= FXP_FLAG_ALL_MCAST;
		else
			sc->flags &= ~FXP_FLAG_ALL_MCAST;
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
			fxp_mc_setup(sc);
		/*
		 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
		 * again rather than else {}.
		 */
		if (sc->flags & FXP_FLAG_ALL_MCAST)
			fxp_init_body(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus != NULL) {
			mii = device_get_softc(sc->miibus);
                        error = ifmedia_ioctl(ifp, ifr,
                            &mii->mii_media, command);
		} else {
                        error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		}
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING)
			ifp->if_capenable ^= IFCAP_POLLING;
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (sc->revision != FXP_REV_82557)
				flag = FXP_FLAG_LONG_PKT_EN;
			else /* a hack to get long frames on the old chip */
				flag = FXP_FLAG_SAVE_BAD;
			sc->flags ^= flag;
			if (ifp->if_flags & IFF_UP)
				fxp_init_body(sc);
		}
		break;

	default:
		/*
		 * ether_ioctl() will eventually call fxp_start() which
		 * will result in mutex recursion so drop it first.
		 */
		FXP_UNLOCK(sc);
		error = ether_ioctl(ifp, command, data);
	}
	if (mtx_owned(&sc->sc_mtx))
		FXP_UNLOCK(sc);
	splx(s);
	return (error);
}

/*
 * Fill in the multicast address list and return number of entries.
 */
static int
fxp_mc_addrs(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct ifmultiaddr *ifma;
	int nmcasts;

	nmcasts = 0;
	if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
#if __FreeBSD_version < 500000
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (nmcasts >= MAXMCADDR) {
				/*
				 * Too many groups for the hardware filter:
				 * fall back to receive-all-multicast.
				 */
				sc->flags |= FXP_FLAG_ALL_MCAST;
				nmcasts = 0;
				break;
			}
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
			nmcasts++;
		}
	}
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
	return (nmcasts);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splimp.
 */
static void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int count;

	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified
		 * when all TX commands have been processed.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;
		txp->tx_mbuf = NULL;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
		/*
		 * Advance the end of list forward.
		 */
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
		bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
		sc->fxp_desc.tx_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);

		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor.
	 */
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
	txp = &sc->fxp_desc.mcs_tx;
	txp->tx_mbuf = NULL;
	txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
	txp->tx_next = sc->fxp_desc.tx_list;
	(void) fxp_mc_addrs(sc);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE && --count)
		DELAY(10);
	if (count == 0) {
		device_printf(sc->dev, "command queue timeout\n");
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	ifp->if_timer = 2;
	return;
}

/*
 * Per-chip-revision receive-bundling microcode images (macros supplied
 * by the microcode header).
 */
static u_int32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
static u_int32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;

/* Expands to the image pointer and its size, filling two table fields. */
#define UCODE(x)	x, sizeof(x)

struct ucode {
	u_int32_t	revision;		/* chip revision the image is for */
	u_int32_t	*ucode;			/* microcode image */
	int		length;			/* image size in bytes */
	u_short		int_delay_offset;	/* dword index of int delay parameter */
	u_short		bundle_max_offset;	/* dword index of bundle max parameter */
} ucode_table[] = {
	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
	{ 0, NULL, 0, 0, 0 }
};

static void
fxp_load_ucode(struct fxp_softc *sc)
{
	struct ucode *uc;
	struct fxp_cb_ucode *cbp;

	/* Find a microcode image matching this chip revision, if any. */
	for (uc = ucode_table; uc->ucode != NULL; uc++)
		if (sc->revision == uc->revision)
			break;
	if (uc->ucode == NULL)
		return;
	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;		/* (no) next command */
	memcpy(cbp->ucode, uc->ucode, uc->length);
	if (uc->int_delay_offset)
		*(u_int16_t *)&cbp->ucode[uc->int_delay_offset] =
		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
	if (uc->bundle_max_offset)
		*(u_int16_t *)&cbp->ucode[uc->bundle_max_offset] =
htole16(sc->tunable_bundle_max); /* * Download the ucode to the chip. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); device_printf(sc->dev, "Microcode loaded, int_delay: %d usec bundle_max: %d\n", sc->tunable_int_delay, uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max); sc->flags |= FXP_FLAG_UCODE; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } /* * Interrupt delay is expressed in microseconds, a multiplier is used * to convert this to the appropriate clock ticks before using. */ static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000)); } static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff)); } Index: head/sys/dev/gx/if_gx.c =================================================================== --- head/sys/dev/gx/if_gx.c (revision 129878) +++ head/sys/dev/gx/if_gx.c (revision 129879) @@ -1,1612 +1,1613 @@ /*- * Copyright (c) 1999,2000,2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(gx, pci, 1, 1, 1); MODULE_DEPEND(gx, ether, 1, 1, 1); MODULE_DEPEND(gx, miibus, 1, 1, 1); #include "miibus_if.h" #define TUNABLE_TX_INTR_DELAY 100 #define TUNABLE_RX_INTR_DELAY 100 #define GX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS) /* * Various supported device vendors/types and their names. 
*/ struct gx_device { u_int16_t vendor; u_int16_t device; int version_flags; u_int32_t version_ipg; char *name; }; static struct gx_device gx_devs[] = { { INTEL_VENDORID, DEVICEID_WISEMAN, GXF_FORCE_TBI | GXF_OLD_REGS, 10 | 2 << 10 | 10 << 20, "Intel Gigabit Ethernet (82542)" }, { INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER, GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM, 6 | 8 << 10 | 6 << 20, "Intel Gigabit Ethernet (82543GC-F)" }, { INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER, GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM, 8 | 8 << 10 | 6 << 20, "Intel Gigabit Ethernet (82543GC-T)" }, #if 0 /* notyet.. */ { INTEL_VENDORID, DEVICEID_CORDOVA_FIBER, GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM, 6 | 8 << 10 | 6 << 20, "Intel Gigabit Ethernet (82544EI-F)" }, { INTEL_VENDORID, DEVICEID_CORDOVA_COPPER, GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM, 8 | 8 << 10 | 6 << 20, "Intel Gigabit Ethernet (82544EI-T)" }, { INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER, GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM, 8 | 8 << 10 | 6 << 20, "Intel Gigabit Ethernet (82544GC-T)" }, #endif { 0, 0, 0, 0, NULL } }; static struct gx_regs new_regs = { GX_RX_RING_BASE, GX_RX_RING_LEN, GX_RX_RING_HEAD, GX_RX_RING_TAIL, GX_RX_INTR_DELAY, GX_RX_DMA_CTRL, GX_TX_RING_BASE, GX_TX_RING_LEN, GX_TX_RING_HEAD, GX_TX_RING_TAIL, GX_TX_INTR_DELAY, GX_TX_DMA_CTRL, }; static struct gx_regs old_regs = { GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN, GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL, GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL, GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN, GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL, GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL, }; static int gx_probe(device_t dev); static int gx_attach(device_t dev); static int gx_detach(device_t dev); static void gx_shutdown(device_t dev); static void gx_intr(void *xsc); static void gx_init(void *xsc); static struct gx_device *gx_match(device_t dev); static void gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest); static int gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt); 
static int gx_ifmedia_upd(struct ifnet *ifp); static void gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int gx_miibus_readreg(device_t dev, int phy, int reg); static void gx_miibus_writereg(device_t dev, int phy, int reg, int value); static void gx_miibus_statchg(device_t dev); static int gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void gx_setmulti(struct gx_softc *gx); static void gx_reset(struct gx_softc *gx); static void gx_phy_reset(struct gx_softc *gx); static void gx_release(struct gx_softc *gx); static void gx_stop(struct gx_softc *gx); static void gx_watchdog(struct ifnet *ifp); static void gx_start(struct ifnet *ifp); static int gx_init_rx_ring(struct gx_softc *gx); static void gx_free_rx_ring(struct gx_softc *gx); static int gx_init_tx_ring(struct gx_softc *gx); static void gx_free_tx_ring(struct gx_softc *gx); static device_method_t gx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gx_probe), DEVMETHOD(device_attach, gx_attach), DEVMETHOD(device_detach, gx_detach), DEVMETHOD(device_shutdown, gx_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, gx_miibus_readreg), DEVMETHOD(miibus_writereg, gx_miibus_writereg), DEVMETHOD(miibus_statchg, gx_miibus_statchg), { 0, 0 } }; static driver_t gx_driver = { "gx", gx_methods, sizeof(struct gx_softc) }; static devclass_t gx_devclass; DRIVER_MODULE(gx, pci, gx_driver, gx_devclass, 0, 0); DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0); static struct gx_device * gx_match(device_t dev) { int i; for (i = 0; gx_devs[i].name != NULL; i++) { if ((pci_get_vendor(dev) == gx_devs[i].vendor) && (pci_get_device(dev) == gx_devs[i].device)) return (&gx_devs[i]); } return (NULL); } static int gx_probe(device_t dev) { struct gx_device *gx_dev; gx_dev = gx_match(dev); if (gx_dev == NULL) return (ENXIO); device_set_desc(dev, gx_dev->name); return (0); } static int gx_attach(device_t dev) { struct gx_softc *gx; struct gx_device *gx_dev; struct ifnet *ifp; 
u_int32_t command; int rid, s; int error = 0; s = splimp(); gx = device_get_softc(dev); bzero(gx, sizeof(struct gx_softc)); gx->gx_dev = dev; gx_dev = gx_match(dev); gx->gx_vflags = gx_dev->version_flags; gx->gx_ipg = gx_dev->version_ipg; mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); GX_LOCK(gx); /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; if (gx->gx_vflags & GXF_ENABLE_MWI) command |= PCIM_CMD_MWIEN; pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); /* XXX check cache line size? */ if ((command & PCIM_CMD_MEMEN) == 0) { device_printf(dev, "failed to enable memory mapping!\n"); error = ENXIO; goto fail; } rid = GX_PCI_LOMEM; gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); #if 0 /* support PIO mode */ rid = PCI_LOIO; gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); #endif if (gx->gx_res == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } gx->gx_btag = rman_get_bustag(gx->gx_res); gx->gx_bhandle = rman_get_bushandle(gx->gx_res); /* Allocate interrupt */ rid = 0; gx->gx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (gx->gx_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, gx->gx_irq, INTR_TYPE_NET, gx_intr, gx, &gx->gx_intrhand); if (error) { device_printf(dev, "couldn't setup irq\n"); goto fail; } /* compensate for different register mappings */ if (gx->gx_vflags & GXF_OLD_REGS) gx->gx_reg = old_regs; else gx->gx_reg = new_regs; if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr, GX_EEMAP_MAC, 3)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* Allocate the ring buffers. 
*/ gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (gx->gx_rdata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(gx->gx_rdata, sizeof(struct gx_ring_data)); /* Set default tuneable values. */ gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY; gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY; /* Set up ifnet structure */ ifp = &gx->arpcom.ac_if; ifp->if_softc = gx; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = gx_ioctl; ifp->if_start = gx_start; ifp->if_watchdog = gx_watchdog; ifp->if_init = gx_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = GX_TX_RING_CNT - 1; ifp->if_capabilities = IFCAP_VLAN_HWTAGGING; /* see if we can enable hardware checksumming */ if (gx->gx_vflags & GXF_CSUM) ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capenable = ifp->if_capabilities; /* figure out transciever type */ if (gx->gx_vflags & GXF_FORCE_TBI || CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE) gx->gx_tbimode = 1; if (gx->gx_tbimode) { /* SERDES transceiver */ ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd, gx_ifmedia_sts); ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO); } else { /* GMII/MII transceiver */ gx_phy_reset(gx); if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd, gx_ifmedia_sts)) { device_printf(dev, "GMII/MII, PHY not detected\n"); error = ENXIO; goto fail; } } /* * Call MI attach routines. 
*/ ether_ifattach(ifp, gx->arpcom.ac_enaddr); GX_UNLOCK(gx); splx(s); return (0); fail: GX_UNLOCK(gx); gx_release(gx); splx(s); return (error); } static void gx_release(struct gx_softc *gx) { bus_generic_detach(gx->gx_dev); if (gx->gx_miibus) device_delete_child(gx->gx_dev, gx->gx_miibus); if (gx->gx_intrhand) bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand); if (gx->gx_irq) bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq); if (gx->gx_res) bus_release_resource(gx->gx_dev, SYS_RES_MEMORY, GX_PCI_LOMEM, gx->gx_res); } static void gx_init(void *xsc) { struct gx_softc *gx = (struct gx_softc *)xsc; struct ifmedia *ifm; struct ifnet *ifp; device_t dev; u_int16_t *m; u_int32_t ctrl; int s, i, tmp; dev = gx->gx_dev; ifp = &gx->arpcom.ac_if; s = splimp(); GX_LOCK(gx); /* Disable host interrupts, halt chip. */ gx_reset(gx); /* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */ gx_stop(gx); /* Load our MAC address, invalidate other 15 RX addresses. */ m = (u_int16_t *)&gx->arpcom.ac_enaddr[0]; CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]); CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID); for (i = 1; i < 16; i++) CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0); /* Program multicast filter. */ gx_setmulti(gx); /* Init RX ring. */ gx_init_rx_ring(gx); /* Init TX ring. */ gx_init_tx_ring(gx); if (gx->gx_vflags & GXF_DMA) { /* set up DMA control */ CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000); CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000); } /* enable receiver */ ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K; ctrl |= GX_RXC_BCAST_ACCEPT; /* Enable or disable promiscuous mode as needed. 
*/ if (ifp->if_flags & IFF_PROMISC) ctrl |= GX_RXC_UNI_PROMISC; /* This is required if we want to accept jumbo frames */ if (ifp->if_mtu > ETHERMTU) ctrl |= GX_RXC_LONG_PKT_ENABLE; /* setup receive checksum control */ if (ifp->if_capenable & IFCAP_RXCSUM) CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL, GX_CSUM_TCP/* | GX_CSUM_IP*/); /* setup transmit checksum control */ if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = GX_CSUM_FEATURES; else ifp->if_hwassist = 0; ctrl |= GX_RXC_STRIP_ETHERCRC; /* not on 82542? */ CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl); /* enable transmitter */ ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16; /* XXX we should support half-duplex here too... */ ctrl |= GX_TXC_COLL_TIME_FDX; CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl); /* * set up recommended IPG times, which vary depending on chip type: * IPG transmit time: 80ns * IPG receive time 1: 20ns * IPG receive time 2: 80ns */ CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg); /* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */ CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001); CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100); /* set up 802.3x MAC flow control type -- 88:08 */ CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808); /* Set up tuneables */ CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay); CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay); /* * Configure chip for correct operation. */ ctrl = GX_CTRL_DUPLEX; #if BYTE_ORDER == BIG_ENDIAN ctrl |= GX_CTRL_BIGENDIAN; #endif ctrl |= GX_CTRL_VLAN_ENABLE; if (gx->gx_tbimode) { /* * It seems that TXCW must be initialized from the EEPROM * manually. * * XXX * should probably read the eeprom and re-insert the * values here. */ #define TXCONFIG_WORD 0x000001A0 CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD); /* turn on hardware autonegotiate */ GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG); } else { /* * Auto-detect speed from PHY, instead of using direct * indication. 
The SLU bit doesn't force the link, but * must be present for ASDE to work. */ gx_phy_reset(gx); ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED; } /* * Take chip out of reset and start it running. */ CSR_WRITE_4(gx, GX_CTRL, ctrl); /* Turn interrupts on. */ CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* * Set the current media. */ if (gx->gx_miibus != NULL) { mii_mediachg(device_get_softc(gx->gx_miibus)); } else { ifm = &gx->gx_media; tmp = ifm->ifm_media; ifm->ifm_media = ifm->ifm_cur->ifm_media; gx_ifmedia_upd(ifp); ifm->ifm_media = tmp; } /* * XXX * Have the LINK0 flag force the link in TBI mode. */ if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) { GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG); GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP); } #if 0 printf("66mhz: %s 64bit: %s\n", CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no", CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no"); #endif GX_UNLOCK(gx); splx(s); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static void gx_shutdown(device_t dev) { struct gx_softc *gx; gx = device_get_softc(dev); gx_reset(gx); gx_stop(gx); } static int gx_detach(device_t dev) { struct gx_softc *gx; struct ifnet *ifp; int s; s = splimp(); gx = device_get_softc(dev); ifp = &gx->arpcom.ac_if; GX_LOCK(gx); ether_ifdetach(ifp); gx_reset(gx); gx_stop(gx); ifmedia_removeall(&gx->gx_media); gx_release(gx); contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF); GX_UNLOCK(gx); mtx_destroy(&gx->gx_mtx); splx(s); return (0); } static void gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest) { u_int16_t word = 0; u_int32_t base, reg; int x; addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) | (addr & ((1 << GX_EE_ADDR_SIZE) - 1)); base = CSR_READ_4(gx, GX_EEPROM_CTRL); base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK); base |= GX_EE_SELECT; CSR_WRITE_4(gx, GX_EEPROM_CTRL, base); for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) { reg = base | (addr & x ? GX_EE_DATA_IN : 0); CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg); DELAY(10); CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK); DELAY(10); CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg); DELAY(10); } for (x = 1 << 15; x; x >>= 1) { CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK); DELAY(10); reg = CSR_READ_4(gx, GX_EEPROM_CTRL); if (reg & GX_EE_DATA_OUT) word |= x; CSR_WRITE_4(gx, GX_EEPROM_CTRL, base); DELAY(10); } CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT); DELAY(10); *dest = word; } static int gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt) { u_int16_t *word; int i; word = (u_int16_t *)dest; for (i = 0; i < cnt; i ++) { gx_eeprom_getword(gx, off + i, word); word++; } return (0); } /* * Set media options. 
*/ static int gx_ifmedia_upd(struct ifnet *ifp) { struct gx_softc *gx; struct ifmedia *ifm; struct mii_data *mii; gx = ifp->if_softc; if (gx->gx_tbimode) { ifm = &gx->gx_media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET); GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG); GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET); break; case IFM_1000_SX: device_printf(gx->gx_dev, "manual config not supported yet.\n"); #if 0 GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG); config = /* bit symbols for 802.3z */0; ctrl |= GX_CTRL_SET_LINK_UP; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ctrl |= GX_CTRL_DUPLEX; #endif break; default: return (EINVAL); } } else { ifm = &gx->gx_media; /* * 1000TX half duplex does not work. */ if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER && IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T && (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0) return (EINVAL); mii = device_get_softc(gx->gx_miibus); mii_mediachg(mii); } return (0); } /* * Report current media status. */ static void gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct gx_softc *gx; struct mii_data *mii; u_int32_t status; gx = ifp->if_softc; if (gx->gx_tbimode) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; status = CSR_READ_4(gx, GX_STATUS); if ((status & GX_STAT_LINKUP) == 0) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; } else { mii = device_get_softc(gx->gx_miibus); mii_pollstat(mii); if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) == (IFM_1000_T | IFM_HDX)) mii->mii_media_active = IFM_ETHER | IFM_NONE; ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } } static void gx_mii_shiftin(struct gx_softc *gx, int data, int length) { u_int32_t reg, x; /* * Set up default GPIO direction + PHY data out. 
*/ reg = CSR_READ_4(gx, GX_CTRL); reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK); reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR; /* * Shift in data to PHY. */ for (x = 1 << (length - 1); x; x >>= 1) { if (data & x) reg |= GX_CTRL_PHY_IO; else reg &= ~GX_CTRL_PHY_IO; CSR_WRITE_4(gx, GX_CTRL, reg); DELAY(10); CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK); DELAY(10); CSR_WRITE_4(gx, GX_CTRL, reg); DELAY(10); } } static u_int16_t gx_mii_shiftout(struct gx_softc *gx) { u_int32_t reg; u_int16_t data; int x; /* * Set up default GPIO direction + PHY data in. */ reg = CSR_READ_4(gx, GX_CTRL); reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK); reg |= GX_CTRL_GPIO_DIR; CSR_WRITE_4(gx, GX_CTRL, reg); DELAY(10); CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK); DELAY(10); CSR_WRITE_4(gx, GX_CTRL, reg); DELAY(10); /* * Shift out data from PHY. */ data = 0; for (x = 1 << 15; x; x >>= 1) { CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK); DELAY(10); if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO) data |= x; CSR_WRITE_4(gx, GX_CTRL, reg); DELAY(10); } CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK); DELAY(10); CSR_WRITE_4(gx, GX_CTRL, reg); DELAY(10); return (data); } static int gx_miibus_readreg(device_t dev, int phy, int reg) { struct gx_softc *gx; gx = device_get_softc(dev); if (gx->gx_tbimode) return (0); /* * XXX * Note: Cordova has a MDIC register. 
livingood and < have mii bits */ gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN); gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) | (phy << 5) | reg, GX_PHY_READ_LEN); return (gx_mii_shiftout(gx)); } static void gx_miibus_writereg(device_t dev, int phy, int reg, int value) { struct gx_softc *gx; gx = device_get_softc(dev); if (gx->gx_tbimode) return; gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN); gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) | (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) | (value & 0xffff), GX_PHY_WRITE_LEN); } static void gx_miibus_statchg(device_t dev) { struct gx_softc *gx; struct mii_data *mii; int reg, s; gx = device_get_softc(dev); if (gx->gx_tbimode) return; /* * Set flow control behavior to mirror what PHY negotiated. */ mii = device_get_softc(gx->gx_miibus); s = splimp(); GX_LOCK(gx); reg = CSR_READ_4(gx, GX_CTRL); if (mii->mii_media_active & IFM_FLAG0) reg |= GX_CTRL_RX_FLOWCTRL; else reg &= ~GX_CTRL_RX_FLOWCTRL; if (mii->mii_media_active & IFM_FLAG1) reg |= GX_CTRL_TX_FLOWCTRL; else reg &= ~GX_CTRL_TX_FLOWCTRL; CSR_WRITE_4(gx, GX_CTRL, reg); GX_UNLOCK(gx); splx(s); } static int gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct gx_softc *gx = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int s, mask, error = 0; s = splimp(); GX_LOCK(gx); switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu > GX_MAX_MTU) { error = EINVAL; } else { ifp->if_mtu = ifr->ifr_mtu; gx_init(gx); } break; case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0) { gx_stop(gx); } else if (ifp->if_flags & IFF_RUNNING && ((ifp->if_flags & IFF_PROMISC) != (gx->gx_if_flags & IFF_PROMISC))) { if (ifp->if_flags & IFF_PROMISC) GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC); else GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC); } else { gx_init(gx); } gx->gx_if_flags = ifp->if_flags; break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_flags & IFF_RUNNING) 
gx_setmulti(gx); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (gx->gx_miibus != NULL) { mii = device_get_softc(gx->gx_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else { error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command); } break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_HWCSUM) { if (IFCAP_HWCSUM & ifp->if_capenable) ifp->if_capenable &= ~IFCAP_HWCSUM; else ifp->if_capenable |= IFCAP_HWCSUM; if (ifp->if_flags & IFF_RUNNING) gx_init(gx); } break; default: error = ether_ioctl(ifp, command, data); break; } GX_UNLOCK(gx); splx(s); return (error); } static void gx_phy_reset(struct gx_softc *gx) { int reg; GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP); /* * PHY reset is active low. */ reg = CSR_READ_4(gx, GX_CTRL_EXT); reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET); reg |= GX_CTRLX_GPIO_DIR; CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET); DELAY(10); CSR_WRITE_4(gx, GX_CTRL_EXT, reg); DELAY(10); CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET); DELAY(10); #if 0 /* post-livingood (cordova) only */ GX_SETBIT(gx, GX_CTRL, 0x80000000); DELAY(1000); GX_CLRBIT(gx, GX_CTRL, 0x80000000); #endif } static void gx_reset(struct gx_softc *gx) { /* Disable host interrupts. */ CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL); /* reset chip (THWAP!) */ GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET); DELAY(10); } static void gx_stop(struct gx_softc *gx) { struct ifnet *ifp; ifp = &gx->arpcom.ac_if; /* reset and flush transmitter */ CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET); /* reset and flush receiver */ CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET); /* reset link */ if (gx->gx_tbimode) GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET); /* Free the RX lists. */ gx_free_rx_ring(gx); /* Free TX buffers. 
*/ gx_free_tx_ring(gx); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); } static void gx_watchdog(struct ifnet *ifp) { struct gx_softc *gx; gx = ifp->if_softc; device_printf(gx->gx_dev, "watchdog timeout -- resetting\n"); gx_reset(gx); gx_init(gx); ifp->if_oerrors++; } /* * Intialize a receive ring descriptor. */ static int gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m) { struct mbuf *m_new = NULL; struct gx_rx_desc *r; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { device_printf(gx->gx_dev, "mbuf allocation failed -- packet dropped\n"); return (ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if ((m_new->m_flags & M_EXT) == 0) { device_printf(gx->gx_dev, "cluster allocation failed -- packet dropped\n"); m_freem(m_new); return (ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m->m_len = m->m_pkthdr.len = MCLBYTES; m->m_data = m->m_ext.ext_buf; m->m_next = NULL; m_new = m; } /* * XXX * this will _NOT_ work for large MTU's; it will overwrite * the end of the buffer. E.g.: take this out for jumbograms, * but then that breaks alignment. */ if (gx->arpcom.ac_if.if_mtu <= ETHERMTU) m_adj(m_new, ETHER_ALIGN); gx->gx_cdata.gx_rx_chain[idx] = m_new; r = &gx->gx_rdata->gx_rx_ring[idx]; r->rx_addr = vtophys(mtod(m_new, caddr_t)); r->rx_staterr = 0; return (0); } /* * The receive ring can have up to 64K descriptors, which at 2K per mbuf * cluster, could add up to 128M of memory. Due to alignment constraints, * the number of descriptors must be a multiple of 8. For now, we * allocate 256 entries and hope that our CPU is fast enough to keep up * with the NIC. 
*/ static int gx_init_rx_ring(struct gx_softc *gx) { int i, error; for (i = 0; i < GX_RX_RING_CNT; i++) { error = gx_newbuf(gx, i, NULL); if (error) return (error); } /* bring receiver out of reset state, leave disabled */ CSR_WRITE_4(gx, GX_RX_CONTROL, 0); /* set up ring registers */ CSR_WRITE_8(gx, gx->gx_reg.r_rx_base, (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring)); CSR_WRITE_4(gx, gx->gx_reg.r_rx_length, GX_RX_RING_CNT * sizeof(struct gx_rx_desc)); CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0); CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1); gx->gx_rx_tail_idx = 0; return (0); } static void gx_free_rx_ring(struct gx_softc *gx) { struct mbuf **mp; int i; mp = gx->gx_cdata.gx_rx_chain; for (i = 0; i < GX_RX_RING_CNT; i++, mp++) { if (*mp != NULL) { m_freem(*mp); *mp = NULL; } } bzero((void *)gx->gx_rdata->gx_rx_ring, GX_RX_RING_CNT * sizeof(struct gx_rx_desc)); /* release any partially-received packet chain */ if (gx->gx_pkthdr != NULL) { m_freem(gx->gx_pkthdr); gx->gx_pkthdr = NULL; } } static int gx_init_tx_ring(struct gx_softc *gx) { /* bring transmitter out of reset state, leave disabled */ CSR_WRITE_4(gx, GX_TX_CONTROL, 0); /* set up ring registers */ CSR_WRITE_8(gx, gx->gx_reg.r_tx_base, (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring)); CSR_WRITE_4(gx, gx->gx_reg.r_tx_length, GX_TX_RING_CNT * sizeof(struct gx_tx_desc)); CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0); CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0); gx->gx_tx_head_idx = 0; gx->gx_tx_tail_idx = 0; gx->gx_txcnt = 0; /* set up initial TX context */ gx->gx_txcontext = GX_TXCONTEXT_NONE; return (0); } static void gx_free_tx_ring(struct gx_softc *gx) { struct mbuf **mp; int i; mp = gx->gx_cdata.gx_tx_chain; for (i = 0; i < GX_TX_RING_CNT; i++, mp++) { if (*mp != NULL) { m_freem(*mp); *mp = NULL; } } bzero((void *)&gx->gx_rdata->gx_tx_ring, GX_TX_RING_CNT * sizeof(struct gx_tx_desc)); } static void gx_setmulti(struct gx_softc *gx) { int i; /* wipe out the multicast table */ for (i = 1; i < 128; i++) 
CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
}

/*
 * Receive-completion handler: walk the RX ring from the last-seen
 * tail, reassembling packets that span multiple 2K descriptors and
 * handing finished frames to the stack.
 */
static void
gx_rxeof(struct gx_softc *gx)
{
    struct gx_rx_desc *rx;
    struct ifnet *ifp;
    int idx, staterr, len;
    struct mbuf *m;

    gx->gx_rx_interrupts++;

    ifp = &gx->arpcom.ac_if;
    idx = gx->gx_rx_tail_idx;

    while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {

        rx = &gx->gx_rdata->gx_rx_ring[idx];
        m = gx->gx_cdata.gx_rx_chain[idx];

        /*
         * gx_newbuf overwrites status and length bits, so we
         * make a copy of them here.
         */
        len = rx->rx_len;
        staterr = rx->rx_staterr;

        if (staterr & GX_INPUT_ERROR)
            goto ierror;

        /* Refill this slot before advancing past it. */
        if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
            goto ierror;

        GX_INC(idx, GX_RX_RING_CNT);

        if (staterr & GX_RXSTAT_INEXACT_MATCH) {
            /*
             * multicast packet, must verify against
             * multicast address.
             */
        }

        /*
         * Not the last descriptor of the packet: append this mbuf
         * to the pending chain (gx_pkthdr/gx_pktnextp) and go on.
         */
        if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
            if (gx->gx_pkthdr == NULL) {
                m->m_len = len;
                m->m_pkthdr.len = len;
                gx->gx_pkthdr = m;
                gx->gx_pktnextp = &m->m_next;
            } else {
                m->m_len = len;
                m->m_flags &= ~M_PKTHDR;
                gx->gx_pkthdr->m_pkthdr.len += len;
                *(gx->gx_pktnextp) = m;
                gx->gx_pktnextp = &m->m_next;
            }
            continue;
        }

        /* Last (or only) descriptor: complete the chain. */
        if (gx->gx_pkthdr == NULL) {
            m->m_len = len;
            m->m_pkthdr.len = len;
        } else {
            m->m_len = len;
            m->m_flags &= ~M_PKTHDR;
            gx->gx_pkthdr->m_pkthdr.len += len;
            *(gx->gx_pktnextp) = m;
            m = gx->gx_pkthdr;
            gx->gx_pkthdr = NULL;
        }

        ifp->if_ipackets++;
        m->m_pkthdr.rcvif = ifp;

#define IP_CSMASK (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
#define TCP_CSMASK \
    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
        if (ifp->if_capenable & IFCAP_RXCSUM) {
#if 0
            /*
             * Intel Erratum #23 indicates that the Receive IP
             * Checksum offload feature has been completely
             * disabled.
*/ if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((staterr & GX_RXERR_IP_CSUM) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } #endif if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } /* * If we received a packet with a vlan tag, * mark the packet before it's passed up. */ if (staterr & GX_RXSTAT_VLAN_PKT) { VLAN_INPUT_TAG(ifp, m, rx->rx_special, continue); } (*ifp->if_input)(ifp, m); continue; ierror: ifp->if_ierrors++; gx_newbuf(gx, idx, m); /* * XXX * this isn't quite right. Suppose we have a packet that * spans 5 descriptors (9K split into 2K buffers). If * the 3rd descriptor sets an error, we need to ignore * the last two. The way things stand now, the last two * will be accepted as a single packet. * * we don't worry about this -- the chip may not set an * error in this case, and the checksum of the upper layers * will catch the error. */ if (gx->gx_pkthdr != NULL) { m_freem(gx->gx_pkthdr); gx->gx_pkthdr = NULL; } GX_INC(idx, GX_RX_RING_CNT); } gx->gx_rx_tail_idx = idx; if (--idx < 0) idx = GX_RX_RING_CNT - 1; CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx); } static void gx_txeof(struct gx_softc *gx) { struct ifnet *ifp; int idx, cnt; gx->gx_tx_interrupts++; ifp = &gx->arpcom.ac_if; idx = gx->gx_tx_head_idx; cnt = gx->gx_txcnt; /* * If the system chipset performs I/O write buffering, it is * possible for the PIO read of the head descriptor to bypass the * memory write of the descriptor, resulting in reading a descriptor * which has not been updated yet. 
*/
    while (cnt) {
        struct gx_tx_desc_old *tx;

        tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
        cnt--;

        /* Skip descriptors that are not the last one of a packet. */
        if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
            GX_INC(idx, GX_TX_RING_CNT);
            continue;
        }

        /* End-of-packet descriptor not done yet: stop reclaiming. */
        if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
            break;

        ifp->if_opackets++;

        /* Packet fully sent: free its chain and commit progress. */
        m_freem(gx->gx_cdata.gx_tx_chain[idx]);
        gx->gx_cdata.gx_tx_chain[idx] = NULL;
        gx->gx_txcnt = cnt;
        ifp->if_timer = 0;

        GX_INC(idx, GX_TX_RING_CNT);
        gx->gx_tx_head_idx = idx;
    }

    if (gx->gx_txcnt == 0)
        ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Interrupt handler: ack and dispatch RX/TX completions and link
 * changes, then restart transmission if packets are queued.
 */
static void
gx_intr(void *xsc)
{
    struct gx_softc *gx;
    struct ifnet *ifp;
    u_int32_t intr;
    int s;

    gx = xsc;
    ifp = &gx->arpcom.ac_if;

    s = splimp();

    gx->gx_interrupts++;

    /* Disable host interrupts. */
    CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

    /*
     * find out why we're being bothered.
     * reading this register automatically clears all bits.
     */
    intr = CSR_READ_4(gx, GX_INT_READ);

    /* Check RX return ring producer/consumer */
    if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
        gx_rxeof(gx);

    /* Check TX ring producer/consumer */
    if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
        gx_txeof(gx);

    /*
     * handle other interrupts here.
     */

    /*
     * Link change interrupts are not reliable; the interrupt may
     * not be generated if the link is lost.  However, the register
     * read is reliable, so check that.  Use SEQ errors to possibly
     * indicate that the link has changed.
     */
    if (intr & GX_INT_LINK_CHANGE) {
        if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
            device_printf(gx->gx_dev, "link down\n");
        } else {
            device_printf(gx->gx_dev, "link up\n");
        }
    }

    /* Turn interrupts on. */
    CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

    if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
        gx_start(ifp);

    splx(s);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
*/
static int
gx_encap(struct gx_softc *gx, struct mbuf *m_head)
{
    struct gx_tx_desc_data *tx = NULL;
    struct gx_tx_desc_ctx *tctx;
    struct mbuf *m;
    int idx, cnt, csumopts, txcontext;
    struct m_tag *mtag;

    cnt = gx->gx_txcnt;
    idx = gx->gx_tx_tail_idx;
    txcontext = gx->gx_txcontext;

    /*
     * Insure we have at least 4 descriptors pre-allocated.
     */
    if (cnt >= GX_TX_RING_CNT - 4)
        return (ENOBUFS);

    /*
     * Set up the appropriate offload context if necessary: when the
     * packet requests IP/TCP/UDP checksum offload and the required
     * context differs from the one currently programmed, emit a
     * context descriptor describing the checksum field offsets.
     */
    csumopts = 0;
    if (m_head->m_pkthdr.csum_flags) {
        if (m_head->m_pkthdr.csum_flags & CSUM_IP)
            csumopts |= GX_TXTCP_OPT_IP_CSUM;
        if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
            csumopts |= GX_TXTCP_OPT_TCP_CSUM;
            txcontext = GX_TXCONTEXT_TCPIP;
        } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
            csumopts |= GX_TXTCP_OPT_TCP_CSUM;
            txcontext = GX_TXCONTEXT_UDPIP;
        } else if (txcontext == GX_TXCONTEXT_NONE)
            txcontext = GX_TXCONTEXT_TCPIP;
        /* Context already loaded in the chip: nothing to emit. */
        if (txcontext == gx->gx_txcontext)
            goto context_done;

        tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
        tctx->tx_ip_csum_start = ETHER_HDR_LEN;
        tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
        tctx->tx_ip_csum_offset =
            ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
        tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
        tctx->tx_tcp_csum_end = 0;
        if (txcontext == GX_TXCONTEXT_TCPIP)
            tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
                sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
        else
            tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
                sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
        tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
        tctx->tx_type = 0;
        tctx->tx_status = 0;
        GX_INC(idx, GX_TX_RING_CNT);
        cnt++;
    }

context_done:

    /*
     * Start packing the mbufs in this chain into the transmit
     * descriptors.  Stop when we run out of descriptors or hit
     * the end of the mbuf chain.
*/
    for (m = m_head; m != NULL; m = m->m_next) {
        if (m->m_len == 0)
            continue;

        if (cnt == GX_TX_RING_CNT) {
            printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
            return (ENOBUFS);
        }

        /* One data descriptor per non-empty mbuf in the chain. */
        tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
        tx->tx_addr = vtophys(mtod(m, vm_offset_t));
        tx->tx_status = 0;
        tx->tx_len = m->m_len;
        if (gx->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
            tx->tx_type = 1;
            tx->tx_command = GX_TXTCP_EXTENSION;
            tx->tx_options = csumopts;
        } else {
            /*
             * This is really a struct gx_tx_desc_old.
             */
            tx->tx_command = 0;
        }
        GX_INC(idx, GX_TX_RING_CNT);
        cnt++;
    }

    if (tx != NULL) {
        /* Finish off the last descriptor of the packet. */
        tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
            GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
        /*
         * NOTE(review): 'm' is always NULL here -- the loop above
         * only terminates when m == NULL.  Presumably this was meant
         * to be 'm_head'; confirm against VLAN_OUTPUT_TAG()'s
         * handling of a NULL mbuf before relying on VLAN output.
         */
        mtag = VLAN_OUTPUT_TAG(&gx->arpcom.ac_if, m);
        if (mtag != NULL) {
            tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
            tx->tx_vlan = VLAN_TAG_VALUE(mtag);
        }
        gx->gx_txcnt = cnt;
        gx->gx_tx_tail_idx = idx;
        gx->gx_txcontext = txcontext;
        /* Record the chain on its last descriptor for reclaim. */
        idx = GX_PREV(idx, GX_TX_RING_CNT);
        gx->gx_cdata.gx_tx_chain[idx] = m_head;

        CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
    }

    return (0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
gx_start(struct ifnet *ifp)
{
    struct gx_softc *gx;
    struct mbuf *m_head;
    int s;

    s = splimp();

    gx = ifp->if_softc;

    for (;;) {
        IF_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        /*
         * Pack the data into the transmit ring.  If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (gx_encap(gx, m_head) != 0) {
            IF_PREPEND(&ifp->if_snd, m_head);
            ifp->if_flags |= IFF_OACTIVE;
            break;
        }

        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to him.
         */
        BPF_MTAP(ifp, m_head);

        /*
         * Set a timeout in case the chip goes out to lunch.
*/ ifp->if_timer = 5; } splx(s); } Index: head/sys/dev/hifn/hifn7751.c =================================================================== --- head/sys/dev/hifn/hifn7751.c (revision 129878) +++ head/sys/dev/hifn/hifn7751.c (revision 129879) @@ -1,2771 +1,2772 @@ /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ /* * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * Copyright (c) 2003 Hifn Inc. * * This driver is based on a previous driver by Invertex, for which they * requested: Please send any comments, feedback, bug-fixes, or feature * requests to software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); /* * Driver for various Hifn encryption processors. */ #include "opt_hifn.h" #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HIFN_RNDTEST #include #endif #include #include /* * Prototypes and count for the pci_device structure */ static int hifn_probe(device_t); static int hifn_attach(device_t); static int hifn_detach(device_t); static int hifn_suspend(device_t); static int hifn_resume(device_t); static void hifn_shutdown(device_t); static device_method_t hifn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hifn_probe), DEVMETHOD(device_attach, hifn_attach), DEVMETHOD(device_detach, hifn_detach), DEVMETHOD(device_suspend, hifn_suspend), DEVMETHOD(device_resume, hifn_resume), DEVMETHOD(device_shutdown, hifn_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t hifn_driver = { "hifn", hifn_methods, sizeof (struct hifn_softc) }; static devclass_t hifn_devclass; DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0); MODULE_DEPEND(hifn, crypto, 1, 1, 1); #ifdef 
HIFN_RNDTEST MODULE_DEPEND(hifn, rndtest, 1, 1, 1); #endif static void hifn_reset_board(struct hifn_softc *, int); static void hifn_reset_puc(struct hifn_softc *); static void hifn_puc_wait(struct hifn_softc *); static int hifn_enable_crypto(struct hifn_softc *); static void hifn_set_retry(struct hifn_softc *sc); static void hifn_init_dma(struct hifn_softc *); static void hifn_init_pci_registers(struct hifn_softc *); static int hifn_sramsize(struct hifn_softc *); static int hifn_dramsize(struct hifn_softc *); static int hifn_ramtype(struct hifn_softc *); static void hifn_sessions(struct hifn_softc *); static void hifn_intr(void *); static u_int hifn_write_command(struct hifn_command *, u_int8_t *); static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); static int hifn_newsession(void *, u_int32_t *, struct cryptoini *); static int hifn_freesession(void *, u_int64_t); static int hifn_process(void *, struct cryptop *, int); static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); static int hifn_init_pubrng(struct hifn_softc *); static void hifn_rng(void *); static void hifn_tick(void *); static void hifn_abort(struct hifn_softc *); static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); static __inline__ u_int32_t READ_REG_0(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); sc->sc_bar0_lastreg = (bus_size_t) -1; return (v); } #define 
WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) static __inline__ u_int32_t READ_REG_1(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); sc->sc_bar1_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters"); #ifdef HIFN_DEBUG static int hifn_debug = 0; SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 0, "control debugging msgs"); #endif static struct hifn_stats hifnstats; SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, hifn_stats, "driver statistics"); static int hifn_maxbatch = 1; SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 0, "max ops to batch w/o interrupt"); /* * Probe for a supported device. The PCI vendor and device * IDs are used to detect devices we know how to handle. */ static int hifn_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) return (0); if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) return (0); if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) return (0); return (ENXIO); } static void hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static const char* hifn_partname(struct hifn_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_HIFN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; 
case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; } return "Hifn unknown-part"; case PCI_VENDOR_INVERTEX: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; } return "Invertex unknown-part"; case PCI_VENDOR_NETSEC: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; } return "NetSec unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE); } /* * Attach an interface that successfully probed. */ static int hifn_attach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); u_int32_t cmd; caddr_t kva; int rseg, rid; char rbase; u_int16_t ena, rev; KASSERT(sc != NULL, ("hifn_attach: null software carrier!")); bzero(sc, sizeof (*sc)); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF); /* XXX handle power management */ /* * The 7951 and 795x have a random number generator and * public key support; note this. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; /* * The 7811 has a random number generator and * we also note it's identity 'cuz of some quirks. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; /* * The 795x parts support AES. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; /* * Configure support for memory-mapped access to * registers and for DMA operations. 
*/ #define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN) cmd = pci_read_config(dev, PCIR_COMMAND, 4); cmd |= PCIM_ENA; pci_write_config(dev, PCIR_COMMAND, cmd, 4); cmd = pci_read_config(dev, PCIR_COMMAND, 4); if ((cmd & PCIM_ENA) != PCIM_ENA) { device_printf(dev, "failed to enable %s\n", (cmd & PCIM_ENA) == 0 ? "memory mapping & bus mastering" : (cmd & PCIM_CMD_MEMEN) == 0 ? "memory mapping" : "bus mastering"); goto fail_pci; } #undef PCIM_ENA /* * Setup PCI resources. Note that we record the bus * tag and handle for each register mapping, this is * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, * and WRITE_REG_1 macros throughout the driver. */ rid = HIFN_BAR0; sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar0res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 0); goto fail_pci; } sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); sc->sc_bar0_lastreg = (bus_size_t) -1; rid = HIFN_BAR1; sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar1res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 1); goto fail_io0; } sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); sc->sc_bar1_lastreg = (bus_size_t) -1; hifn_set_retry(sc); /* * Setup the area where the Hifn DMA's descriptors * and associated data structures. 
*/ if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HIFN_MAX_DMALEN, /* maxsize */ MAX_SCATTER, /* nsegments */ HIFN_MAX_SEGLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto fail_io1; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot create dma map\n"); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot alloc dma buffer\n"); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, sizeof (*sc->sc_dma), hifn_dmamap_cb, &sc->sc_dma_physaddr, BUS_DMA_NOWAIT)) { device_printf(dev, "cannot load dma map\n"); bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); /* * Reset the board and do the ``secret handshake'' * to enable the crypto support. Then complete the * initialization procedure by setting up the interrupt * and hooking in to the system crypto support so we'll * get used for system services like the crypto device, * IPsec, RNG device, etc. 
*/ hifn_reset_board(sc, 0); if (hifn_enable_crypto(sc) != 0) { device_printf(dev, "crypto enabling failed\n"); goto fail_mem; } hifn_reset_puc(sc); hifn_init_dma(sc); hifn_init_pci_registers(sc); /* XXX can't dynamically determine ram type for 795x; force dram */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_drammodel = 1; else if (hifn_ramtype(sc)) goto fail_mem; if (sc->sc_drammodel == 0) hifn_sramsize(sc); else hifn_dramsize(sc); /* * Workaround for NetSec 7751 rev A: half ram size because two * of the address lines were left floating */ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && pci_get_revid(dev) == 0x61) /*XXX???*/ sc->sc_ramsize >>= 1; /* * Arrange the interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto fail_mem; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is marked appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, hifn_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "could not setup interrupt\n"); goto fail_intr2; } hifn_sessions(sc); /* * NB: Keep only the low 16 bits; this masks the chip id * from the 7951. */ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; rseg = sc->sc_ramsize / 1024; rbase = 'K'; if (sc->sc_ramsize >= (1024 * 1024)) { rbase = 'M'; rseg /= 1024; } device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n", hifn_partname(sc), rev, rseg, rbase, sc->sc_drammodel ? 
'd' : 's', sc->sc_maxses); sc->sc_cid = crypto_get_driverid(0); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto fail_intr; } WRITE_REG_0(sc, HIFN_0_PUCNFG, READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; switch (ena) { case HIFN_PUSTAT_ENA_2: crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); if (sc->sc_flags & HIFN_HAS_AES) crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); /*FALLTHROUGH*/ case HIFN_PUSTAT_ENA_1: crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, hifn_newsession, hifn_freesession, hifn_process, sc); break; } bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) hifn_init_pubrng(sc); callout_init(&sc->sc_tickto, CALLOUT_MPSAFE); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); return (0); fail_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); fail_intr2: /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); fail_mem: bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | 
HIFN_DMACNFG_MODE); fail_io1: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); fail_io0: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); fail_pci: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach an interface that successfully probed. */ static int hifn_detach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); /* disable interrupts */ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); /*XXX other resources */ callout_stop(&sc->sc_tickto); callout_stop(&sc->sc_rngto); #ifdef HIFN_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); crypto_unregister_all(sc->sc_cid); bus_generic_detach(dev); /*XXX should be no children, right? */ bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void hifn_shutdown(device_t dev) { #ifdef notyet hifn_stop(device_get_softc(dev)); #endif } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. 
*/ static int hifn_suspend(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet int i; hifn_stop(sc); for (i = 0; i < 5; i++) sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4); sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); #endif sc->sc_suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int hifn_resume(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet int i; /* better way to do this? */ for (i = 0; i < 5; i++) pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4); pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); /* reenable busmastering */ pci_enable_busmaster(dev); pci_enable_io(dev, HIFN_RES); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init(sc); #endif sc->sc_suspended = 0; return (0); } static int hifn_init_pubrng(struct hifn_softc *sc) { u_int32_t r; int i; #ifdef HIFN_RNDTEST sc->sc_rndtest = rndtest_attach(sc->sc_dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if ((sc->sc_flags & HIFN_IS_7811) == 0) { /* Reset 7951 public key/rng engine */ WRITE_REG_1(sc, HIFN_1_PUB_RESET, READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); for (i = 0; i < 100; i++) { DELAY(1000); if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } if (i == 100) { device_printf(sc->sc_dev, "public key init failed\n"); return (1); } } /* Enable the rng, if available */ 
if (sc->sc_flags & HIFN_HAS_RNG) { if (sc->sc_flags & HIFN_IS_7811) { r = READ_REG_1(sc, HIFN_1_7811_RNGENA); if (r & HIFN_7811_RNGENA_ENA) { r &= ~HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, HIFN_7811_RNGCFG_DEFL); r |= HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } else WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, READ_REG_1(sc, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); sc->sc_rngfirst = 1; if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, CALLOUT_MPSAFE); callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); } /* Enable public key engine, if available */ if (sc->sc_flags & HIFN_HAS_PUBLIC) { WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } return (0); } static void hifn_rng(void *vsc) { #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 struct hifn_softc *sc = vsc; u_int32_t sts, num[2]; int i; if (sc->sc_flags & HIFN_IS_7811) { for (i = 0; i < 5; i++) { sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); if (sts & HIFN_7811_RNGSTS_UFL) { device_printf(sc->sc_dev, "RNG underflow: disabling\n"); return; } if ((sts & HIFN_7811_RNGSTS_RDY) == 0) break; /* * There are at least two words in the RNG FIFO * at this point. 
*/
            num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
            num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
            /* NB: discard first data read */
            if (sc->sc_rngfirst)
                sc->sc_rngfirst = 0;
            else
                (*sc->sc_harvest)(sc->sc_rndtest,
                    num, sizeof (num));
        }
    } else {
        /* Non-7811 parts: a single 32-bit read per poll. */
        num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
        /* NB: discard first data read */
        if (sc->sc_rngfirst)
            sc->sc_rngfirst = 0;
        else
            (*sc->sc_harvest)(sc->sc_rndtest,
                num, sizeof (num[0]));
    }

    /* Rearm the RNG poll timer. */
    callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}

/*
 * Spin (up to 5000 x 1us) waiting for the processing unit to come
 * out of reset; complain if it never does.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
    int i;

    for (i = 5000; i > 0; i--) {
        DELAY(1);
        if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
            break;
    }
    if (!i)
        device_printf(sc->sc_dev, "proc unit did not reset\n");
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
    /* Reset processing unit */
    WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
    hifn_puc_wait(sc);
}

/*
 * Set the Retry and TRDY registers; note that we set them to
 * zero because the 7811 locks up when forced to retry (section
 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
 * should do this for all Hifn parts, but it doesn't seem to hurt.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
    /* NB: RETRY only responds to 8-bit reads/writes */
    pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
    /*
     * NOTE(review): TRDY is written with width 4 while RETRY uses
     * width 1 -- confirm the 4-byte config write here is intended.
     */
    pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
    u_int32_t reg;

    /*
     * Set polling in the DMA configuration register to zero.  0x7 avoids
     * resetting the board and zeros out the other fields.
     */
    WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
        HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

    /*
     * Now that polling has been disabled, we have to wait 1 ms
     * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Partial reset: keep master reset asserted, reset the PU. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* The descriptor rings are invalid after a reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1s for the 7811's context RAM to initialize. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

/*
 * Step the unlock-sequence signature "a" forward cnt times through the
 * LFSR used by the Hifn crypto-enable handshake (taps 0x80080125):
 * each step shifts left by one and feeds back the parity of the tapped
 * bits.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

/*
 * Table mapping PCI vendor/product pairs to the 13-byte key used to
 * unlock the crypto engine.  All known cards use the all-zeros key.
 */
struct pci2id {
	u_short		pci_vendor;	/* PCI vendor id */
	u_short		pci_prod;	/* PCI product id */
	char		card_id[13];	/* unlock key for this card */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up the unlock key for this vendor/product. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save register state so it can be restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
*/ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Strong crypto already enabled!\n"); #endif goto report; } if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Unknown encryption level 0x%x\n", encl); #endif return 1; } WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); DELAY(1000); addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); DELAY(1000); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); DELAY(1000); for (i = 0; i <= 12; i++) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; #ifdef HIFN_DEBUG if (hifn_debug) { if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) device_printf(sc->sc_dev, "Engine is permanently " "locked until next system reset!\n"); else device_printf(sc->sc_dev, "Engine enabled " "successfully!\n"); } #endif report: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch (encl) { case HIFN_PUSTAT_ENA_1: case HIFN_PUSTAT_ENA_2: break; case HIFN_PUSTAT_ENA_0: default: device_printf(sc->sc_dev, "disabled"); break; } return 0; } /* * Give initial values to the registers listed in the "Register Space" * section of the HIFN Software Development reference manual. 
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and clear
	 * every pending status bit (plus the 7811-only illegal read/write
	 * bits and the public-done bit where applicable).
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/* Baseline interrupt enables; C_WAIT is only turned on on demand. */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a test pattern to offset 0 and read it back; DRAM (which
	 * needs refresh setup we haven't done yet) will fail to hold the
	 * data.  Try both 0x55 and 0xaa patterns.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Size external SRAM by writing a distinct marker every 16KB from the
 * top down, then reading back from the bottom up; sc_ramsize grows as
 * long as each marker survives (aliasing overwrites markers beyond the
 * real size).  Always returns 0; sc_ramsize holds the result.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Stamp each 16KB step with its index (written high-to-low). */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Verify from the bottom; stop at the first mismatch. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Take one slot from each of the four descriptor rings, handling the
 * wrap (jump) descriptor at the end of each ring.  Returns the indices
 * via cmdp/srcp/dstp/resp and advances the corresponding "k" cleanup
 * pointers with the "i" producer indices.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

/*
 * Write 8 bytes at the given card ram address by pushing a "write ram"
 * command (masks 3<<13) through the rings, using the softc's test_src
 * scratch buffer.  Polls up to 100ms for completion.  Returns 0 on
 * success, -1 on timeout.
 */
static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	hifn_base_command_t wc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	bzero(dma->command_bufs[cmdi],
	    HIFN_MAX_COMMAND);
	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
	bcopy(data, &dma->test_src, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Poll the result descriptor until the chip clears VALID. */
	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		device_printf(sc->sc_dev, "writeramaddr -- "
		    "result[%d](addr %d) still valid\n", resi, addr);
		r = -1;
		return (-1);
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

/*
 * Counterpart of hifn_writeramaddr: read 8 bytes from the given card
 * ram address (masks 2<<13) into the caller's buffer via the test_dst
 * scratch area.  Returns 0 on success, -1 on timeout.
 */
static int
hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	hifn_base_command_t rc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, srci, dsti, resi;

	rc.masks = htole16(2 << 13);
	rc.session_num = htole16(addr >> 14);
	rc.total_source_count = htole16(addr & 0x3fff);
	rc.total_dest_count = htole16(8);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;

	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, test_src));
	dma->test_src = 0;
	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, test_dst));
dma->test_dst = 0; dma->cmdr[cmdi].l = htole32(8 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(8 | masks); dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "readramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; } else { r = 0; bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); } WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } /* * Initialize the descriptor rings. */ static void hifn_init_dma(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; int i; hifn_set_retry(sc); /* initialize static pointer values */ for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, command_bufs[i][0])); for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, result_bufs[i][0])); dma->cmdr[HIFN_D_CMD_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); dma->dstr[HIFN_D_DST_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); dma->resr[HIFN_D_RES_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; } /* * Writes out the raw command buffer space. Returns the * command buffer size. 
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: lengths are split into a low 16-bit field and
	 * high bits packed into the session_num word.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		/* Slop tail is replaced by one 32-bit scratch word. */
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* Optional crypt sub-command. */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* New MAC key, if requested. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New cipher key, if requested; layout depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* RC4: repeat the key to fill 256 bytes, then pad. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV, if requested; AES uses a longer IV than DES/3DES. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* Pure-compression commands still need 8 bytes of padding. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Return 1 if every DMA segment is 32-bit aligned (and all but the last
 * has a multiple-of-4 length) — the chip requires longword alignment.
 */
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Push the destination operand's segments onto the dst ring.  When the
 * transfer has a "slop" tail (length not a multiple of 4), the final
 * bytes are redirected into the per-command slop[] scratch word.
 * Returns the new dst producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments but the last go straight onto the ring. */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
			 * If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			/* Clone the packet header (if any) onto a new chain. */
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m,
				    M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				/* ERESTART only if work is already queued. */
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Keep appending mbufs/clusters until totlen fits. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ?
						    ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	/* Create/load the destination map if it isn't sharing src's. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst,
			    BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst,
			    BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
			    dma->srcu, cmd->src_nsegs,
			    dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* Claim a command slot (handling the ring-wrap jump descriptor). */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/*
	 * Batch completions: if more work is coming, mask the done
	 * interrupt on this result so several ops complete per interrupt.
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
	}

	/* Remember which result slot holds the slop word (if any). */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);
	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	HIFN_UNLOCK(sc);
	return (err);
}

/*
 * Once-a-second watchdog: after 5 idle seconds (sc_active counts down)
 * disable any ring engines whose rings have drained, then reschedule.
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;

	HIFN_UNLOCK(sc);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

/*
 * Interrupt handler: acknowledge DMA status, handle overruns/aborts,
 * then reap completed result/src/cmd descriptors and dispatch
 * completion callbacks.
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* Ack all the interrupts we are about to handle. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		/* Fatal: reset the board and fail outstanding ops. */
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Chip still owns this descriptor; stop reaping. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest lives 12 bytes into the result. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * Allocate a new 'session' and return an encoded session id.
 'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0;

	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot; fail if the table is full. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/*
	 * Walk the requested algorithm list.  The hardware session can
	 * carry at most one MAC and one cipher, so reject duplicates
	 * and unknown algorithms.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/*
			 * Seed the per-session IV now; AES uses a longer
			 * IV than DES/3DES.
			 */
			/* XXX this may read fewer, does it matter? */
			read_random(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
		default:
			return (EINVAL);
		}
	}
	/* At least one of MAC or cipher must have been requested. */
	if (mac == 0 && cry == 0)
		return (EINVAL);

	/* Encode unit number + slot index into the session id. */
	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
*/ static int hifn_freesession(void *arg, u_int64_t tid) { struct hifn_softc *sc = arg; int session; u_int32_t sid = CRYPTO_SESID2LID(tid); KASSERT(sc != NULL, ("hifn_freesession: null softc")); if (sc == NULL) return (EINVAL); session = HIFN_SESSION(sid); if (session >= sc->sc_maxses) return (EINVAL); bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); return (0); } static int hifn_process(void *arg, struct cryptop *crp, int hint) { struct hifn_softc *sc = arg; struct hifn_command *cmd = NULL; int session, err, ivlen; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; if (crp == NULL || crp->crp_callback == NULL) { hifnstats.hst_invalid++; return (EINVAL); } session = HIFN_SESSION(crp->crp_sid); if (sc == NULL || session >= sc->sc_maxses) { err = EINVAL; goto errout; } cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd == NULL) { hifnstats.hst_nomem++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { cmd->src_m = (struct mbuf *)crp->crp_buf; cmd->dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { cmd->src_io = (struct uio *)crp->crp_buf; cmd->dst_io = (struct uio *)crp->crp_buf; } else { err = EINVAL; goto errout; /* XXX we don't handle contiguous buffers! 
*/ } crd1 = crp->crp_desc; if (crd1 == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_SHA1 || crd1->crd_alg == CRYPTO_MD5) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_ARC4) { if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) cmd->base_masks |= HIFN_BASE_CMD_DECODE; maccrd = NULL; enccrd = crd1; } else { err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_MD5 || crd1->crd_alg == CRYPTO_SHA1) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_ARC4) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { cmd->base_masks = HIFN_BASE_CMD_DECODE; maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_ARC4 || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_MD5 || crd2->crd_alg == CRYPTO_SHA1) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the 7751 as requested */ err = EINVAL; goto errout; } } if (enccrd) { cmd->enccrd = enccrd; cmd->base_masks |= HIFN_BASE_CMD_CRYPT; switch (enccrd->crd_alg) { case CRYPTO_ARC4: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; if ((enccrd->crd_flags & CRD_F_ENCRYPT) != sc->sc_sessions[session].hs_prev_op) sc->sc_sessions[session].hs_state = HS_STATE_USED; break; case CRYPTO_DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_3DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case 
CRYPTO_AES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; default: err = EINVAL; goto errout; } if (enccrd->crd_alg != CRYPTO_ARC4) { ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); if (enccrd->crd_flags & CRD_F_ENCRYPT) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, ivlen); else bcopy(sc->sc_sessions[session].hs_iv, cmd->iv, ivlen); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback(cmd->src_m, enccrd->crd_inject, ivlen, cmd->iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback(cmd->src_io, enccrd->crd_inject, ivlen, cmd->iv); } } else { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, ivlen); else if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata(cmd->src_m, enccrd->crd_inject, ivlen, cmd->iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata(cmd->src_io, enccrd->crd_inject, ivlen, cmd->iv); } } if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; cmd->ck = enccrd->crd_key; cmd->cklen = enccrd->crd_klen >> 3; /* * Need to specify the size for the AES key in the masks. 
*/ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == HIFN_CRYPT_CMD_ALG_AES) { switch (cmd->cklen) { case 16: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; break; case 24: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; break; case 32: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; break; default: err = EINVAL; goto errout; } } if (sc->sc_sessions[session].hs_state == HS_STATE_USED) cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; } if (maccrd) { cmd->maccrd = maccrd; cmd->base_masks |= HIFN_BASE_CMD_MAC; switch (maccrd->crd_alg) { case CRYPTO_MD5: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_MD5_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; case CRYPTO_SHA1: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_SHA1_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; } if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC || maccrd->crd_alg == CRYPTO_MD5_HMAC) && sc->sc_sessions[session].hs_state == HS_STATE_USED) { cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); bzero(cmd->mac + (maccrd->crd_klen >> 3), HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); } } cmd->crp = crp; cmd->session_num = session; cmd->softc = sc; err = hifn_crypto(sc, cmd, crp, hint); if (!err) { if (enccrd) sc->sc_sessions[session].hs_prev_op = enccrd->crd_flags & CRD_F_ENCRYPT; if (sc->sc_sessions[session].hs_state == HS_STATE_USED) sc->sc_sessions[session].hs_state = HS_STATE_KEY; return 0; } else if (err == ERESTART) { /* * There weren't enough resources to dispatch the request * to the part. Notify the caller so they'll requeue this * request and resubmit it again soon. 
*/ #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "requeue request\n"); #endif free(cmd, M_DEVBUF); sc->sc_needwakeup |= CRYPTO_SYMQ; return (err); } errout: if (cmd != NULL) free(cmd, M_DEVBUF); if (err == EINVAL) hifnstats.hst_invalid++; else hifnstats.hst_nomem++; crp->crp_etype = err; crypto_done(crp); return (err); } static void hifn_abort(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; struct hifn_command *cmd; struct cryptop *crp; int i, u; i = dma->resk; u = dma->resu; while (u != 0) { cmd = dma->hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); dma->hifn_commands[i] = NULL; crp = cmd->crp; if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { /* Salvage what we can. */ u_int8_t *macbuf; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } else macbuf = NULL; hifnstats.hst_opackets++; hifn_callback(sc, cmd, macbuf); } else { if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (cmd->src_m != cmd->dst_m) { m_freem(cmd->src_m); crp->crp_buf = (caddr_t)cmd->dst_m; } /* non-shared buffers cannot be restarted */ if (cmd->src_map != cmd->dst_map) { /* * XXX should be EAGAIN, delayed until * after the reset. 
*/ crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } else crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); if (crp->crp_etype != EAGAIN) crypto_done(crp); } if (++i == HIFN_D_RES_RSIZE) i = 0; u--; } dma->resk = i; dma->resu = u; /* Force upload of key next time */ for (i = 0; i < sc->sc_maxses; i++) if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) sc->sc_sessions[i].hs_state = HS_STATE_USED; hifn_reset_board(sc, 1); hifn_init_dma(sc); hifn_init_pci_registers(sc); } static void hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) { struct hifn_dma *dma = sc->sc_dma; struct cryptop *crp = cmd->crp; struct cryptodesc *crd; struct mbuf *m; int totlen, i, u, ivlen; if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) { crp->crp_buf = (caddr_t)cmd->dst_m; totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { if (totlen < m->m_len) { m->m_len = totlen; totlen = 0; } else totlen -= m->m_len; } cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len; m_freem(cmd->src_m); } } if (cmd->sloplen != 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback((struct uio *)crp->crp_buf, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); } i = dma->dstk; u = dma->dstu; while (u != 0) { if (i == HIFN_D_DST_RSIZE) i = 0; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 
if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } dma->dstk = i; dma->dstu = u; hifnstats.hst_obytes += cmd->dst_mapsize; if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == HIFN_BASE_CMD_CRYPT) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC && crd->crd_alg != CRYPTO_AES_CBC) continue; ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip + crd->crd_len - ivlen, ivlen, cmd->softc->sc_sessions[cmd->session_num].hs_iv); else if (crp->crp_flags & CRYPTO_F_IOV) { cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip + crd->crd_len - ivlen, ivlen, cmd->softc->sc_sessions[cmd->session_num].hs_iv); } break; } } if (macbuf != NULL) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int len; if (crd->crd_alg == CRYPTO_MD5) len = 16; else if (crd->crd_alg == CRYPTO_SHA1) len = 20; else if (crd->crd_alg == CRYPTO_MD5_HMAC || crd->crd_alg == CRYPTO_SHA1_HMAC) len = 12; else continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, crd->crd_inject, len, macbuf); else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) bcopy((caddr_t)macbuf, crp->crp_mac, len); break; } } if (cmd->src_map != cmd->dst_map) { bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); crypto_done(crp); } /* * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 * and Group 1 registers; avoid conditions that could create * burst writes by doing a read in between the writes. * * NB: The read we interpose is always to the same register; * we do this because reading from an arbitrary (e.g. 
last) * register may not always work. */ static void hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar0_lastreg == reg - 4) bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); sc->sc_bar0_lastreg = reg; } bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); } static void hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar1_lastreg == reg - 4) bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); sc->sc_bar1_lastreg = reg; } bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); } Index: head/sys/dev/ichsmb/ichsmb_pci.c =================================================================== --- head/sys/dev/ichsmb/ichsmb_pci.c (revision 129878) +++ head/sys/dev/ichsmb/ichsmb_pci.c (revision 129879) @@ -1,218 +1,219 @@ /*- * ichsmb_pci.c * * Author: Archie Cobbs * Copyright (c) 2000 Whistle Communications, Inc. * All rights reserved. * Author: Archie Cobbs * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. 
* * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Support for the SMBus controller logical device which is part of the * Intel 81801AA/AB/BA/CA/DC/EB (ICH/ICH[02345]) I/O controller hub chips. 
*/ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include /* PCI unique identifiers */ #define ID_82801AA 0x24138086 #define ID_82801AB 0x24238086 #define ID_82801BA 0x24438086 #define ID_82801CA 0x24838086 #define ID_82801DC 0x24C38086 #define ID_82801EB 0x24D38086 #define PCIS_SERIALBUS_SMBUS_PROGIF 0x00 /* Internal functions */ static int ichsmb_pci_probe(device_t dev); static int ichsmb_pci_attach(device_t dev); /* Device methods */ static device_method_t ichsmb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ichsmb_pci_probe), DEVMETHOD(device_attach, ichsmb_pci_attach), /* Bus methods */ DEVMETHOD(bus_print_child, bus_generic_print_child), /* SMBus methods */ DEVMETHOD(smbus_callback, ichsmb_callback), DEVMETHOD(smbus_quick, ichsmb_quick), DEVMETHOD(smbus_sendb, ichsmb_sendb), DEVMETHOD(smbus_recvb, ichsmb_recvb), DEVMETHOD(smbus_writeb, ichsmb_writeb), DEVMETHOD(smbus_writew, ichsmb_writew), DEVMETHOD(smbus_readb, ichsmb_readb), DEVMETHOD(smbus_readw, ichsmb_readw), DEVMETHOD(smbus_pcall, ichsmb_pcall), DEVMETHOD(smbus_bwrite, ichsmb_bwrite), DEVMETHOD(smbus_bread, ichsmb_bread), { 0, 0 } }; static driver_t ichsmb_pci_driver = { "ichsmb", ichsmb_pci_methods, sizeof(struct ichsmb_softc) }; static devclass_t ichsmb_pci_devclass; DRIVER_MODULE(ichsmb, pci, ichsmb_pci_driver, ichsmb_pci_devclass, 0, 0); static int ichsmb_pci_probe(device_t dev) { /* Check PCI identifier */ switch (pci_get_devid(dev)) { case ID_82801AA: device_set_desc(dev, "Intel 82801AA (ICH) SMBus controller"); break; case ID_82801AB: device_set_desc(dev, "Intel 82801AB (ICH0) SMBus controller"); break; case ID_82801BA: device_set_desc(dev, "Intel 82801BA (ICH2) SMBus controller"); break; case ID_82801CA: device_set_desc(dev, "Intel 82801CA (ICH3) SMBus controller"); break; case ID_82801DC: device_set_desc(dev, "Intel 82801DC (ICH4) SMBus controller"); break; case ID_82801EB: 
device_set_desc(dev, "Intel 82801EB (ICH5) SMBus controller"); break; default: if (pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_SMBUS && pci_get_progif(dev) == PCIS_SERIALBUS_SMBUS_PROGIF) { device_set_desc(dev, "SMBus controller"); return (-2); /* XXX */ } return (ENXIO); } /* Done */ return (ichsmb_probe(dev)); } static int ichsmb_pci_attach(device_t dev) { const sc_p sc = device_get_softc(dev); u_int32_t cmd; int error; /* Initialize private state */ bzero(sc, sizeof(*sc)); sc->ich_cmd = -1; sc->dev = dev; /* Allocate an I/O range */ sc->io_rid = ICH_SMB_BASE; sc->io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->io_rid, 0, ~0, 16, RF_ACTIVE); if (sc->io_res == NULL) { log(LOG_ERR, "%s: can't map I/O\n", device_get_nameunit(dev)); error = ENXIO; goto fail; } sc->io_bst = rman_get_bustag(sc->io_res); sc->io_bsh = rman_get_bushandle(sc->io_res); /* Allocate interrupt */ sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { log(LOG_ERR, "%s: can't get IRQ\n", device_get_nameunit(dev)); error = ENXIO; goto fail; } /* Set up interrupt handler */ error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC, ichsmb_device_intr, sc, &sc->irq_handle); if (error != 0) { log(LOG_ERR, "%s: can't setup irq\n", device_get_nameunit(dev)); goto fail; } /* Enable I/O mapping */ cmd = pci_read_config(dev, PCIR_COMMAND, 4); cmd |= PCIM_CMD_PORTEN; pci_write_config(dev, PCIR_COMMAND, cmd, 4); cmd = pci_read_config(dev, PCIR_COMMAND, 4); if ((cmd & PCIM_CMD_PORTEN) == 0) { log(LOG_ERR, "%s: can't enable memory map\n", device_get_nameunit(dev)); error = ENXIO; goto fail; } /* Enable device */ pci_write_config(dev, ICH_HOSTC, ICH_HOSTC_HST_EN, 1); /* Done */ return (ichsmb_attach(dev)); fail: /* Attach failed, release resources */ ichsmb_release_resources(sc); return (error); } Index: head/sys/dev/ichwd/ichwd.c 
=================================================================== --- head/sys/dev/ichwd/ichwd.c (revision 129878) +++ head/sys/dev/ichwd/ichwd.c (revision 129879) @@ -1,381 +1,382 @@ /* * Copyright (c) 2004 Texas A&M University * All rights reserved. * * Developer: Wm. Daryl Hawkins * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Intel ICH Watchdog Timer (WDT) driver * * Originally developed by Wm. Daryl Hawkins of Texas A&M * Heavily modified by * * This is a tricky one. The ICH WDT can't be treated as a regular PCI * device as it's actually an integrated function of the ICH LPC interface * bridge. 
Detection is also awkward, because we can only infer the * presence of the watchdog timer from the fact that the machine has an * ICH chipset, or, on ACPI 2.x systems, by the presence of the 'WDDT' * ACPI table (although this driver does not support the ACPI detection * method). * * There is one slight problem on non-ACPI or ACPI 1.x systems: we have no * way of knowing if the WDT is permanently disabled (either by the BIOS * or in hardware). * * The WDT is programmed through I/O registers in the ACPI I/O space. * Intel swears it's always at offset 0x60, so we use that. * * For details about the ICH WDT, see Intel Application Note AP-725 * (document no. 292273-001). The WDT is also described in the individual * chipset datasheets, e.g. Intel82801EB ICH5 / 82801ER ICH5R Datasheet * (document no. 252516-001) sections 9.10 and 9.11. */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include #include static struct ichwd_device ichwd_devices[] = { { VENDORID_INTEL, DEVICEID_82801AA, "Intel 82801AA watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801AB, "Intel 82801AB watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801BA, "Intel 82801BA watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801BAM, "Intel 82801BAM watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801CA, "Intel 82801CA watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801CAM, "Intel 82801CAM watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801DB, "Intel 82801DB watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801DBM, "Intel 82801DBM watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801E, "Intel 82801E watchdog timer" }, { VENDORID_INTEL, DEVICEID_82801EBR, "Intel 82801EB/ER watchdog timer" }, { 0, 0, NULL }, }; static devclass_t ichwd_devclass; #define ichwd_read_1(sc, off) \ bus_space_read_1((sc)->smi_bst, (sc)->smi_bsh, (off)) #define ichwd_read_2(sc, off) \ bus_space_read_2((sc)->smi_bst, (sc)->smi_bsh, (off)) #define ichwd_read_4(sc, off) \ 
bus_space_read_4((sc)->smi_bst, (sc)->smi_bsh, (off))

#define ichwd_write_1(sc, off, val) \
	bus_space_write_1((sc)->smi_bst, (sc)->smi_bsh, (off), (val))
#define ichwd_write_2(sc, off, val) \
	bus_space_write_2((sc)->smi_bst, (sc)->smi_bsh, (off), (val))
#define ichwd_write_4(sc, off, val) \
	bus_space_write_4((sc)->smi_bst, (sc)->smi_bsh, (off), (val))

/* Set the TCO bit in the SMI enable register (enables WDT SMIs). */
static __inline void
ichwd_intr_enable(struct ichwd_softc *sc)
{
	ichwd_write_4(sc, SMI_EN, ichwd_read_4(sc, SMI_EN) | SMI_TCO_EN);
}

/* Clear the TCO bit in the SMI enable register. */
static __inline void
ichwd_intr_disable(struct ichwd_softc *sc)
{
	ichwd_write_4(sc, SMI_EN, ichwd_read_4(sc, SMI_EN) & ~SMI_TCO_EN);
}

/*
 * Reset the watchdog status bits in TCO1_STS/TCO2_STS.
 * NOTE(review): these are presumably write-1-to-clear status bits, hence
 * writing the mask values back -- confirm against the ICH datasheet.
 */
static __inline void
ichwd_sts_reset(struct ichwd_softc *sc)
{
	ichwd_write_2(sc, TCO1_STS, TCO_TIMEOUT);
	ichwd_write_2(sc, TCO2_STS, TCO_BOOT_STS);
	ichwd_write_2(sc, TCO2_STS, TCO_SECOND_TO_STS);
}

/*
 * Start the timer: clear the halt bit in TCO1_CNT while preserving the
 * bits in TCO_CNT_PRESERVE, and mark the softc active.
 */
static __inline void
ichwd_tmr_enable(struct ichwd_softc *sc)
{
	uint16_t cnt;

	cnt = ichwd_read_2(sc, TCO1_CNT) & TCO_CNT_PRESERVE;
	ichwd_write_2(sc, TCO1_CNT, cnt & ~TCO_TMR_HALT);
	sc->active = 1;
	if (bootverbose)
		device_printf(sc->device, "timer enabled\n");
}

/* Stop the timer: set the halt bit in TCO1_CNT and mark inactive. */
static __inline void
ichwd_tmr_disable(struct ichwd_softc *sc)
{
	uint16_t cnt;

	cnt = ichwd_read_2(sc, TCO1_CNT) & TCO_CNT_PRESERVE;
	ichwd_write_2(sc, TCO1_CNT, cnt | TCO_TMR_HALT);
	sc->active = 0;
	if (bootverbose)
		device_printf(sc->device, "timer disabled\n");
}

/* Pet the watchdog: any write to TCO_RLD restarts the countdown. */
static __inline void
ichwd_tmr_reload(struct ichwd_softc *sc)
{
	ichwd_write_1(sc, TCO_RLD, 1);
	if (bootverbose)
		device_printf(sc->device, "timer reloaded\n");
}

/* Program the initial countdown value (in WDT ticks) and cache it. */
static __inline void
ichwd_tmr_set(struct ichwd_softc *sc, uint8_t timeout)
{
	ichwd_write_1(sc, TCO_TMR, timeout);
	sc->timeout = timeout;
	if (bootverbose)
		device_printf(sc->device, "timeout set to %u ticks\n", timeout);
}

/*
 * Watchdog event handler.
*/ static void ichwd_event(void *arg, unsigned int cmd, int *error) { struct ichwd_softc *sc = arg; unsigned int timeout; cmd &= WD_INTERVAL; /* disable / enable */ if (cmd == 0) { if (sc->active) ichwd_tmr_disable(sc); *error = 0; return; } if (!sc->active) ichwd_tmr_enable(sc); /* convert from power-of-to-ns to WDT ticks */ if (cmd >= 64) { *error = EINVAL; return; } timeout = ((uint64_t)1 << cmd) / ICHWD_TICK; if (timeout < ICHWD_MIN_TIMEOUT || timeout > ICHWD_MAX_TIMEOUT) { *error = EINVAL; return; } /* set new initial value */ if (timeout != sc->timeout) ichwd_tmr_set(sc, timeout); /* reload */ ichwd_tmr_reload(sc); *error = 0; return; } static unsigned long pmbase; /* * Look for an ICH LPC interface bridge. If one is found, register an * ichwd device. There can be only one. */ static void ichwd_identify(driver_t *driver, device_t parent) { struct ichwd_device *id; device_t ich = NULL; device_t dev; /* look for an ICH LPC interface bridge */ for (id = ichwd_devices; id->desc != NULL; ++id) if ((ich = pci_find_device(id->vendor, id->device)) != NULL) break; if (ich == NULL) return; if (bootverbose) printf("%s(): found ICH chipset: %s\n", __func__, id->desc); /* get for ACPI base address */ pmbase = pci_read_config(ich, ICH_PMBASE, 2) & ICH_PMBASE_MASK; if (pmbase == 0) { if (bootverbose) printf("%s(): ICH PMBASE register is empty\n", __func__); return; } /* try to clear the NO_REBOOT bit */ pci_write_config(ich, ICH_GEN_STA, 0x00, 1); if (pci_read_config(ich, ICH_GEN_STA, 1) & ICH_GEN_STA_NO_REBOOT) { if (bootverbose) printf("%s(): ICH WDT present but disabled\n", __func__); return; } /* good, add child to bus */ if ((dev = device_find_child(parent, driver->name, 0)) == NULL) dev = BUS_ADD_CHILD(parent, 0, driver->name, -1); device_set_desc_copy(dev, id->desc); } static int ichwd_probe(device_t dev) { (void)dev; return (0); } static int ichwd_attach(device_t dev) { struct ichwd_softc *sc; sc = device_get_softc(dev); sc->device = dev; /* allocate I/O register 
space */ sc->smi_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->smi_rid, pmbase + SMI_BASE, pmbase + SMI_BASE + SMI_LEN - 1, SMI_LEN, RF_ACTIVE|RF_SHAREABLE); if (sc->smi_res == NULL) { device_printf(dev, "unable to reserve SMI registers\n"); goto fail; } sc->smi_bst = rman_get_bustag(sc->smi_res); sc->smi_bsh = rman_get_bushandle(sc->smi_res); sc->tco_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->tco_rid, pmbase + TCO_BASE, pmbase + TCO_BASE + TCO_LEN - 1, TCO_LEN, RF_ACTIVE|RF_SHAREABLE); if (sc->tco_res == NULL) { device_printf(dev, "unable to reserve TCO registers\n"); goto fail; } sc->tco_bst = rman_get_bustag(sc->tco_res); sc->tco_bsh = rman_get_bushandle(sc->tco_res); /* reset the watchdog status registers */ ichwd_sts_reset(sc); /* make sure the WDT starts out inactive */ ichwd_tmr_disable(sc); /* register the watchdog event handler */ sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, ichwd_event, sc, 0); /* enable watchdog timeout interrupts */ ichwd_intr_enable(sc); return (0); fail: sc = device_get_softc(dev); if (sc->tco_res != NULL) bus_release_resource(dev, SYS_RES_IOPORT, sc->tco_rid, sc->tco_res); if (sc->smi_res != NULL) bus_release_resource(dev, SYS_RES_IOPORT, sc->smi_rid, sc->smi_res); return (ENXIO); } static int ichwd_detach(device_t dev) { struct ichwd_softc *sc; device_printf(dev, "detaching\n"); sc = device_get_softc(dev); /* halt the watchdog timer */ if (sc->active) ichwd_tmr_disable(sc); /* disable watchdog timeout interrupts */ ichwd_intr_disable(sc); /* deregister event handler */ if (sc->ev_tag != NULL) EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag); sc->ev_tag = NULL; /* reset the watchdog status registers */ ichwd_sts_reset(sc); /* deallocate I/O register space */ bus_release_resource(dev, SYS_RES_IOPORT, sc->tco_rid, sc->tco_res); bus_release_resource(dev, SYS_RES_IOPORT, sc->smi_rid, sc->smi_res); return (0); } static device_method_t ichwd_methods[] = { DEVMETHOD(device_identify, ichwd_identify), 
DEVMETHOD(device_probe, ichwd_probe), DEVMETHOD(device_attach, ichwd_attach), DEVMETHOD(device_detach, ichwd_detach), {0,0} }; static driver_t ichwd_driver = { "ichwd", ichwd_methods, sizeof(struct ichwd_softc), }; static int ichwd_modevent(module_t mode, int type, void *data) { int error = 0; switch (type) { case MOD_LOAD: printf("ichwd module loaded\n"); break; case MOD_UNLOAD: printf("ichwd module unloaded\n"); break; case MOD_SHUTDOWN: printf("ichwd module shutting down\n"); break; } return (error); } DRIVER_MODULE(ichwd, nexus, ichwd_driver, ichwd_devclass, ichwd_modevent, NULL); /* * this doesn't seem to work, though I can't figure out why. * currently not a big issue since watchdog is standard. MODULE_DEPEND(ichwd, watchdog, 1, 1, 1); */ Index: head/sys/dev/ida/ida_disk.c =================================================================== --- head/sys/dev/ida/ida_disk.c (revision 129878) +++ head/sys/dev/ida/ida_disk.c (revision 129879) @@ -1,231 +1,232 @@ /*- * Copyright (c) 1999,2000 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Disk driver for Compaq SMART RAID adapters. */ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static int idad_probe(device_t dev); static int idad_attach(device_t dev); static int idad_detach(device_t dev); static d_strategy_t idad_strategy; static dumper_t idad_dump; static devclass_t idad_devclass; static device_method_t idad_methods[] = { DEVMETHOD(device_probe, idad_probe), DEVMETHOD(device_attach, idad_attach), DEVMETHOD(device_detach, idad_detach), { 0, 0 } }; static driver_t idad_driver = { "idad", idad_methods, sizeof(struct idad_softc) }; DRIVER_MODULE(idad, ida, idad_driver, idad_devclass, 0, 0); /* * Read/write routine for a buffer. Finds the proper unit, range checks * arguments, and schedules the transfer. Does not wait for the transfer * to complete. Multi-page transfers are supported. All I/O requests must * be a multiple of a sector in length. 
*/ static void idad_strategy(struct bio *bp) { struct idad_softc *drv; int s; drv = bp->bio_disk->d_drv1; if (drv == NULL) { bp->bio_error = EINVAL; goto bad; } /* * software write protect check */ if (drv->flags & DRV_WRITEPROT && (bp->bio_cmd == BIO_WRITE)) { bp->bio_error = EROFS; goto bad; } bp->bio_driver1 = drv; s = splbio(); ida_submit_buf(drv->controller, bp); splx(s); return; bad: bp->bio_flags |= BIO_ERROR; /* * Correctly set the buf to indicate a completed transfer */ bp->bio_resid = bp->bio_bcount; biodone(bp); return; } static int idad_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct idad_softc *drv; int error = 0; struct disk *dp; dp = arg; drv = dp->d_drv1; if (drv == NULL) return (ENXIO); drv->controller->flags &= ~IDA_INTERRUPTS; if (length > 0) { error = ida_command(drv->controller, CMD_WRITE, virtual, length, drv->drive, offset / DEV_BSIZE, DMA_DATA_OUT); } drv->controller->flags |= IDA_INTERRUPTS; return (error); } void idad_intr(struct bio *bp) { struct idad_softc *drv; drv = bp->bio_disk->d_drv1; if (bp->bio_flags & BIO_ERROR) bp->bio_error = EIO; else bp->bio_resid = 0; biodone(bp); } static int idad_probe(device_t dev) { device_set_desc(dev, "Compaq Logical Drive"); return (0); } static int idad_attach(device_t dev) { struct ida_drive_info dinfo; struct idad_softc *drv; device_t parent; int error; drv = (struct idad_softc *)device_get_softc(dev); parent = device_get_parent(dev); drv->dev = dev; drv->controller = (struct ida_softc *)device_get_softc(parent); drv->unit = device_get_unit(dev); drv->drive = drv->controller->num_drives; drv->controller->num_drives++; error = ida_command(drv->controller, CMD_GET_LOG_DRV_INFO, &dinfo, sizeof(dinfo), drv->drive, 0, DMA_DATA_IN); if (error) { device_printf(dev, "CMD_GET_LOG_DRV_INFO failed\n"); return (ENXIO); } drv->cylinders = dinfo.dp.ncylinders; drv->heads = dinfo.dp.nheads; drv->sectors = dinfo.dp.nsectors; drv->secsize = dinfo.secsize == 0 ? 
512 : dinfo.secsize; drv->secperunit = dinfo.secperunit; /* XXX * other initialization */ device_printf(dev, "%uMB (%u sectors), blocksize=%d\n", drv->secperunit / ((1024 * 1024) / drv->secsize), drv->secperunit, drv->secsize); drv->disk = disk_alloc(); drv->disk->d_strategy = idad_strategy; drv->disk->d_name = "idad"; drv->disk->d_dump = idad_dump; drv->disk->d_sectorsize = drv->secsize; drv->disk->d_mediasize = (off_t)drv->secperunit * drv->secsize; drv->disk->d_fwsectors = drv->sectors; drv->disk->d_fwheads = drv->heads; drv->disk->d_drv1 = drv; drv->disk->d_maxsize = DFLTPHYS; /* XXX guess? */ drv->disk->d_unit = drv->unit; drv->disk->d_flags = DISKFLAG_NEEDSGIANT; disk_create(drv->disk, DISK_VERSION); return (0); } static int idad_detach(device_t dev) { struct idad_softc *drv; drv = (struct idad_softc *)device_get_softc(dev); disk_destroy(drv->disk); return (0); } Index: head/sys/dev/ida/ida_eisa.c =================================================================== --- head/sys/dev/ida/ida_eisa.c (revision 129878) +++ head/sys/dev/ida/ida_eisa.c (revision 129879) @@ -1,346 +1,347 @@ /* * Copyright (c) 2000 Jonathan Lemon * Copyright (c) 1999 by Matthew N. Dodd * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define IDA_EISA_IOPORT_START 0x0c88 #define IDA_EISA_IOPORT_LEN 0x0017 #define IDA_EISA_IRQ_REG 0x0cc0 #define IDA_EISA_IRQ_MASK 0xf0 #define IDA_EISA_IRQ_15 0x80 #define IDA_EISA_IRQ_14 0x40 #define IDA_EISA_IRQ_11 0x10 #define IDA_EISA_IRQ_10 0x20 static int ida_v1_fifo_full(struct ida_softc *ida) { u_int8_t status; status = ida_inb(ida, R_EISA_SYSTEM_DOORBELL); return ((status & EISA_CHANNEL_CLEAR) == 0); } static void ida_v1_submit(struct ida_softc *ida, struct ida_qcb *qcb) { u_int16_t size; /* * On these cards, this location is actually for control flags. * Set them to zero and pass in structure size via an I/O port. 
*/ size = qcb->hwqcb->hdr.size << 2; qcb->hwqcb->hdr.size = 0; ida_outb(ida, R_EISA_SYSTEM_DOORBELL, EISA_CHANNEL_CLEAR); ida_outl(ida, R_EISA_LIST_ADDR, qcb->hwqcb_busaddr); ida_outw(ida, R_EISA_LIST_LEN, size); ida_outb(ida, R_EISA_LOCAL_DOORBELL, EISA_CHANNEL_BUSY); } static bus_addr_t ida_v1_done(struct ida_softc *ida) { struct ida_hardware_qcb *hwqcb; bus_addr_t completed; u_int8_t status; if ((ida_inb(ida, R_EISA_SYSTEM_DOORBELL) & EISA_CHANNEL_BUSY) == 0) return (0); ida_outb(ida, R_EISA_SYSTEM_DOORBELL, EISA_CHANNEL_BUSY); completed = ida_inl(ida, R_EISA_COMPLETE_ADDR); status = ida_inb(ida, R_EISA_LIST_STATUS); ida_outb(ida, R_EISA_LOCAL_DOORBELL, EISA_CHANNEL_CLEAR); if (completed != 0) { hwqcb = (struct ida_hardware_qcb *) ((bus_addr_t)ida->hwqcbs + ((completed & ~3) - ida->hwqcb_busaddr)); hwqcb->req.error = status; } return (completed); } static int ida_v1_int_pending(struct ida_softc *ida) { return (ida_inb(ida, R_EISA_SYSTEM_DOORBELL) & EISA_CHANNEL_BUSY); } static void ida_v1_int_enable(struct ida_softc *ida, int enable) { if (enable) { ida_outb(ida, R_EISA_SYSTEM_DOORBELL, ~EISA_CHANNEL_CLEAR); ida_outb(ida, R_EISA_LOCAL_DOORBELL, EISA_CHANNEL_BUSY); ida_outb(ida, R_EISA_INT_MASK, INT_ENABLE); ida_outb(ida, R_EISA_SYSTEM_MASK, INT_ENABLE); ida->flags |= IDA_INTERRUPTS; } else { ida_outb(ida, R_EISA_SYSTEM_MASK, INT_DISABLE); ida->flags &= ~IDA_INTERRUPTS; } } static int ida_v2_fifo_full(struct ida_softc *ida) { return (ida_inl(ida, R_CMD_FIFO) == 0); } static void ida_v2_submit(struct ida_softc *ida, struct ida_qcb *qcb) { ida_outl(ida, R_CMD_FIFO, qcb->hwqcb_busaddr); } static bus_addr_t ida_v2_done(struct ida_softc *ida) { return (ida_inl(ida, R_DONE_FIFO)); } static int ida_v2_int_pending(struct ida_softc *ida) { return (ida_inl(ida, R_INT_PENDING)); } static void ida_v2_int_enable(struct ida_softc *ida, int enable) { if (enable) ida->flags |= IDA_INTERRUPTS; else ida->flags &= ~IDA_INTERRUPTS; ida_outl(ida, R_INT_MASK, enable ? 
INT_ENABLE : INT_DISABLE); } static struct ida_access ida_v1_access = { ida_v1_fifo_full, ida_v1_submit, ida_v1_done, ida_v1_int_pending, ida_v1_int_enable, }; static struct ida_access ida_v2_access = { ida_v2_fifo_full, ida_v2_submit, ida_v2_done, ida_v2_int_pending, ida_v2_int_enable, }; static struct ida_board board_id[] = { { 0x0e114001, "Compaq IDA controller", &ida_v1_access, 0 }, { 0x0e114002, "Compaq IDA-2 controller", &ida_v1_access, 0 }, { 0x0e114010, "Compaq IAES controller", &ida_v1_access, 0 }, { 0x0e114020, "Compaq SMART array controller", &ida_v1_access, 0 }, { 0x0e114030, "Compaq SMART-2/E array controller", &ida_v2_access, 0 }, { 0, "", 0, 0 } }; static struct ida_board *ida_eisa_match(eisa_id_t); static int ida_eisa_probe(device_t); static int ida_eisa_attach(device_t); static device_method_t ida_eisa_methods[] = { DEVMETHOD(device_probe, ida_eisa_probe), DEVMETHOD(device_attach, ida_eisa_attach), DEVMETHOD(device_detach, ida_detach), { 0, 0 } }; static driver_t ida_eisa_driver = { "ida", ida_eisa_methods, sizeof(struct ida_softc) }; static devclass_t ida_devclass; static struct ida_board * ida_eisa_match(eisa_id_t id) { int i; for (i = 0; board_id[i].board; i++) if (board_id[i].board == id) return (&board_id[i]); return (NULL); } static int ida_eisa_probe(device_t dev) { struct ida_board *board; u_int32_t io_base; u_int irq = 0; board = ida_eisa_match(eisa_get_id(dev)); if (board == NULL) return (ENXIO); device_set_desc(dev, board->desc); io_base = (eisa_get_slot(dev) * EISA_SLOT_SIZE); switch (IDA_EISA_IRQ_MASK & (inb(IDA_EISA_IRQ_REG + io_base))) { case IDA_EISA_IRQ_15: irq = 15; break; case IDA_EISA_IRQ_14: irq = 14; break; case IDA_EISA_IRQ_11: irq = 11; break; case IDA_EISA_IRQ_10: irq = 10; break; default: device_printf(dev, "slot %d, illegal irq setting.\n", eisa_get_slot(dev)); return (ENXIO); } eisa_add_iospace(dev, (io_base + IDA_EISA_IOPORT_START), IDA_EISA_IOPORT_LEN, RESVADDR_NONE); eisa_add_intr(dev, irq, EISA_TRIGGER_LEVEL); /* XXX 
??? */ return (0); } static int ida_eisa_attach(device_t dev) { struct ida_softc *ida; struct ida_board *board; int error; int rid; ida = device_get_softc(dev); ida->dev = dev; board = ida_eisa_match(eisa_get_id(dev)); ida->cmd = *board->accessor; ida->flags = board->flags; ida->regs_res_type = SYS_RES_IOPORT; ida->regs_res_id = 0; ida->regs = bus_alloc_resource_any(dev, ida->regs_res_type, &ida->regs_res_id, RF_ACTIVE); if (ida->regs == NULL) { device_printf(dev, "can't allocate register resources\n"); return (ENOMEM); } error = bus_dma_tag_create( /* parent */ NULL, /* alignment */ 0, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ MAXBSIZE, /* nsegments */ IDA_NSEG, /* maxsegsize */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ NULL, /* lockarg */ NULL, &ida->parent_dmat); if (error != 0) { device_printf(dev, "can't allocate DMA tag\n"); ida_free(ida); return (ENOMEM); } rid = 0; ida->irq_res_type = SYS_RES_IRQ; ida->irq = bus_alloc_resource_any(dev, ida->irq_res_type, &rid, RF_ACTIVE | RF_SHAREABLE); if (ida->irq == NULL) { ida_free(ida); return (ENOMEM); } error = bus_setup_intr(dev, ida->irq, INTR_TYPE_BIO | INTR_ENTROPY, ida_intr, ida, &ida->ih); if (error) { device_printf(dev, "can't setup interrupt\n"); ida_free(ida); return (ENOMEM); } error = ida_init(ida); if (error) { ida_free(ida); return (error); } ida_attach(ida); ida->flags |= IDA_ATTACHED; return (0); } DRIVER_MODULE(ida, eisa, ida_eisa_driver, ida_devclass, 0, 0); Index: head/sys/dev/ida/ida_pci.c =================================================================== --- head/sys/dev/ida/ida_pci.c (revision 129878) +++ head/sys/dev/ida/ida_pci.c (revision 129879) @@ -1,305 +1,306 @@ /*- * Copyright (c) 1999,2000 Jonathan Lemon * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IDA_PCI_MAX_DMA_ADDR 0xFFFFFFFF #define IDA_PCI_MAX_DMA_COUNT 0xFFFFFFFF #define IDA_PCI_MEMADDR PCIR_BAR(1) /* Mem I/O Address */ #define IDA_DEVICEID_SMART 0xAE100E11 #define IDA_DEVICEID_DEC_SMART 0x00461011 #define IDA_DEVICEID_NCR_53C1510 0x00101000 static int ida_v3_fifo_full(struct ida_softc *ida) { return (ida_inl(ida, R_CMD_FIFO) == 0); } static void ida_v3_submit(struct ida_softc *ida, struct ida_qcb *qcb) { ida_outl(ida, R_CMD_FIFO, qcb->hwqcb_busaddr); } static bus_addr_t ida_v3_done(struct ida_softc *ida) { return (ida_inl(ida, R_DONE_FIFO)); } static int ida_v3_int_pending(struct ida_softc *ida) { return (ida_inl(ida, R_INT_PENDING)); } static void ida_v3_int_enable(struct ida_softc *ida, int enable) { if (enable) ida->flags |= IDA_INTERRUPTS; else ida->flags &= ~IDA_INTERRUPTS; ida_outl(ida, R_INT_MASK, enable ? INT_ENABLE : INT_DISABLE); } static int ida_v4_fifo_full(struct ida_softc *ida) { return (ida_inl(ida, R_42XX_REQUEST) != 0); } static void ida_v4_submit(struct ida_softc *ida, struct ida_qcb *qcb) { ida_outl(ida, R_42XX_REQUEST, qcb->hwqcb_busaddr); } static bus_addr_t ida_v4_done(struct ida_softc *ida) { bus_addr_t completed; completed = ida_inl(ida, R_42XX_REPLY); if (completed == -1) return (0); /* fifo is empty */ ida_outl(ida, R_42XX_REPLY, 0); /* confirm read */ return (completed); } static int ida_v4_int_pending(struct ida_softc *ida) { return (ida_inl(ida, R_42XX_STATUS) & STATUS_42XX_INT_PENDING); } static void ida_v4_int_enable(struct ida_softc *ida, int enable) { if (enable) ida->flags |= IDA_INTERRUPTS; else ida->flags &= ~IDA_INTERRUPTS; ida_outl(ida, R_42XX_INT_MASK, enable ? 
INT_ENABLE_42XX : INT_DISABLE_42XX); } static struct ida_access ida_v3_access = { ida_v3_fifo_full, ida_v3_submit, ida_v3_done, ida_v3_int_pending, ida_v3_int_enable, }; static struct ida_access ida_v4_access = { ida_v4_fifo_full, ida_v4_submit, ida_v4_done, ida_v4_int_pending, ida_v4_int_enable, }; static struct ida_board board_id[] = { { 0x40300E11, "Compaq SMART-2/P array controller", &ida_v3_access, 0 }, { 0x40310E11, "Compaq SMART-2SL array controller", &ida_v3_access, 0 }, { 0x40320E11, "Compaq Smart Array 3200 controller", &ida_v3_access, 0 }, { 0x40330E11, "Compaq Smart Array 3100ES controller", &ida_v3_access, 0 }, { 0x40340E11, "Compaq Smart Array 221 controller", &ida_v3_access, 0 }, { 0x40400E11, "Compaq Integrated Array controller", &ida_v4_access, IDA_FIRMWARE }, { 0x40480E11, "Compaq RAID LC2 controller", &ida_v4_access, IDA_FIRMWARE }, { 0x40500E11, "Compaq Smart Array 4200 controller", &ida_v4_access, 0 }, { 0x40510E11, "Compaq Smart Array 4250ES controller", &ida_v4_access, 0 }, { 0x40580E11, "Compaq Smart Array 431 controller", &ida_v4_access, 0 }, { 0, "", 0, 0 }, }; static int ida_pci_probe(device_t dev); static int ida_pci_attach(device_t dev); static device_method_t ida_pci_methods[] = { DEVMETHOD(device_probe, ida_pci_probe), DEVMETHOD(device_attach, ida_pci_attach), DEVMETHOD(device_detach, ida_detach), DEVMETHOD(bus_print_child, bus_generic_print_child), { 0, 0 } }; static driver_t ida_pci_driver = { "ida", ida_pci_methods, sizeof(struct ida_softc) }; static devclass_t ida_devclass; static struct ida_board * ida_pci_match(device_t dev) { int i; u_int32_t id, sub_id; id = pci_get_devid(dev); sub_id = pci_get_subdevice(dev) << 16 | pci_get_subvendor(dev); if (id == IDA_DEVICEID_SMART || id == IDA_DEVICEID_DEC_SMART || id == IDA_DEVICEID_NCR_53C1510) { for (i = 0; board_id[i].board; i++) if (board_id[i].board == sub_id) return (&board_id[i]); } return (NULL); } static int ida_pci_probe(device_t dev) { struct ida_board *board = 
ida_pci_match(dev); if (board != NULL) { device_set_desc(dev, board->desc); return (0); } return (ENXIO); } static int ida_pci_attach(device_t dev) { struct ida_board *board = ida_pci_match(dev); u_int32_t id = pci_get_devid(dev); struct ida_softc *ida; u_int command; int error, rid; command = pci_read_config(dev, PCIR_COMMAND, 1); /* * it appears that this board only does MEMIO access. */ if ((command & PCIM_CMD_MEMEN) == 0) { device_printf(dev, "Only memory mapped I/O is supported\n"); return (ENXIO); } ida = (struct ida_softc *)device_get_softc(dev); ida->dev = dev; ida->cmd = *board->accessor; ida->flags = board->flags; ida->regs_res_type = SYS_RES_MEMORY; ida->regs_res_id = IDA_PCI_MEMADDR; if (id == IDA_DEVICEID_DEC_SMART) ida->regs_res_id = PCIR_BAR(0); ida->regs = bus_alloc_resource_any(dev, ida->regs_res_type, &ida->regs_res_id, RF_ACTIVE); if (ida->regs == NULL) { device_printf(dev, "can't allocate memory resources\n"); return (ENOMEM); } error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG, /*maxsegsize*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/NULL, /*lockarg*/NULL, &ida->parent_dmat); if (error != 0) { device_printf(dev, "can't allocate DMA tag\n"); ida_free(ida); return (ENOMEM); } rid = 0; ida->irq_res_type = SYS_RES_IRQ; ida->irq = bus_alloc_resource_any(dev, ida->irq_res_type, &rid, RF_ACTIVE | RF_SHAREABLE); if (ida->irq == NULL) { ida_free(ida); return (ENOMEM); } error = bus_setup_intr(dev, ida->irq, INTR_TYPE_BIO | INTR_ENTROPY, ida_intr, ida, &ida->ih); if (error) { device_printf(dev, "can't setup interrupt\n"); ida_free(ida); return (ENOMEM); } error = ida_init(ida); if (error) { ida_free(ida); return (error); } ida_attach(ida); ida->flags |= IDA_ATTACHED; return (0); } DRIVER_MODULE(ida, pci, ida_pci_driver, ida_devclass, 0, 0); Index: 
head/sys/dev/ips/ips.h =================================================================== --- head/sys/dev/ips/ips.h (revision 129878) +++ head/sys/dev/ips/ips.h (revision 129879) @@ -1,468 +1,469 @@ /*- * Copyright (c) 2002 Adaptec Inc. * All rights reserved. * * Written by: David Jeffery * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DECLARE(M_IPSBUF); /* * IPS CONSTANTS */ #define IPS_VENDOR_ID 0x1014 #define IPS_VENDOR_ID_ADAPTEC 0x9005 #define IPS_MORPHEUS_DEVICE_ID 0x01BD #define IPS_COPPERHEAD_DEVICE_ID 0x002E #define IPS_MARCO_DEVICE_ID 0x0250 #define IPS_CSL 0xff #define IPS_POCL 0x30 /* amounts of memory to allocate for certain commands */ #define IPS_ADAPTER_INFO_LEN (sizeof(ips_adapter_info_t)) #define IPS_DRIVE_INFO_LEN (sizeof(ips_drive_info_t)) #define IPS_COMMAND_LEN 24 #define IPS_MAX_SG_LEN (sizeof(ips_sg_element_t) * IPS_MAX_SG_ELEMENTS) #define IPS_NVRAM_PAGE_SIZE 128 /* various flags */ #define IPS_NOWAIT_FLAG 1 /* states for the card to be in */ #define IPS_DEV_OPEN 0x01 #define IPS_TIMEOUT 0x02 /* command time out, need reset */ #define IPS_OFFLINE 0x04 /* can't reset card/card failure */ /* max number of commands set to something low for now */ #define IPS_MAX_CMD_NUM 128 #define IPS_MAX_NUM_DRIVES 8 #define IPS_MAX_SG_ELEMENTS 32 #define IPS_MAX_IOBUF_SIZE (64 * 1024) #define IPS_BLKSIZE 512 /* logical drive states */ #define IPS_LD_OFFLINE 0x02 #define IPS_LD_OKAY 0x03 #define IPS_LD_DEGRADED 0x04 #define IPS_LD_FREE 0x00 #define IPS_LD_SYS 0x06 #define IPS_LD_CRS 0x24 /* register offsets */ #define MORPHEUS_REG_OMR0 0x0018 /* Outbound Msg. Reg. 0 */ #define MORPHEUS_REG_OMR1 0x001C /* Outbound Msg. Reg. 1 */ #define MORPHEUS_REG_IDR 0x0020 /* Inbound Doorbell Reg. */ #define MORPHEUS_REG_IISR 0x0024 /* Inbound IRQ Status Reg. */ #define MORPHEUS_REG_IIMR 0x0028 /* Inbound IRQ Mask Reg. */ #define MORPHEUS_REG_OISR 0x0030 /* Outbound IRQ Status Reg. */ #define MORPHEUS_REG_OIMR 0x0034 /* Outbound IRQ Status Reg. */ #define MORPHEUS_REG_IQPR 0x0040 /* Inbound Queue Port Reg. */ #define MORPHEUS_REG_OQPR 0x0044 /* Outbound Queue Port Reg. 
*/ #define COPPER_REG_SCPR 0x05 /* Subsystem Ctrl. Port Reg. */ #define COPPER_REG_ISPR 0x06 /* IRQ Status Port Reg. */ #define COPPER_REG_CBSP 0x07 /* ? Reg. */ #define COPPER_REG_HISR 0x08 /* Host IRQ Status Reg. */ #define COPPER_REG_CCSAR 0x10 /* Cmd. Channel Sys Addr Reg.*/ #define COPPER_REG_CCCR 0x14 /* Cmd. Channel Ctrl. Reg. */ #define COPPER_REG_SQHR 0x20 /* Status Queue Head Reg. */ #define COPPER_REG_SQTR 0x24 /* Status Queue Tail Reg. */ #define COPPER_REG_SQER 0x28 /* Status Queue End Reg. */ #define COPPER_REG_SQSR 0x2C /* Status Queue Start Reg. */ /* bit definitions */ #define MORPHEUS_BIT_POST1 0x01 #define MORPHEUS_BIT_POST2 0x02 #define MORPHEUS_BIT_CMD_IRQ 0x08 #define COPPER_CMD_START 0x101A #define COPPER_SEM_BIT 0x08 #define COPPER_EI_BIT 0x80 #define COPPER_EBM_BIT 0x02 #define COPPER_RESET_BIT 0x80 #define COPPER_GHI_BIT 0x04 #define COPPER_SCE_BIT 0x01 #define COPPER_OP_BIT 0x01 #define COPPER_ILE_BIT 0x10 /* status defines */ #define IPS_POST1_OK 0x8000 #define IPS_POST2_OK 0x000f /* command op codes */ #define IPS_READ_CMD 0x02 #define IPS_WRITE_CMD 0x03 #define IPS_ADAPTER_INFO_CMD 0x05 #define IPS_CACHE_FLUSH_CMD 0x0A #define IPS_REBUILD_STATUS_CMD 0x0C #define IPS_ERROR_TABLE_CMD 0x17 #define IPS_DRIVE_INFO_CMD 0x19 #define IPS_SUBSYS_PARAM_CMD 0x40 #define IPS_CONFIG_SYNC_CMD 0x58 #define IPS_SG_READ_CMD 0x82 #define IPS_SG_WRITE_CMD 0x83 #define IPS_RW_NVRAM_CMD 0xBC #define IPS_FFDC_CMD 0xD7 /* error information returned by the adapter */ #define IPS_MIN_ERROR 0x02 #define IPS_ERROR_STATUS 0x13000200 /* ahh, magic numbers */ #define IPS_OS_FREEBSD 8 #define IPS_VERSION_MAJOR "0.90" #define IPS_VERSION_MINOR ".10" /* Adapter Types */ #define IPS_ADAPTER_COPPERHEAD 0x01 #define IPS_ADAPTER_COPPERHEAD2 0x02 #define IPS_ADAPTER_COPPERHEADOB1 0x03 #define IPS_ADAPTER_COPPERHEADOB2 0x04 #define IPS_ADAPTER_CLARINET 0x05 #define IPS_ADAPTER_CLARINETLITE 0x06 #define IPS_ADAPTER_TROMBONE 0x07 #define IPS_ADAPTER_MORPHEUS 0x08 #define 
IPS_ADAPTER_MORPHEUSLITE 0x09 #define IPS_ADAPTER_NEO 0x0A #define IPS_ADAPTER_NEOLITE 0x0B #define IPS_ADAPTER_SARASOTA2 0x0C #define IPS_ADAPTER_SARASOTA1 0x0D #define IPS_ADAPTER_MARCO 0x0E #define IPS_ADAPTER_SEBRING 0x0F #define IPS_ADAPTER_MAX_T IPS_ADAPTER_SEBRING /* values for ffdc_settime (from gmtime) */ #define IPS_SECSPERMIN 60 #define IPS_MINSPERHOUR 60 #define IPS_HOURSPERDAY 24 #define IPS_DAYSPERWEEK 7 #define IPS_DAYSPERNYEAR 365 #define IPS_DAYSPERLYEAR 366 #define IPS_SECSPERHOUR (IPS_SECSPERMIN * IPS_MINSPERHOUR) #define IPS_SECSPERDAY ((long) IPS_SECSPERHOUR * IPS_HOURSPERDAY) #define IPS_MONSPERYEAR 12 #define IPS_EPOCH_YEAR 1970 #define IPS_LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400) #define ips_isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0)) /* * IPS MACROS */ #define ips_read_1(sc,offset) bus_space_read_1(sc->bustag, sc->bushandle, offset) #define ips_read_2(sc,offset) bus_space_read_2(sc->bustag, sc->bushandle, offset) #define ips_read_4(sc,offset) bus_space_read_4(sc->bustag, sc->bushandle, offset) #define ips_write_1(sc,offset,value) bus_space_write_1(sc->bustag, sc->bushandle, offset, value) #define ips_write_2(sc,offset,value) bus_space_write_2(sc->bustag, sc->bushandle, offset, value) #define ips_write_4(sc,offset,value) bus_space_write_4(sc->bustag, sc->bushandle, offset, value) /* this is ugly. It zeros the end elements in an ips_command_t struct starting with the status element */ #define clear_ips_command(command) bzero(&((command)->status), (unsigned long)(&(command)[1])-(unsigned long)&((command)->status)) #define ips_read_request(iobuf) ((iobuf)->bio_cmd == BIO_READ) #define COMMAND_ERROR(status) (((status)->fields.basic_status & 0x0f) >= IPS_MIN_ERROR) #ifndef IPS_DEBUG #define DEVICE_PRINTF(x...) #define PRINTF(x...) #else #define DEVICE_PRINTF(level,x...) if(IPS_DEBUG >= level)device_printf(x) #define PRINTF(level,x...) 
if(IPS_DEBUG >= level)printf(x) #endif /* * IPS STRUCTS */ struct ips_softc; typedef struct{ u_int8_t command; u_int8_t id; u_int8_t drivenum; u_int8_t reserve2; u_int32_t lba; u_int32_t buffaddr; u_int32_t reserve3; } __attribute__ ((packed)) ips_generic_cmd; typedef struct{ u_int8_t command; u_int8_t id; u_int8_t drivenum; u_int8_t segnum; u_int32_t lba; u_int32_t buffaddr; u_int16_t length; u_int16_t reserve1; } __attribute__ ((packed)) ips_io_cmd; typedef struct{ u_int8_t command; u_int8_t id; u_int8_t pagenum; u_int8_t rw; u_int32_t reserve1; u_int32_t buffaddr; u_int32_t reserve3; } __attribute__ ((packed)) ips_rw_nvram_cmd; typedef struct{ u_int8_t command; u_int8_t id; u_int8_t drivenum; u_int8_t reserve1; u_int32_t reserve2; u_int32_t buffaddr; u_int32_t reserve3; } __attribute__ ((packed)) ips_drive_cmd; typedef struct{ u_int8_t command; u_int8_t id; u_int8_t reserve1; u_int8_t commandtype; u_int32_t reserve2; u_int32_t buffaddr; u_int32_t reserve3; } __attribute__((packed)) ips_adapter_info_cmd; typedef struct{ u_int8_t command; u_int8_t id; u_int8_t reset_count; u_int8_t reset_type; u_int8_t second; u_int8_t minute; u_int8_t hour; u_int8_t day; u_int8_t reserve1[4]; u_int8_t month; u_int8_t yearH; u_int8_t yearL; u_int8_t reserve2; } __attribute__((packed)) ips_adapter_ffdc_cmd; typedef union{ ips_generic_cmd generic_cmd; ips_drive_cmd drive_cmd; ips_adapter_info_cmd adapter_info_cmd; } ips_cmd_buff_t; typedef struct { u_int32_t signature; u_int8_t reserved; u_int8_t adapter_slot; u_int16_t adapter_type; u_int8_t bios_high[4]; u_int8_t bios_low[4]; u_int16_t reserve2; u_int8_t reserve3; u_int8_t operating_system; u_int8_t driver_high[4]; u_int8_t driver_low[4]; u_int8_t reserve4[100]; }__attribute__((packed)) ips_nvram_page5; typedef struct{ u_int32_t addr; u_int32_t len; } ips_sg_element_t; typedef struct{ u_int8_t drivenum; u_int8_t merge_id; u_int8_t raid_lvl; u_int8_t state; u_int32_t sector_count; } __attribute__((packed)) ips_drive_t; typedef 
struct{ u_int8_t drivecount; u_int8_t reserve1; u_int16_t reserve2; ips_drive_t drives[IPS_MAX_NUM_DRIVES]; }__attribute__((packed)) ips_drive_info_t; typedef struct{ u_int8_t drivecount; u_int8_t miscflags; u_int8_t SLTflags; u_int8_t BSTflags; u_int8_t pwr_chg_count; u_int8_t wrong_addr_count; u_int8_t unident_count; u_int8_t nvram_dev_chg_count; u_int8_t codeblock_version[8]; u_int8_t bootblock_version[8]; u_int32_t drive_sector_count[IPS_MAX_NUM_DRIVES]; u_int8_t max_concurrent_cmds; u_int8_t max_phys_devices; u_int16_t flash_prog_count; u_int8_t defunct_disks; u_int8_t rebuildflags; u_int8_t offline_drivecount; u_int8_t critical_drivecount; u_int16_t config_update_count; u_int8_t blockedflags; u_int8_t psdn_error; u_int16_t addr_dead_disk[4*16];/* ugly, max # channels * max # scsi devices per channel */ }__attribute__((packed)) ips_adapter_info_t; typedef struct { u_int32_t status[IPS_MAX_CMD_NUM]; u_int32_t base_phys_addr; int nextstatus; bus_dma_tag_t dmatag; bus_dmamap_t dmamap; } ips_copper_queue_t; typedef union { struct { u_int8_t reserved; u_int8_t command_id; u_int8_t basic_status; u_int8_t extended_status; } fields; volatile u_int32_t value; } ips_cmd_status_t; /* used to keep track of current commands to the card */ typedef struct ips_command{ u_int8_t command_number; u_int8_t id; u_int8_t timeout; struct ips_softc * sc; bus_dmamap_t command_dmamap; void * command_buffer; u_int32_t command_phys_addr;/*WARNING! 
must be changed if 64bit addressing ever used*/ struct sema cmd_sema; ips_cmd_status_t status; SLIST_ENTRY(ips_command) next; bus_dma_tag_t data_dmatag; bus_dmamap_t data_dmamap; void * data_buffer; void * arg; void (* callback)(struct ips_command *command); }ips_command_t; typedef struct ips_wait_list{ STAILQ_ENTRY(ips_wait_list) next; void *data; int (* callback)(ips_command_t *command); }ips_wait_list_t; typedef struct ips_softc{ struct resource * iores; struct resource * irqres; struct intr_config_hook ips_ich; int configured; int state; int iotype; int rid; int irqrid; void * irqcookie; bus_space_tag_t bustag; bus_space_handle_t bushandle; bus_dma_tag_t adapter_dmatag; bus_dma_tag_t command_dmatag; bus_dma_tag_t sg_dmatag; device_t dev; dev_t device_file; struct callout_handle timer; u_int16_t adapter_type; ips_adapter_info_t adapter_info; device_t diskdev[IPS_MAX_NUM_DRIVES]; ips_drive_t drives[IPS_MAX_NUM_DRIVES]; u_int8_t drivecount; u_int16_t ffdc_resetcount; struct timeval ffdc_resettime; u_int8_t next_drive; u_int8_t max_cmds; volatile u_int8_t used_commands; ips_command_t commandarray[IPS_MAX_CMD_NUM]; SLIST_HEAD(command_list, ips_command) free_cmd_list; STAILQ_HEAD(command_wait_list,ips_wait_list) cmd_wait_list; int (* ips_adapter_reinit)(struct ips_softc *sc, int force); void (* ips_adapter_intr)(void *sc); void (* ips_issue_cmd)(ips_command_t *command); ips_copper_queue_t * copper_queue; struct mtx queue_mtx; struct bio_queue_head queue; }ips_softc_t; /* function defines from ips_ioctl.c */ extern int ips_ioctl_request(ips_softc_t *sc, u_long ioctl_cmd, caddr_t addr, int32_t flags); /* function defines from ips_disk.c */ extern void ipsd_finish(struct bio *iobuf); /* function defines from ips_commands.c */ extern int ips_flush_cache(ips_softc_t *sc); extern void ips_start_io_request(ips_softc_t *sc); extern int ips_get_drive_info(ips_softc_t *sc); extern int ips_get_adapter_info(ips_softc_t *sc); extern int ips_ffdc_reset(ips_softc_t *sc); extern int 
ips_update_nvram(ips_softc_t *sc); extern int ips_clear_adapter(ips_softc_t *sc); /* function defines from ips.c */ extern int ips_get_free_cmd(ips_softc_t *sc, int (*callback)(ips_command_t *), void *data, unsigned long flags); extern void ips_insert_free_cmd(ips_softc_t *sc, ips_command_t *command); extern int ips_adapter_init(ips_softc_t *sc); extern int ips_morpheus_reinit(ips_softc_t *sc, int force); extern int ips_adapter_free(ips_softc_t *sc); extern void ips_morpheus_intr(void *sc); extern void ips_issue_morpheus_cmd(ips_command_t *command); extern int ips_copperhead_reinit(ips_softc_t *sc, int force); extern void ips_copperhead_intr(void *sc); extern void ips_issue_copperhead_cmd(ips_command_t *command); Index: head/sys/dev/ispfw/ispfw.c =================================================================== --- head/sys/dev/ispfw/ispfw.c (revision 129878) +++ head/sys/dev/ispfw/ispfw.c (revision 129879) @@ -1,175 +1,176 @@ /*- * ISP Firmware Helper Pseudo Device for FreeBSD * * Copyright (c) 2000, 2001, by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #if _MACHINE_ARCH == sparc64 #include #endif #define ISPFW_VERSION 0 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 #if _MACHINE_ARCH == sparc64 #define SBUS_PRODUCT_QLOGIC_ISP1000 0x1000 #endif typedef void ispfwfunc(int, int, int, const u_int16_t **); extern ispfwfunc *isp_get_firmware_p; static void isp_get_firmware(int, int, int, const u_int16_t **); static int ncallers = 0; static const u_int16_t ***callp = NULL; static int addcaller(const u_int16_t **); static int addcaller(const u_int16_t **caller) { const u_int16_t ***newcallp; int i; for (i = 0; i < ncallers; i++) { if (callp[i] == caller) return (1); } newcallp = malloc((ncallers + 1) * sizeof (const u_int16_t ***), M_DEVBUF, M_NOWAIT); if (newcallp == NULL) { return (0); } for (i = 0; i < ncallers; i++) { newcallp[i] = callp[i]; } newcallp[ncallers] = caller; if (ncallers++) free(callp, M_DEVBUF); callp = newcallp; return (1); } static void isp_get_firmware(int version, int tgtmode, int devid, const 
u_int16_t **ptrp) { const u_int16_t *rp = NULL; if (version == ISPFW_VERSION) { switch (devid) { case PCI_PRODUCT_QLOGIC_ISP1020: if (tgtmode) rp = isp_1040_risc_code_it; else rp = isp_1040_risc_code; break; case PCI_PRODUCT_QLOGIC_ISP1080: case PCI_PRODUCT_QLOGIC_ISP1240: case PCI_PRODUCT_QLOGIC_ISP1280: if (tgtmode) rp = isp_1080_risc_code_it; else rp = isp_1080_risc_code; break; case PCI_PRODUCT_QLOGIC_ISP10160: case PCI_PRODUCT_QLOGIC_ISP12160: if (tgtmode) rp = isp_12160_risc_code_it; else rp = isp_12160_risc_code; break; case PCI_PRODUCT_QLOGIC_ISP2100: rp = isp_2100_risc_code; break; case PCI_PRODUCT_QLOGIC_ISP2200: rp = isp_2200_risc_code; break; case PCI_PRODUCT_QLOGIC_ISP2300: case PCI_PRODUCT_QLOGIC_ISP2312: rp = isp_2300_risc_code; break; #if _MACHINE_ARCH == sparc64 case SBUS_PRODUCT_QLOGIC_ISP1000: if (tgtmode) break; rp = isp_1000_risc_code; break; #endif default: break; } } if (rp && addcaller(ptrp)) { *ptrp = rp; } } static int isp_module_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: isp_get_firmware_p = isp_get_firmware; break; case MOD_UNLOAD: isp_get_firmware_p = NULL; if (ncallers) { int i; for (i = 0; i < ncallers; i++) { *callp[i] = NULL; } free(callp, M_DEVBUF); } break; default: break; } return (0); } static moduledata_t ispfw_mod = { "ispfw", isp_module_handler, NULL }; DECLARE_MODULE(ispfw, ispfw_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD); MODULE_VERSION(ispfw, ISPFW_VERSION); MODULE_DEPEND(ispfw, isp, 1, 1, 1); Index: head/sys/dev/lge/if_lge.c =================================================================== --- head/sys/dev/lge/if_lge.c (revision 129878) +++ head/sys/dev/lge/if_lge.c (revision 129879) @@ -1,1608 +1,1609 @@ /* * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2000, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public * documentation not available, but ask me nicely. * * The Level 1 chip is used on some D-Link, SMC and Addtron NICs. * It's a 64-bit PCI part that supports TCP/IP checksum offload, * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There * are three supported methods for data transfer between host and * NIC: programmed I/O, traditional scatter/gather DMA and Packet * Propulsion Technology (tm) DMA. 
The latter mechanism is a form * of double buffer DMA where the packet data is copied to a * pre-allocated DMA buffer who's physical address has been loaded * into a table at device initialization time. The rationale is that * the virtual to physical address translation needed for normal * scatter/gather DMA is more expensive than the data copy needed * for double buffering. This may be true in Windows NT and the like, * but it isn't true for us, at least on the x86 arch. This driver * uses the scatter/gather I/O method for both TX and RX. * * The LXT1001 only supports TCP/IP checksum offload on receive. * Also, the VLAN tagging is done using a 16-entry table which allows * the chip to perform hardware filtering based on VLAN tags. Sadly, * our vlan support doesn't currently play well with this kind of * hardware support. * * Special thanks to: * - Jeff James at Intel, for arranging to have the LXT1001 manual * released (at long last) * - Beny Chen at D-Link, for actually sending it to me * - Brad Short and Keith Alexis at SMC, for sending me sample * SMC9462SX and SMC9462TX adapters for testing * - Paul Saab at Y!, for not killing me (though it remains to be seen * if in fact he did me much of a favor) */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #define LGE_USEIOSPACE #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. 
*/ static struct lge_type lge_devs[] = { { LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" }, { 0, 0, NULL } }; static int lge_probe(device_t); static int lge_attach(device_t); static int lge_detach(device_t); static int lge_alloc_jumbo_mem(struct lge_softc *); static void lge_free_jumbo_mem(struct lge_softc *); static void *lge_jalloc(struct lge_softc *); static void lge_jfree(void *, void *); static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *); static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *); static void lge_rxeof(struct lge_softc *, int); static void lge_rxeoc(struct lge_softc *); static void lge_txeof(struct lge_softc *); static void lge_intr(void *); static void lge_tick(void *); static void lge_start(struct ifnet *); static int lge_ioctl(struct ifnet *, u_long, caddr_t); static void lge_init(void *); static void lge_stop(struct lge_softc *); static void lge_watchdog(struct ifnet *); static void lge_shutdown(device_t); static int lge_ifmedia_upd(struct ifnet *); static void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *); static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int); static int lge_miibus_readreg(device_t, int, int); static int lge_miibus_writereg(device_t, int, int, int); static void lge_miibus_statchg(device_t); static void lge_setmulti(struct lge_softc *); static uint32_t lge_mchash(const uint8_t *); static void lge_reset(struct lge_softc *); static int lge_list_rx_init(struct lge_softc *); static int lge_list_tx_init(struct lge_softc *); #ifdef LGE_USEIOSPACE #define LGE_RES SYS_RES_IOPORT #define LGE_RID LGE_PCI_LOIO #else #define LGE_RES SYS_RES_MEMORY #define LGE_RID LGE_PCI_LOMEM #endif static device_method_t lge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lge_probe), DEVMETHOD(device_attach, lge_attach), DEVMETHOD(device_detach, lge_detach), DEVMETHOD(device_shutdown, 
lge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	lge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	lge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	lge_miibus_statchg),

	{ 0, 0 }
};

static driver_t lge_driver = {
	"lge",
	lge_methods,
	sizeof(struct lge_softc)
};

static devclass_t lge_devclass;

DRIVER_MODULE(lge, pci, lge_driver, lge_devclass, 0, 0);
DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(lge, pci, 1, 1, 1);
MODULE_DEPEND(lge, ether, 1, 1, 1);
MODULE_DEPEND(lge, miibus, 1, 1, 1);

/* Read-modify-write helpers for 32-bit chip registers. */
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Set/clear bits in the MEAR (EEPROM access) register. */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * Issues a single-access read command, polls for completion (bounded
 * by LGE_TIMEOUT), then picks the high or low 16 bits of the 32-bit
 * data register depending on whether 'addr' is odd or even.
 */
static void
lge_eeprom_getword(sc, addr, dest)
	struct lge_softc	*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int32_t		val;

	/* Kick off the read; the device addresses 32-bit words. */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	/* Poll until the command-read bit clears (done) or we time out. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: EEPROM read timed out\n", sc->lge_unit);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd word addresses live in the upper half of the data register. */
	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 * Copies 'cnt' 16-bit words starting at EEPROM offset 'off' into
 * 'dest'; if 'swap' is set each word is byte-swapped via ntohs().
 */
static void
lge_read_eeprom(sc, dest, off, cnt, swap)
	struct lge_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		lge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * MII bus read: issue a GMII read command for (phy, reg) and poll
 * for completion. Returns 0 on timeout or when hiding the internal
 * autoneg block at PHY address 0 (see comment below).
 */
static int
lge_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: PHY read timed out\n", sc->lge_unit);
		return(0);
	}

	/* Read data lives in the upper 16 bits of the control register. */
	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

/*
 * MII bus write: push 'data' to (phy, reg) and poll for completion.
 * Always returns 0, even on timeout.
 */
static int
lge_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: PHY write timed out\n", sc->lge_unit);
		return(0);
	}

	return(0);
}

/*
 * MII status change callback: mirror the PHY's negotiated speed and
 * duplex into the chip's GMII mode register.
 */
static void
lge_miibus_statchg(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->lge_miibus);

	/* Clear the speed field, then set it from the active media. */
	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
*/ LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000); break; } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX); } else { LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX); } return; } static uint32_t lge_mchash(addr) const uint8_t *addr; { uint32_t crc, carry; int idx, bit; uint8_t data; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); crc <<= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* * return the filter bit position */ return((crc >> 26) & 0x0000003F); } static void lge_setmulti(sc) struct lge_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, hashes[2] = { 0, 0 }; ifp = &sc->arpcom.ac_if; /* Make sure multicast hash table is enabled. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, LGE_MAR0, 0); CSR_WRITE_4(sc, LGE_MAR1, 0); /* now program new ones */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = lge_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } CSR_WRITE_4(sc, LGE_MAR0, hashes[0]); CSR_WRITE_4(sc, LGE_MAR1, hashes[1]); return; } static void lge_reset(sc) struct lge_softc *sc; { register int i; LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST); for (i = 0; i < LGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST)) break; } if (i == LGE_TIMEOUT) printf("lge%d: reset never completed\n", sc->lge_unit); /* Wait a little while for the chip to get its brains in order. 
*/
	DELAY(1000);

	return;
}

/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
lge_probe(dev)
	device_t		dev;
{
	struct lge_type		*t;

	t = lge_devs;

	while(t->lge_name != NULL) {
		if ((pci_get_vendor(dev) == t->lge_vid) &&
		    (pci_get_device(dev) == t->lge_did)) {
			device_set_desc(dev, t->lge_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 * On any failure, resources acquired so far are released in reverse
 * order before jumping to 'fail' (defined later in this function).
 */
static int
lge_attach(dev)
	device_t		dev;
{
	int			s;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct lge_softc));

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, LGE_PCI_LOIO, 4);
		membase = pci_read_config(dev, LGE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, LGE_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("lge%d: chip is in D%d power mode "
		"-- setting to D0\n", unit,
		    pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, LGE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, LGE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, LGE_PCI_INTLINE, irq, 4);
	}
#endif

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = LGE_RID;
	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);

	if (sc->lge_res == NULL) {
		printf("lge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->lge_btag = rman_get_bustag(sc->lge_res);
	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->lge_irq == NULL) {
		printf("lge%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET,
	    lge_intr, sc, &sc->lge_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		printf("lge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	sc->lge_unit = unit;
	callout_handle_init(&sc->lge_stat_ch);
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Descriptor lists must be physically contiguous (the chip is
	 * handed their physical addresses), hence contigmalloc.
	 */
	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_ldata == NULL) {
		printf("lge%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}
	bzero(sc->lge_ldata, sizeof(struct lge_list_data));

	/* Try to allocate memory for jumbo buffers. */
	if (lge_alloc_jumbo_mem(sc)) {
		printf("lge%d: jumbo buffer allocation failed\n",
                    sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	/* Initialize the ifnet and hook in our entry points. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_init = lge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = LGE_TX_LIST_CNT - 1;
	/* Hardware can only validate RX checksums. */
	ifp->if_capabilities = IFCAP_RXCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Remember whether this board uses the internal PCS (1000baseX). */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->lge_miibus,
	    lge_ifmedia_upd, lge_ifmedia_sts)) {
		printf("lge%d: MII without any PHY!\n", sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		lge_free_jumbo_mem(sc);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
*/ ether_ifattach(ifp, eaddr); callout_handle_init(&sc->lge_stat_ch); fail: splx(s); return(error); } static int lge_detach(dev) device_t dev; { struct lge_softc *sc; struct ifnet *ifp; int s; s = splimp(); sc = device_get_softc(dev); ifp = &sc->arpcom.ac_if; lge_reset(sc); lge_stop(sc); ether_ifdetach(ifp); bus_generic_detach(dev); device_delete_child(dev, sc->lge_miibus); bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq); bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res); contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF); lge_free_jumbo_mem(sc); splx(s); return(0); } /* * Initialize the transmit descriptors. */ static int lge_list_tx_init(sc) struct lge_softc *sc; { struct lge_list_data *ld; struct lge_ring_data *cd; int i; cd = &sc->lge_cdata; ld = sc->lge_ldata; for (i = 0; i < LGE_TX_LIST_CNT; i++) { ld->lge_tx_list[i].lge_mbuf = NULL; ld->lge_tx_list[i].lge_ctl = 0; } cd->lge_tx_prod = cd->lge_tx_cons = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arralge the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int lge_list_rx_init(sc) struct lge_softc *sc; { struct lge_list_data *ld; struct lge_ring_data *cd; int i; ld = sc->lge_ldata; cd = &sc->lge_cdata; cd->lge_rx_prod = cd->lge_rx_cons = 0; CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); for (i = 0; i < LGE_RX_LIST_CNT; i++) { if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0) break; if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS) return(ENOBUFS); } /* Clear possible 'rx command queue empty' interrupt. */ CSR_READ_4(sc, LGE_ISR); return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. 
*/ static int lge_newbuf(sc, c, m) struct lge_softc *sc; struct lge_rx_desc *c; struct mbuf *m; { struct mbuf *m_new = NULL; caddr_t *buf = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("lge%d: no memory for rx list " "-- packet dropped!\n", sc->lge_unit); return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = lge_jalloc(sc); if (buf == NULL) { #ifdef LGE_VERBOSE printf("lge%d: jumbo allocation failed " "-- packet dropped!\n", sc->lge_unit); #endif m_freem(m_new); return(ENOBUFS); } /* Attach the buffer to the mbuf */ m_new->m_data = (void *)buf; m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN; MEXTADD(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree, (struct lge_softc *)sc, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN; m_new->m_data = m_new->m_ext.ext_buf; } /* * Adjust alignment so packet payload begins on a * longword boundary. Mandatory for Alpha, useful on * x86 too. */ m_adj(m_new, ETHER_ALIGN); c->lge_mbuf = m_new; c->lge_fragptr_hi = 0; c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t)); c->lge_fraglen = m_new->m_len; c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1); c->lge_sts = 0; /* * Put this buffer in the RX command FIFO. To do this, * we just write the physical address of the descriptor * into the RX descriptor address registers. Note that * there are two registers, one high DWORD and one low * DWORD, which lets us specify a 64-bit address if * desired. We only use a 32-bit address for now. * Writing to the low DWORD register is what actually * causes the command to be issued, so we do that * last. */ CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c)); LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT); return(0); } static int lge_alloc_jumbo_mem(sc) struct lge_softc *sc; { caddr_t ptr; register int i; struct lge_jpool_entry *entry; /* Grab a big chunk o' storage. 
*/ sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->lge_cdata.lge_jumbo_buf == NULL) { printf("lge%d: no memory for jumbo buffers!\n", sc->lge_unit); return(ENOBUFS); } SLIST_INIT(&sc->lge_jfree_listhead); SLIST_INIT(&sc->lge_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc->lge_cdata.lge_jumbo_buf; for (i = 0; i < LGE_JSLOTS; i++) { sc->lge_cdata.lge_jslots[i] = ptr; ptr += LGE_JLEN; entry = malloc(sizeof(struct lge_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { printf("lge%d: no memory for jumbo " "buffer queue!\n", sc->lge_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); } return(0); } static void lge_free_jumbo_mem(sc) struct lge_softc *sc; { int i; struct lge_jpool_entry *entry; for (i = 0; i < LGE_JSLOTS; i++) { entry = SLIST_FIRST(&sc->lge_jfree_listhead); SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries); free(entry, M_DEVBUF); } contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF); return; } /* * Allocate a jumbo buffer. */ static void * lge_jalloc(sc) struct lge_softc *sc; { struct lge_jpool_entry *entry; entry = SLIST_FIRST(&sc->lge_jfree_listhead); if (entry == NULL) { #ifdef LGE_VERBOSE printf("lge%d: no free jumbo buffers\n", sc->lge_unit); #endif return(NULL); } SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries); return(sc->lge_cdata.lge_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void lge_jfree(buf, args) void *buf; void *args; { struct lge_softc *sc; int i; struct lge_jpool_entry *entry; /* Extract the softc struct pointer. 
*/ sc = args; if (sc == NULL) panic("lge_jfree: can't find softc pointer!"); /* calculate the slot this buffer belongs to */ i = ((vm_offset_t)buf - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN; if ((i < 0) || (i >= LGE_JSLOTS)) panic("lge_jfree: asked to free buffer that we don't manage!"); entry = SLIST_FIRST(&sc->lge_jinuse_listhead); if (entry == NULL) panic("lge_jfree: buffer not in use!"); entry->slot = i; SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void lge_rxeof(sc, cnt) struct lge_softc *sc; int cnt; { struct mbuf *m; struct ifnet *ifp; struct lge_rx_desc *cur_rx; int c, i, total_len = 0; u_int32_t rxsts, rxctl; ifp = &sc->arpcom.ac_if; /* Find out how many frames were processed. */ c = cnt; i = sc->lge_cdata.lge_rx_cons; /* Suck them in. */ while(c) { struct mbuf *m0 = NULL; cur_rx = &sc->lge_ldata->lge_rx_list[i]; rxctl = cur_rx->lge_ctl; rxsts = cur_rx->lge_sts; m = cur_rx->lge_mbuf; cur_rx->lge_mbuf = NULL; total_len = LGE_RXBYTES(cur_rx); LGE_INC(i, LGE_RX_LIST_CNT); c--; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxctl & LGE_RXCTL_ERRMASK) { ifp->if_ierrors++; lge_newbuf(sc, &LGE_RXTAIL(sc), m); continue; } if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) { m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); lge_newbuf(sc, &LGE_RXTAIL(sc), m); if (m0 == NULL) { printf("lge%d: no receive buffers " "available -- packet dropped!\n", sc->lge_unit); ifp->if_ierrors++; continue; } m = m0; } else { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } ifp->if_ipackets++; /* Do IP checksum checking. 
*/ if (rxsts & LGE_RXSTS_ISIP) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxsts & LGE_RXSTS_IPCSUMERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((rxsts & LGE_RXSTS_ISTCP && !(rxsts & LGE_RXSTS_TCPCSUMERR)) || (rxsts & LGE_RXSTS_ISUDP && !(rxsts & LGE_RXSTS_UDPCSUMERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } (*ifp->if_input)(ifp, m); } sc->lge_cdata.lge_rx_cons = i; return; } static void lge_rxeoc(sc) struct lge_softc *sc; { struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_flags &= ~IFF_RUNNING; lge_init(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void lge_txeof(sc) struct lge_softc *sc; { struct lge_tx_desc *cur_tx = NULL; struct ifnet *ifp; u_int32_t idx, txdone; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ idx = sc->lge_cdata.lge_tx_cons; txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT); while (idx != sc->lge_cdata.lge_tx_prod && txdone) { cur_tx = &sc->lge_ldata->lge_tx_list[idx]; ifp->if_opackets++; if (cur_tx->lge_mbuf != NULL) { m_freem(cur_tx->lge_mbuf); cur_tx->lge_mbuf = NULL; } cur_tx->lge_ctl = 0; txdone--; LGE_INC(idx, LGE_TX_LIST_CNT); ifp->if_timer = 0; } sc->lge_cdata.lge_tx_cons = idx; if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } static void lge_tick(xsc) void *xsc; { struct lge_softc *sc; struct mii_data *mii; struct ifnet *ifp; int s; s = splimp(); sc = xsc; ifp = &sc->arpcom.ac_if; CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS); ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL); CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS); ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL); if (!sc->lge_link) { mii = device_get_softc(sc->lge_miibus); mii_tick(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 
sc->lge_link++; if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX|| IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) printf("lge%d: gigabit link up\n", sc->lge_unit); if (ifp->if_snd.ifq_head != NULL) lge_start(ifp); } } sc->lge_stat_ch = timeout(lge_tick, sc, hz); splx(s); return; } static void lge_intr(arg) void *arg; { struct lge_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = &sc->arpcom.ac_if; /* Supress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { lge_stop(sc); return; } for (;;) { /* * Reading the ISR register clears all interrupts, and * clears the 'interrupts enabled' bit in the IMR * register. */ status = CSR_READ_4(sc, LGE_ISR); if ((status & LGE_INTRS) == 0) break; if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE))) lge_txeof(sc); if (status & LGE_ISR_RXDMA_DONE) lge_rxeof(sc, LGE_RX_DMACNT(status)); if (status & LGE_ISR_RXCMDFIFO_EMPTY) lge_rxeoc(sc); if (status & LGE_ISR_PHY_INTR) { sc->lge_link = 0; untimeout(lge_tick, sc, sc->lge_stat_ch); lge_tick(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB); if (ifp->if_snd.ifq_head != NULL) lge_start(ifp); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int lge_encap(sc, m_head, txidx) struct lge_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct lge_frag *f = NULL; struct lge_tx_desc *cur_tx; struct mbuf *m; int frag = 0, tot_len = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ m = m_head; cur_tx = &sc->lge_ldata->lge_tx_list[*txidx]; frag = 0; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { tot_len += m->m_len; f = &cur_tx->lge_frags[frag]; f->lge_fraglen = m->m_len; f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t)); f->lge_fragptr_hi = 0; frag++; } } if (m != NULL) return(ENOBUFS); cur_tx->lge_mbuf = m_head; cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len; LGE_INC((*txidx), LGE_TX_LIST_CNT); /* Queue for transmit */ CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx)); return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void lge_start(ifp) struct ifnet *ifp; { struct lge_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx; sc = ifp->if_softc; if (!sc->lge_link) return; idx = sc->lge_cdata.lge_tx_prod; if (ifp->if_flags & IFF_OACTIVE) return; while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) { if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0) break; IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (lge_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } sc->lge_cdata.lge_tx_prod = idx; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; return; } static void lge_init(xsc) void *xsc; { struct lge_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; int s; if (ifp->if_flags & IFF_RUNNING) return; s = splimp(); /* * Cancel pending I/O and free all RX/TX buffers. 
*/ lge_stop(sc); lge_reset(sc); mii = device_get_softc(sc->lge_miibus); /* Set MAC address */ CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); /* Init circular RX list. */ if (lge_list_rx_init(sc) == ENOBUFS) { printf("lge%d: initialization failed: no " "memory for rx buffers\n", sc->lge_unit); lge_stop(sc); (void)splx(s); return; } /* * Init tx descriptors. */ lge_list_tx_init(sc); /* Set initial value for MODE1 register. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST| LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD| LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0| LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC); } else { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC); } /* * Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST); } else { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST); } /* Packet padding workaround? 
*/ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD); /* No error frames */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS); /* Receive large frames */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS); /* Workaround: disable RX/TX flow control */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL); CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL); /* Make sure to strip CRC from received frames */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC); /* Turn off magic packet mode */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB); /* Turn off all VLAN stuff */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX| LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT); /* Workarond: FIFO overflow */ CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF); CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT); /* * Load the multicast filter. */ lge_setmulti(sc); /* * Enable hardware checksum validation for all received IPv4 * packets, do not reject packets with bad checksums. */ CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM| LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM| LGE_MODE2_RX_ERRCSUM); /* * Enable the delivery of PHY interrupts based on * link/speed/duplex status chalges. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL); /* Enable receiver and transmitter. */ CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB); CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0); CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB); /* * Enable interrupts. */ CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0| LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS); lge_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; (void)splx(s); sc->lge_stat_ch = timeout(lge_tick, sc, hz); return; } /* * Set media options. 
*/ static int lge_ifmedia_upd(ifp) struct ifnet *ifp; { struct lge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->lge_miibus); sc->lge_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. */ static void lge_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct lge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->lge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int lge_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct lge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int s, error = 0; s = splimp(); switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu > LGE_JUMBO_MTU) error = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->lge_if_flags & IFF_PROMISC)) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1| LGE_MODE1_RX_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->lge_if_flags & IFF_PROMISC) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC); } else { ifp->if_flags &= ~IFF_RUNNING; lge_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) lge_stop(sc); } sc->lge_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: lge_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->lge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } (void)splx(s); return(error); } static void lge_watchdog(ifp) struct ifnet *ifp; { struct lge_softc *sc; 
sc = ifp->if_softc; ifp->if_oerrors++; printf("lge%d: watchdog timeout\n", sc->lge_unit); lge_stop(sc); lge_reset(sc); ifp->if_flags &= ~IFF_RUNNING; lge_init(sc); if (ifp->if_snd.ifq_head != NULL) lge_start(ifp); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void lge_stop(sc) struct lge_softc *sc; { register int i; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; untimeout(lge_tick, sc, sc->lge_stat_ch); CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB); /* Disable receiver and transmitter. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB); sc->lge_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < LGE_RX_LIST_CNT; i++) { if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) { m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf); sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL; } } bzero((char *)&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < LGE_TX_LIST_CNT; i++) { if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) { m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf); sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL; } } bzero((char *)&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void lge_shutdown(dev) device_t dev; { struct lge_softc *sc; sc = device_get_softc(dev); lge_reset(sc); lge_stop(sc); return; } Index: head/sys/dev/lnc/if_lnc_isa.c =================================================================== --- head/sys/dev/lnc/if_lnc_isa.c (revision 129878) +++ head/sys/dev/lnc/if_lnc_isa.c (revision 129879) @@ -1,292 +1,293 @@ /* * Copyright (c) 1994-2000 * Paul Richards. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * verbatim and that no modifications are made prior to this * point in the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name Paul Richards may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include static struct isa_pnp_id lnc_pnp_ids[] = { {0, NULL} }; static int lnc_legacy_probe(device_t dev) { struct lnc_softc *sc = device_get_softc(dev); sc->portrid = 0; sc->portres = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->portrid, RF_ACTIVE); if (! 
sc->portres) { device_printf(dev, "Failed to allocate I/O ports\n"); lnc_release_resources(dev); return (ENXIO); } sc->lnc_btag = rman_get_bustag(sc->portres); sc->lnc_bhandle = rman_get_bushandle(sc->portres); /* * There isn't any way to determine if a NIC is a BICC. Basically, if * the lance probe succeeds using the i/o addresses of the BICC then * we assume it's a BICC. * */ sc->rap = BICC_RAP; sc->rdp = BICC_RDP; sc->nic.mem_mode = DMA_FIXED; /* XXX Should set BICC_IOSIZE et al somewhere to alloc resources correctly */ if ((sc->nic.ic = lance_probe(sc))) { device_set_desc(dev, "BICC Isolan"); sc->nic.ident = BICC; lnc_release_resources(dev); return (0); } else { /* It's not a BICC so try the standard NE2100 ports */ sc->rap = PCNET_RAP; sc->rdp = PCNET_RDP; if ((sc->nic.ic = lance_probe(sc))) { sc->nic.ident = NE2100; device_set_desc(dev, "NE2100"); lnc_release_resources(dev); return (0); } else { lnc_release_resources(dev); return (ENXIO); } } } static int lnc_isa_probe(device_t dev) { int pnp; pnp = ISA_PNP_PROBE(device_get_parent(dev), dev, lnc_pnp_ids); if (pnp == ENOENT) { /* It's not a PNP card, see if we support it by probing it */ return (lnc_legacy_probe(dev)); } else if (pnp == ENXIO) { return (ENXIO); } else { /* Found PNP card we support */ return (0); } } static void lnc_alloc_callback(void *arg, bus_dma_segment_t *seg, int nseg, int error) { /* Do nothing */ return; } static int lnc_isa_attach(device_t dev) { lnc_softc_t *sc = device_get_softc(dev); int err = 0; bus_size_t lnc_mem_size; device_printf(dev, "Attaching %s\n", device_get_desc(dev)); sc->portrid = 0; sc->portres = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->portrid, RF_ACTIVE); if (! sc->portres) { device_printf(dev, "Failed to allocate I/O ports\n"); lnc_release_resources(dev); return (ENXIO); } sc->drqrid = 0; sc->drqres = bus_alloc_resource_any(dev, SYS_RES_DRQ, &sc->drqrid, RF_ACTIVE); if (! 
sc->drqres) { device_printf(dev, "Failed to allocate DMA channel\n"); lnc_release_resources(dev); return (ENXIO); } if (isa_get_irq(dev) == -1) bus_set_resource(dev, SYS_RES_IRQ, 0, 10, 1); sc->irqrid = 0; sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqrid, RF_ACTIVE); if (! sc->irqres) { device_printf(dev, "Failed to allocate irq\n"); lnc_release_resources(dev); return (ENXIO); } err = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, lncintr, sc, &sc->intrhand); if (err) { device_printf(dev, "Failed to setup irq handler\n"); lnc_release_resources(dev); return (err); } /* XXX temp setting for nic */ sc->nic.mem_mode = DMA_FIXED; sc->nrdre = NRDRE; sc->ntdre = NTDRE; if (sc->nic.ident == NE2100) { sc->rap = PCNET_RAP; sc->rdp = PCNET_RDP; sc->bdp = PCNET_BDP; } else { sc->rap = BICC_RAP; sc->rdp = BICC_RDP; } /* Create a DMA tag describing the ring memory we need */ lnc_mem_size = ((NDESC(sc->nrdre) + NDESC(sc->ntdre)) * sizeof(struct host_ring_entry)); lnc_mem_size += (NDESC(sc->nrdre) * RECVBUFSIZE) + (NDESC(sc->ntdre) * TRANSBUFSIZE); err = bus_dma_tag_create(NULL, /* parent */ 4, /* alignement */ 0, /* boundary */ BUS_SPACE_MAXADDR_24BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ lnc_mem_size, /* segsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->dmat); if (err) { device_printf(dev, "Can't create DMA tag\n"); lnc_release_resources(dev); return (ENOMEM); } err = bus_dmamem_alloc(sc->dmat, (void **)&sc->recv_ring, BUS_DMA_NOWAIT, &sc->dmamap); if (err) { device_printf(dev, "Couldn't allocate memory\n"); lnc_release_resources(dev); return (ENOMEM); } err = bus_dmamap_load(sc->dmat, sc->dmamap, sc->recv_ring, lnc_mem_size, lnc_alloc_callback, sc->recv_ring, BUS_DMA_NOWAIT); if (err) { device_printf(dev, "Couldn't load DMA map\n"); lnc_release_resources(dev); return (ENOMEM); } 
isa_dmacascade(rman_get_start(sc->drqres)); /* Call generic attach code */ if (! lnc_attach_common(dev)) { device_printf(dev, "Generic attach code failed\n"); lnc_release_resources(dev); return (ENXIO); } return (0); } static int lnc_isa_detach(device_t dev) { lnc_softc_t *sc = device_get_softc(dev); int s = splimp(); ether_ifdetach(&sc->arpcom.ac_if); splx(s); lnc_stop(sc); lnc_release_resources(dev); return (0); } static device_method_t lnc_isa_methods[] = { /* DEVMETHOD(device_identify, lnc_isa_identify), */ DEVMETHOD(device_probe, lnc_isa_probe), DEVMETHOD(device_attach, lnc_isa_attach), DEVMETHOD(device_detach, lnc_isa_detach), #ifdef notyet DEVMETHOD(device_suspend, lnc_isa_suspend), DEVMETHOD(device_resume, lnc_isa_resume), DEVMETHOD(device_shutdown, lnc_isa_shutdown), #endif { 0, 0 } }; static driver_t lnc_isa_driver = { "lnc", lnc_isa_methods, sizeof(struct lnc_softc), }; DRIVER_MODULE(lnc, isa, lnc_isa_driver, lnc_devclass, 0, 0); MODULE_DEPEND(lnc, isa, 1, 1, 1); MODULE_DEPEND(lnc, ether, 1, 1, 1); Index: head/sys/dev/lnc/if_lnc_pci.c =================================================================== --- head/sys/dev/lnc/if_lnc_pci.c (revision 129878) +++ head/sys/dev/lnc/if_lnc_pci.c (revision 129879) @@ -1,235 +1,236 @@ /*- * Copyright (c) 1994-2000 * Paul Richards. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * verbatim and that no modifications are made prior to this * point in the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name Paul Richards may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define AMD_VENDOR_ID 0x1022 #define PCI_DEVICE_ID_PCNet_PCI 0x2000 #define PCI_DEVICE_ID_PCHome_PCI 0x2001 #define LNC_PROBE_PRIORITY -1 static int lnc_pci_probe(device_t dev) { if (pci_get_vendor(dev) != AMD_VENDOR_ID) return (ENXIO); switch(pci_get_device(dev)) { case PCI_DEVICE_ID_PCNet_PCI: device_set_desc(dev, "PCNet/PCI Ethernet adapter"); return(LNC_PROBE_PRIORITY); break; case PCI_DEVICE_ID_PCHome_PCI: device_set_desc(dev, "PCHome/PCI Ethernet adapter"); return(LNC_PROBE_PRIORITY); break; default: return (ENXIO); break; } return (ENXIO); } static void lnc_alloc_callback(void *arg, bus_dma_segment_t *seg, int nseg, int error) { /* Do nothing */ return; } static int lnc_pci_attach(device_t dev) { lnc_softc_t *sc = device_get_softc(dev); unsigned command; int err = 0; bus_size_t lnc_mem_size; device_printf(dev, "Attaching %s\n", device_get_desc(dev)); command = pci_read_config(dev, PCIR_COMMAND, 4); command |= 
PCIM_CMD_PORTEN | PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, command, 4); sc->portrid = PCIR_BAR(0); sc->portres = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->portrid, RF_ACTIVE); if (! sc->portres) { device_printf(dev, "Cannot allocate I/O ports\n"); lnc_release_resources(dev); return (ENXIO); } sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqrid, RF_ACTIVE|RF_SHAREABLE); if (! sc->irqres) { device_printf(dev, "Cannot allocate irq\n"); lnc_release_resources(dev); return (ENXIO); } err = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, lncintr, sc, &sc->intrhand); if (err) { device_printf(dev, "Cannot setup irq handler\n"); lnc_release_resources(dev); return (ENXIO); } sc->lnc_btag = rman_get_bustag(sc->portres); sc->lnc_bhandle = rman_get_bushandle(sc->portres); /* XXX temp setting for nic */ sc->nic.ic = PCnet_PCI; sc->nic.ident = NE2100; sc->nic.mem_mode = DMA_FIXED; sc->nrdre = NRDRE; sc->ntdre = NTDRE; sc->rap = PCNET_RAP; sc->rdp = PCNET_RDP; sc->bdp = PCNET_BDP; /* Create a DMA tag describing the ring memory we need */ lnc_mem_size = ((NDESC(sc->nrdre) + NDESC(sc->ntdre)) * sizeof(struct host_ring_entry)); lnc_mem_size += sizeof(struct init_block) + (sizeof(struct mds) * (NDESC(sc->nrdre) + NDESC(sc->ntdre))) + MEM_SLEW; lnc_mem_size += (NDESC(sc->nrdre) * RECVBUFSIZE) + (NDESC(sc->ntdre) * TRANSBUFSIZE); err = bus_dma_tag_create(NULL, /* parent */ 1, /* alignement */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ lnc_mem_size, /* segsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->dmat); if (err) { device_printf(dev, "Can't create DMA tag\n"); lnc_release_resources(dev); return (ENOMEM); } err = bus_dmamem_alloc(sc->dmat, (void **)&sc->recv_ring, BUS_DMA_NOWAIT, &sc->dmamap); if (err) { device_printf(dev, "Couldn't allocate memory\n"); 
lnc_release_resources(dev); return (ENOMEM); } bus_dmamap_load(sc->dmat, sc->dmamap, sc->recv_ring, lnc_mem_size, lnc_alloc_callback, sc->recv_ring, BUS_DMA_NOWAIT); /* Call generic attach code */ if (! lnc_attach_common(dev)) { device_printf(dev, "Generic attach code failed\n"); lnc_release_resources(dev); return (ENXIO); } return (0); } static int lnc_pci_detach(device_t dev) { lnc_softc_t *sc = device_get_softc(dev); int s = splimp(); ether_ifdetach(&sc->arpcom.ac_if); lnc_stop(sc); bus_teardown_intr(dev, sc->irqres, sc->intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irqres); bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->portres); bus_dmamap_unload(sc->dmat, sc->dmamap); bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap); bus_dma_tag_destroy(sc->dmat); splx(s); return (0); } static device_method_t lnc_pci_methods[] = { DEVMETHOD(device_probe, lnc_pci_probe), DEVMETHOD(device_attach, lnc_pci_attach), DEVMETHOD(device_detach, lnc_pci_detach), #ifdef notyet DEVMETHOD(device_suspend, lnc_pci_suspend), DEVMETHOD(device_resume, lnc_pci_resume), DEVMETHOD(device_shutdown, lnc_pci_shutdown), #endif { 0, 0 } }; static driver_t lnc_pci_driver = { "lnc", lnc_pci_methods, sizeof(struct lnc_softc), }; DRIVER_MODULE(lnc, pci, lnc_pci_driver, lnc_devclass, 0, 0); MODULE_DEPEND(lnc, pci, 1, 1, 1); MODULE_DEPEND(lnc, ether, 1, 1, 1); Index: head/sys/dev/mlx/mlx_disk.c =================================================================== --- head/sys/dev/mlx/mlx_disk.c (revision 129878) +++ head/sys/dev/mlx/mlx_disk.c (revision 129879) @@ -1,264 +1,265 @@ /*- * Copyright (c) 1999 Jonathan Lemon * Copyright (c) 1999 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Disk driver for Mylex DAC960 RAID adapters. */ #include #include #include +#include #include #include #include #include #include #include #include #include #include /* prototypes */ static int mlxd_probe(device_t dev); static int mlxd_attach(device_t dev); static int mlxd_detach(device_t dev); devclass_t mlxd_devclass; static device_method_t mlxd_methods[] = { DEVMETHOD(device_probe, mlxd_probe), DEVMETHOD(device_attach, mlxd_attach), DEVMETHOD(device_detach, mlxd_detach), { 0, 0 } }; static driver_t mlxd_driver = { "mlxd", mlxd_methods, sizeof(struct mlxd_softc) }; DRIVER_MODULE(mlxd, mlx, mlxd_driver, mlxd_devclass, 0, 0); static int mlxd_open(struct disk *dp) { struct mlxd_softc *sc = (struct mlxd_softc *)dp->d_drv1; debug_called(1); if (sc == NULL) return (ENXIO); /* controller not active? 
*/ if (sc->mlxd_controller->mlx_state & MLX_STATE_SHUTDOWN) return(ENXIO); sc->mlxd_flags |= MLXD_OPEN; return (0); } static int mlxd_close(struct disk *dp) { struct mlxd_softc *sc = (struct mlxd_softc *)dp->d_drv1; debug_called(1); if (sc == NULL) return (ENXIO); sc->mlxd_flags &= ~MLXD_OPEN; return (0); } static int mlxd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct mlxd_softc *sc = (struct mlxd_softc *)dp->d_drv1; int error; debug_called(1); if (sc == NULL) return (ENXIO); if ((error = mlx_submit_ioctl(sc->mlxd_controller, sc->mlxd_drive, cmd, addr, flag, td)) != ENOIOCTL) { debug(0, "mlx_submit_ioctl returned %d\n", error); return(error); } return (ENOTTY); } /* * Read/write routine for a buffer. Finds the proper unit, range checks * arguments, and schedules the transfer. Does not wait for the transfer * to complete. Multi-page transfers are supported. All I/O requests must * be a multiple of a sector in length. */ static void mlxd_strategy(mlx_bio *bp) { struct mlxd_softc *sc = (struct mlxd_softc *)MLX_BIO_SOFTC(bp); debug_called(1); /* bogus disk? */ if (sc == NULL) { MLX_BIO_SET_ERROR(bp, EINVAL); goto bad; } /* XXX may only be temporarily offline - sleep? */ if (sc->mlxd_drive->ms_state == MLX_SYSD_OFFLINE) { MLX_BIO_SET_ERROR(bp, ENXIO); goto bad; } MLX_BIO_STATS_START(bp); mlx_submit_buf(sc->mlxd_controller, bp); return; bad: /* * Correctly set the bio to indicate a failed tranfer. 
*/ MLX_BIO_RESID(bp) = MLX_BIO_LENGTH(bp); MLX_BIO_DONE(bp); return; } void mlxd_intr(void *data) { mlx_bio *bp = (mlx_bio *)data; debug_called(1); if (MLX_BIO_HAS_ERROR(bp)) MLX_BIO_SET_ERROR(bp, EIO); else MLX_BIO_RESID(bp) = 0; MLX_BIO_STATS_END(bp); MLX_BIO_DONE(bp); } static int mlxd_probe(device_t dev) { debug_called(1); device_set_desc(dev, "Mylex System Drive"); return (0); } static int mlxd_attach(device_t dev) { struct mlxd_softc *sc = (struct mlxd_softc *)device_get_softc(dev); device_t parent; char *state; int s1, s2; debug_called(1); parent = device_get_parent(dev); sc->mlxd_controller = (struct mlx_softc *)device_get_softc(parent); sc->mlxd_unit = device_get_unit(dev); sc->mlxd_drive = device_get_ivars(dev); sc->mlxd_dev = dev; switch(sc->mlxd_drive->ms_state) { case MLX_SYSD_ONLINE: state = "online"; break; case MLX_SYSD_CRITICAL: state = "critical"; break; case MLX_SYSD_OFFLINE: state = "offline"; break; default: state = "unknown state"; } device_printf(dev, "%uMB (%u sectors) RAID %d (%s)\n", sc->mlxd_drive->ms_size / ((1024 * 1024) / MLX_BLKSIZE), sc->mlxd_drive->ms_size, sc->mlxd_drive->ms_raidlevel, state); sc->mlxd_disk = disk_alloc(); sc->mlxd_disk->d_open = mlxd_open; sc->mlxd_disk->d_close = mlxd_close; sc->mlxd_disk->d_ioctl = mlxd_ioctl; sc->mlxd_disk->d_strategy = mlxd_strategy; sc->mlxd_disk->d_name = "mlxd"; sc->mlxd_disk->d_unit = sc->mlxd_unit; sc->mlxd_disk->d_drv1 = sc; sc->mlxd_disk->d_sectorsize = MLX_BLKSIZE; sc->mlxd_disk->d_mediasize = MLX_BLKSIZE * (off_t)sc->mlxd_drive->ms_size; sc->mlxd_disk->d_fwsectors = sc->mlxd_drive->ms_sectors; sc->mlxd_disk->d_fwheads = sc->mlxd_drive->ms_heads; sc->mlxd_disk->d_flags = DISKFLAG_NEEDSGIANT; /* * Set maximum I/O size to the lesser of the recommended maximum and the practical * maximum except on v2 cards where the maximum is set to 8 pages. 
*/ if (sc->mlxd_controller->mlx_iftype == MLX_IFTYPE_2) sc->mlxd_disk->d_maxsize = 8 * PAGE_SIZE; else { s1 = sc->mlxd_controller->mlx_enq2->me_maxblk * MLX_BLKSIZE; s2 = (sc->mlxd_controller->mlx_enq2->me_max_sg - 1) * PAGE_SIZE; sc->mlxd_disk->d_maxsize = imin(s1, s2); } disk_create(sc->mlxd_disk, DISK_VERSION); return (0); } static int mlxd_detach(device_t dev) { struct mlxd_softc *sc = (struct mlxd_softc *)device_get_softc(dev); debug_called(1); disk_destroy(sc->mlxd_disk); return(0); } Index: head/sys/dev/mlx/mlx_pci.c =================================================================== --- head/sys/dev/mlx/mlx_pci.c (revision 129878) +++ head/sys/dev/mlx/mlx_pci.c (revision 129879) @@ -1,226 +1,227 @@ /*- * Copyright (c) 1999 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mlx_pci_probe(device_t dev); static int mlx_pci_attach(device_t dev); static device_method_t mlx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mlx_pci_probe), DEVMETHOD(device_attach, mlx_pci_attach), DEVMETHOD(device_detach, mlx_detach), DEVMETHOD(device_shutdown, mlx_shutdown), DEVMETHOD(device_suspend, mlx_suspend), DEVMETHOD(device_resume, mlx_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t mlx_pci_driver = { "mlx", mlx_methods, sizeof(struct mlx_softc) }; DRIVER_MODULE(mlx, pci, mlx_pci_driver, mlx_devclass, 0, 0); struct mlx_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int iftype; char *desc; } mlx_identifiers[] = { {0x1069, 0x0001, 0x0000, 0x0000, MLX_IFTYPE_2, "Mylex version 2 RAID interface"}, {0x1069, 0x0002, 0x0000, 0x0000, MLX_IFTYPE_3, "Mylex version 3 RAID interface"}, {0x1069, 0x0010, 0x0000, 0x0000, MLX_IFTYPE_4, "Mylex version 4 RAID interface"}, {0x1011, 0x1065, 0x1069, 0x0020, MLX_IFTYPE_5, "Mylex version 5 RAID interface"}, {0, 0, 0, 0, 0, 0} }; static int mlx_pci_probe(device_t dev) { struct mlx_ident *m; debug_called(1); for (m = mlx_identifiers; m->vendor != 0; m++) { if 
((m->vendor == pci_get_vendor(dev)) && (m->device == pci_get_device(dev)) && ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) && (m->subdevice == pci_get_subdevice(dev))))) { device_set_desc(dev, m->desc); return(-10); /* allow room to be overridden */ } } return(ENXIO); } static int mlx_pci_attach(device_t dev) { struct mlx_softc *sc; int i, error; u_int32_t command; debug_called(1); /* * Make sure we are going to be able to talk to this board. */ command = pci_read_config(dev, PCIR_COMMAND, 2); if ((command & PCIM_CMD_MEMEN) == 0) { device_printf(dev, "memory window not available\n"); return(ENXIO); } /* force the busmaster enable bit on */ command |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, command, 2); /* * Initialise softc. */ sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->mlx_dev = dev; /* * Work out what sort of adapter this is (we need to know this in order * to map the appropriate interface resources). */ sc->mlx_iftype = 0; for (i = 0; mlx_identifiers[i].vendor != 0; i++) { if ((mlx_identifiers[i].vendor == pci_get_vendor(dev)) && (mlx_identifiers[i].device == pci_get_device(dev))) { sc->mlx_iftype = mlx_identifiers[i].iftype; break; } } if (sc->mlx_iftype == 0) /* shouldn't happen */ return(ENXIO); /* * Allocate the PCI register window. 
*/ /* type 2/3 adapters have an I/O region we don't prefer at base 0 */ switch(sc->mlx_iftype) { case MLX_IFTYPE_2: case MLX_IFTYPE_3: sc->mlx_mem_type = SYS_RES_MEMORY; sc->mlx_mem_rid = MLX_CFG_BASE1; sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type, &sc->mlx_mem_rid, RF_ACTIVE); if (sc->mlx_mem == NULL) { sc->mlx_mem_type = SYS_RES_IOPORT; sc->mlx_mem_rid = MLX_CFG_BASE0; sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type, &sc->mlx_mem_rid, RF_ACTIVE); } break; case MLX_IFTYPE_4: case MLX_IFTYPE_5: sc->mlx_mem_type = SYS_RES_MEMORY; sc->mlx_mem_rid = MLX_CFG_BASE0; sc->mlx_mem = bus_alloc_resource_any(dev, sc->mlx_mem_type, &sc->mlx_mem_rid, RF_ACTIVE); break; } if (sc->mlx_mem == NULL) { device_printf(sc->mlx_dev, "couldn't allocate mailbox window\n"); mlx_free(sc); return(ENXIO); } sc->mlx_btag = rman_get_bustag(sc->mlx_mem); sc->mlx_bhandle = rman_get_bushandle(sc->mlx_mem); /* * Allocate the parent bus DMA tag appropriate for PCI. */ error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, MLX_NSEG, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->mlx_parent_dmat); if (error != 0) { device_printf(dev, "can't allocate parent DMA tag\n"); mlx_free(sc); return(ENOMEM); } /* * Do bus-independant initialisation. */ error = mlx_attach(sc); if (error != 0) { mlx_free(sc); return(error); } /* * Start the controller. */ mlx_startup(sc); return(0); } Index: head/sys/dev/nge/if_nge.c =================================================================== --- head/sys/dev/nge/if_nge.c (revision 129878) +++ head/sys/dev/nge/if_nge.c (revision 129879) @@ -1,2287 +1,2288 @@ /* * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2000, 2001 * Bill Paul . All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * National Semiconductor DP83820/DP83821 gigabit ethernet driver * for FreeBSD. Datasheets are available from: * * http://www.national.com/ds/DP/DP83820.pdf * http://www.national.com/ds/DP/DP83821.pdf * * These chips are used on several low cost gigabit ethernet NICs * sold by D-Link, Addtron, SMC and Asante. 
Both parts are * virtually the same, except the 83820 is a 64-bit/32-bit part, * while the 83821 is 32-bit only. * * Many cards also use National gigE transceivers, such as the * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet * contains a full register description that applies to all of these * components: * * http://www.national.com/ds/DP/DP83861.pdf * * Written by Bill Paul * BSDi Open Source Solutions */ /* * The NatSemi DP83820 and 83821 controllers are enhanced versions * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP * hardware checksum offload (IPv4 only), VLAN tagging and filtering, * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern * matching buffers, one perfect address filter buffer and interrupt * moderation. The 83820 supports both 64-bit and 32-bit addressing * and data transfers: the 64-bit support can be toggled on or off * via software. This affects the size of certain fields in the DMA * descriptors. * * There are two bugs/misfeatures in the 83820/83821 that I have * discovered so far: * * - Receive buffers must be aligned on 64-bit boundaries, which means * you must resort to copying data in order to fix up the payload * alignment. * * - In order to transmit jumbo frames larger than 8170 bytes, you have * to turn off transmit checksum offloading, because the chip can't * compute the checksum on an outgoing frame unless it fits entirely * within the TX FIFO, which is only 8192 bytes in size. If you have * TX checksum offload enabled and you attempt to transmit a * frame larger than 8170 bytes, the transmitter will wedge. * * To work around the latter problem, TX checksum offload is disabled * if the user selects an MTU larger than 8152 (8170 - 18).
*/ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #define NGE_USEIOSPACE #include MODULE_DEPEND(nge, pci, 1, 1, 1); MODULE_DEPEND(nge, ether, 1, 1, 1); MODULE_DEPEND(nge, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define NGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static struct nge_type nge_devs[] = { { NGE_VENDORID, NGE_DEVICEID, "National Semiconductor Gigabit Ethernet" }, { 0, 0, NULL } }; static int nge_probe(device_t); static int nge_attach(device_t); static int nge_detach(device_t); static int nge_alloc_jumbo_mem(struct nge_softc *); static void nge_free_jumbo_mem(struct nge_softc *); static void *nge_jalloc(struct nge_softc *); static void nge_jfree(void *, void *); static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *); static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *); static void nge_rxeof(struct nge_softc *); static void nge_txeof(struct nge_softc *); static void nge_intr(void *); static void nge_tick(void *); static void nge_start(struct ifnet *); static int nge_ioctl(struct ifnet *, u_long, caddr_t); static void nge_init(void *); static void nge_stop(struct nge_softc *); static void nge_watchdog(struct ifnet *); static void nge_shutdown(device_t); static int nge_ifmedia_upd(struct ifnet *); static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void nge_delay(struct nge_softc *); static void nge_eeprom_idle(struct nge_softc *); static void nge_eeprom_putbyte(struct nge_softc *, int); static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *); static void nge_read_eeprom(struct 
nge_softc *, caddr_t, int, int, int); static void nge_mii_sync(struct nge_softc *); static void nge_mii_send(struct nge_softc *, u_int32_t, int); static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *); static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *); static int nge_miibus_readreg(device_t, int, int); static int nge_miibus_writereg(device_t, int, int, int); static void nge_miibus_statchg(device_t); static void nge_setmulti(struct nge_softc *); static uint32_t nge_mchash(const uint8_t *); static void nge_reset(struct nge_softc *); static int nge_list_rx_init(struct nge_softc *); static int nge_list_tx_init(struct nge_softc *); #ifdef NGE_USEIOSPACE #define NGE_RES SYS_RES_IOPORT #define NGE_RID NGE_PCI_LOIO #else #define NGE_RES SYS_RES_MEMORY #define NGE_RID NGE_PCI_LOMEM #endif static device_method_t nge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nge_probe), DEVMETHOD(device_attach, nge_attach), DEVMETHOD(device_detach, nge_detach), DEVMETHOD(device_shutdown, nge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, nge_miibus_readreg), DEVMETHOD(miibus_writereg, nge_miibus_writereg), DEVMETHOD(miibus_statchg, nge_miibus_statchg), { 0, 0 } }; static driver_t nge_driver = { "nge", nge_methods, sizeof(struct nge_softc) }; static devclass_t nge_devclass; DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0); DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0); #define NGE_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define NGE_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x)) #define SIO_CLR(x) \ CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x)) static void nge_delay(sc) struct nge_softc *sc; { int idx; for (idx = (300 / 33) + 1; idx 
> 0; idx--) CSR_READ_4(sc, NGE_CSR); return; } static void nge_eeprom_idle(sc) struct nge_softc *sc; { register int i; SIO_SET(NGE_MEAR_EE_CSEL); nge_delay(sc); SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); for (i = 0; i < 25; i++) { SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); } SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CSEL); nge_delay(sc); CSR_WRITE_4(sc, NGE_MEAR, 0x00000000); return; } /* * Send a read command and address to the EEPROM, check for ACK. */ static void nge_eeprom_putbyte(sc, addr) struct nge_softc *sc; int addr; { register int d, i; d = addr | NGE_EECMD_READ; /* * Feed in each bit and stobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(NGE_MEAR_EE_DIN); } else { SIO_CLR(NGE_MEAR_EE_DIN); } nge_delay(sc); SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void nge_eeprom_getword(sc, addr, dest) struct nge_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ nge_eeprom_idle(sc); /* Enter EEPROM access mode. */ nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_SET(NGE_MEAR_EE_CSEL); nge_delay(sc); /* * Send address of word we want to read. */ nge_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT) word |= i; nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); } /* Turn off EEPROM access mode. */ nge_eeprom_idle(sc); *dest = word; return; } /* * Read a sequence of words from the EEPROM. 
*/ static void nge_read_eeprom(sc, dest, off, cnt, swap) struct nge_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { nge_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void nge_mii_sync(sc) struct nge_softc *sc; { register int i; SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA); for (i = 0; i < 32; i++) { SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void nge_mii_send(sc, bits, cnt) struct nge_softc *sc; u_int32_t bits; int cnt; { int i; SIO_CLR(NGE_MEAR_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { SIO_SET(NGE_MEAR_MII_DATA); } else { SIO_CLR(NGE_MEAR_MII_DATA); } DELAY(1); SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); } } /* * Read an PHY register through the MII. */ static int nge_mii_readreg(sc, frame) struct nge_softc *sc; struct nge_mii_frame *frame; { int i, ack, s; s = splimp(); /* * Set up frame for RX. */ frame->mii_stdelim = NGE_MII_STARTDELIM; frame->mii_opcode = NGE_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_4(sc, NGE_MEAR, 0); /* * Turn on data xmit. */ SIO_SET(NGE_MEAR_MII_DIR); nge_mii_sync(sc); /* * Send command/address info. */ nge_mii_send(sc, frame->mii_stdelim, 2); nge_mii_send(sc, frame->mii_opcode, 2); nge_mii_send(sc, frame->mii_phyaddr, 5); nge_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA)); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); /* Turn off xmit. */ SIO_CLR(NGE_MEAR_MII_DIR); /* Check for ack */ SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA; SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); /* * Now try reading data bits. 
If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA) frame->mii_data |= i; DELAY(1); } SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); } fail: SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); splx(s); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int nge_mii_writereg(sc, frame) struct nge_softc *sc; struct nge_mii_frame *frame; { int s; s = splimp(); /* * Set up frame for TX. */ frame->mii_stdelim = NGE_MII_STARTDELIM; frame->mii_opcode = NGE_MII_WRITEOP; frame->mii_turnaround = NGE_MII_TURNAROUND; /* * Turn on data output. */ SIO_SET(NGE_MEAR_MII_DIR); nge_mii_sync(sc); nge_mii_send(sc, frame->mii_stdelim, 2); nge_mii_send(sc, frame->mii_opcode, 2); nge_mii_send(sc, frame->mii_phyaddr, 5); nge_mii_send(sc, frame->mii_regaddr, 5); nge_mii_send(sc, frame->mii_turnaround, 2); nge_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); /* * Turn off xmit. 
*/ SIO_CLR(NGE_MEAR_MII_DIR); splx(s); return(0); } static int nge_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct nge_softc *sc; struct nge_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; nge_mii_readreg(sc, &frame); return(frame.mii_data); } static int nge_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct nge_softc *sc; struct nge_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; nge_mii_writereg(sc, &frame); return(0); } static void nge_miibus_statchg(dev) device_t dev; { int status; struct nge_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); if (sc->nge_tbi) { if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) { status = CSR_READ_4(sc, NGE_TBI_ANLPAR); if (status == 0 || status & NGE_TBIANAR_FDX) { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } } else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) != IFM_FDX) { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } } else { mii = device_get_softc(sc->nge_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } /* If we have a 1000Mbps link, set the mode_1000 bit. 
*/ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); } else { NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); } } return; } static u_int32_t nge_mchash(addr) const uint8_t *addr; { uint32_t crc, carry; int idx, bit; uint8_t data; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); crc <<= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* * return the filter bit position */ return((crc >> 21) & 0x00000FFF); } static void nge_setmulti(sc) struct nge_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i, filtsave; int bit, index; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH); NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); return; } /* * We have to explicitly enable the multicast hash table * on the NatSemi chip if we want to use it, which we do. * We also have to tell it that we don't want to use the * hash table for matching unicast addresses. */ NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH); NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH); filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL); /* first, zot all the existing hash bits */ for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) { CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i); CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0); } /* * From the 11 bits returned by the crc routine, the top 7 * bits represent the 16-bit word in the mcast hash table * that needs to be updated, and the lower 4 bits represent * which bit within that byte needs to be set. 
*/ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = nge_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); index = (h >> 4) & 0x7F; bit = h & 0xF; CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + (index * 2)); NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit)); } CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave); return; } static void nge_reset(sc) struct nge_softc *sc; { register int i; NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET); for (i = 0; i < NGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET)) break; } if (i == NGE_TIMEOUT) printf("nge%d: reset never completed\n", sc->nge_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); /* * If this is a NetSemi chip, make sure to clear * PME mode. */ CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS); CSR_WRITE_4(sc, NGE_CLKRUN, 0); return; } /* * Probe for a NatSemi chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int nge_probe(dev) device_t dev; { struct nge_type *t; t = nge_devs; while(t->nge_name != NULL) { if ((pci_get_vendor(dev) == t->nge_vid) && (pci_get_device(dev) == t->nge_did)) { device_set_desc(dev, t->nge_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int nge_attach(dev) device_t dev; { int s; u_char eaddr[ETHER_ADDR_LEN]; struct nge_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; const char *sep = ""; s = splimp(); sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct nge_softc)); mtx_init(&sc->nge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #ifndef BURN_BRIDGES /* * Handle power management nonsense. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t iobase, membase, irq; /* Save important PCI config data. 
*/ iobase = pci_read_config(dev, NGE_PCI_LOIO, 4); membase = pci_read_config(dev, NGE_PCI_LOMEM, 4); irq = pci_read_config(dev, NGE_PCI_INTLINE, 4); /* Reset the power state. */ printf("nge%d: chip is in D%d power mode " "-- setting to D0\n", unit, pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, NGE_PCI_LOIO, iobase, 4); pci_write_config(dev, NGE_PCI_LOMEM, membase, 4); pci_write_config(dev, NGE_PCI_INTLINE, irq, 4); } #endif /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = NGE_RID; sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE); if (sc->nge_res == NULL) { printf("nge%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->nge_btag = rman_get_bustag(sc->nge_res); sc->nge_bhandle = rman_get_bushandle(sc->nge_res); /* Allocate interrupt */ rid = 0; sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->nge_irq == NULL) { printf("nge%d: couldn't map interrupt\n", unit); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET, nge_intr, sc, &sc->nge_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); printf("nge%d: couldn't set up irq\n", unit); goto fail; } /* Reset the adapter. */ nge_reset(sc); /* * Get station address from the EEPROM. 
*/ nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0); nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0); nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0); sc->nge_unit = unit; bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->nge_ldata == NULL) { printf("nge%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } bzero(sc->nge_ldata, sizeof(struct nge_list_data)); /* Try to allocate memory for jumbo buffers. */ if (nge_alloc_jumbo_mem(sc)) { printf("nge%d: jumbo buffer allocation failed\n", sc->nge_unit); contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF); bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = nge_ioctl; ifp->if_start = nge_start; ifp->if_watchdog = nge_watchdog; ifp->if_init = nge_init; ifp->if_baudrate = 1000000000; ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1; ifp->if_hwassist = NGE_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; /* * Do MII setup. 
*/ if (mii_phy_probe(dev, &sc->nge_miibus, nge_ifmedia_upd, nge_ifmedia_sts)) { if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) { sc->nge_tbi = 1; device_printf(dev, "Using TBI\n"); sc->nge_miibus = dev; ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd, nge_ifmedia_sts); #define ADD(m, c) ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL) #define PRINT(s) printf("%s%s", sep, s); sep = ", " ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0); device_printf(dev, " "); ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0); PRINT("1000baseSX"); ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0); PRINT("1000baseSX-FDX"); ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0); PRINT("auto"); printf("\n"); #undef ADD #undef PRINT ifmedia_set(&sc->nge_ifmedia, IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0)); CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP4_OUT | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN); } else { printf("nge%d: MII without any PHY!\n", sc->nge_unit); nge_free_jumbo_mem(sc); bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } } /* * Call MI attach routine. 
*/ ether_ifattach(ifp, eaddr); callout_handle_init(&sc->nge_stat_ch); fail: splx(s); mtx_destroy(&sc->nge_mtx); return(error); } static int nge_detach(dev) device_t dev; { struct nge_softc *sc; struct ifnet *ifp; int s; s = splimp(); sc = device_get_softc(dev); ifp = &sc->arpcom.ac_if; nge_reset(sc); nge_stop(sc); ether_ifdetach(ifp); bus_generic_detach(dev); if (!sc->nge_tbi) { device_delete_child(dev, sc->nge_miibus); } bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF); nge_free_jumbo_mem(sc); splx(s); mtx_destroy(&sc->nge_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int nge_list_tx_init(sc) struct nge_softc *sc; { struct nge_list_data *ld; struct nge_ring_data *cd; int i; cd = &sc->nge_cdata; ld = sc->nge_ldata; for (i = 0; i < NGE_TX_LIST_CNT; i++) { if (i == (NGE_TX_LIST_CNT - 1)) { ld->nge_tx_list[i].nge_nextdesc = &ld->nge_tx_list[0]; ld->nge_tx_list[i].nge_next = vtophys(&ld->nge_tx_list[0]); } else { ld->nge_tx_list[i].nge_nextdesc = &ld->nge_tx_list[i + 1]; ld->nge_tx_list[i].nge_next = vtophys(&ld->nge_tx_list[i + 1]); } ld->nge_tx_list[i].nge_mbuf = NULL; ld->nge_tx_list[i].nge_ptr = 0; ld->nge_tx_list[i].nge_ctl = 0; } cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
*/ static int nge_list_rx_init(sc) struct nge_softc *sc; { struct nge_list_data *ld; struct nge_ring_data *cd; int i; ld = sc->nge_ldata; cd = &sc->nge_cdata; for (i = 0; i < NGE_RX_LIST_CNT; i++) { if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (NGE_RX_LIST_CNT - 1)) { ld->nge_rx_list[i].nge_nextdesc = &ld->nge_rx_list[0]; ld->nge_rx_list[i].nge_next = vtophys(&ld->nge_rx_list[0]); } else { ld->nge_rx_list[i].nge_nextdesc = &ld->nge_rx_list[i + 1]; ld->nge_rx_list[i].nge_next = vtophys(&ld->nge_rx_list[i + 1]); } } cd->nge_rx_prod = 0; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int nge_newbuf(sc, c, m) struct nge_softc *sc; struct nge_desc *c; struct mbuf *m; { struct mbuf *m_new = NULL; caddr_t *buf = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("nge%d: no memory for rx list " "-- packet dropped!\n", sc->nge_unit); return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = nge_jalloc(sc); if (buf == NULL) { #ifdef NGE_VERBOSE printf("nge%d: jumbo allocation failed " "-- packet dropped!\n", sc->nge_unit); #endif m_freem(m_new); return(ENOBUFS); } /* Attach the buffer to the mbuf */ m_new->m_data = (void *)buf; m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN; MEXTADD(m_new, buf, NGE_JUMBO_FRAMELEN, nge_jfree, (struct nge_softc *)sc, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->nge_mbuf = m_new; c->nge_ptr = vtophys(mtod(m_new, caddr_t)); c->nge_ctl = m_new->m_len; c->nge_extsts = 0; return(0); } static int nge_alloc_jumbo_mem(sc) struct nge_softc *sc; { caddr_t ptr; register int i; struct nge_jpool_entry *entry; /* Grab a big chunk o' storage. 
*/ sc->nge_cdata.nge_jumbo_buf = contigmalloc(NGE_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->nge_cdata.nge_jumbo_buf == NULL) { printf("nge%d: no memory for jumbo buffers!\n", sc->nge_unit); return(ENOBUFS); } SLIST_INIT(&sc->nge_jfree_listhead); SLIST_INIT(&sc->nge_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc->nge_cdata.nge_jumbo_buf; for (i = 0; i < NGE_JSLOTS; i++) { sc->nge_cdata.nge_jslots[i] = ptr; ptr += NGE_JLEN; entry = malloc(sizeof(struct nge_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { printf("nge%d: no memory for jumbo " "buffer queue!\n", sc->nge_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries); } return(0); } static void nge_free_jumbo_mem(sc) struct nge_softc *sc; { register int i; struct nge_jpool_entry *entry; for (i = 0; i < NGE_JSLOTS; i++) { entry = SLIST_FIRST(&sc->nge_jfree_listhead); SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries); free(entry, M_DEVBUF); } contigfree(sc->nge_cdata.nge_jumbo_buf, NGE_JMEM, M_DEVBUF); return; } /* * Allocate a jumbo buffer. */ static void * nge_jalloc(sc) struct nge_softc *sc; { struct nge_jpool_entry *entry; entry = SLIST_FIRST(&sc->nge_jfree_listhead); if (entry == NULL) { #ifdef NGE_VERBOSE printf("nge%d: no free jumbo buffers\n", sc->nge_unit); #endif return(NULL); } SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries); return(sc->nge_cdata.nge_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void nge_jfree(buf, args) void *buf; void *args; { struct nge_softc *sc; int i; struct nge_jpool_entry *entry; /* Extract the softc struct pointer. 
*/ sc = args; if (sc == NULL) panic("nge_jfree: can't find softc pointer!"); /* calculate the slot this buffer belongs to */ i = ((vm_offset_t)buf - (vm_offset_t)sc->nge_cdata.nge_jumbo_buf) / NGE_JLEN; if ((i < 0) || (i >= NGE_JSLOTS)) panic("nge_jfree: asked to free buffer that we don't manage!"); entry = SLIST_FIRST(&sc->nge_jinuse_listhead); if (entry == NULL) panic("nge_jfree: buffer not in use!"); entry->slot = i; SLIST_REMOVE_HEAD(&sc->nge_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void nge_rxeof(sc) struct nge_softc *sc; { struct mbuf *m; struct ifnet *ifp; struct nge_desc *cur_rx; int i, total_len = 0; u_int32_t rxstat; ifp = &sc->arpcom.ac_if; i = sc->nge_cdata.nge_rx_prod; while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) { struct mbuf *m0 = NULL; u_int32_t extsts; #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif /* DEVICE_POLLING */ cur_rx = &sc->nge_ldata->nge_rx_list[i]; rxstat = cur_rx->nge_rxstat; extsts = cur_rx->nge_extsts; m = cur_rx->nge_mbuf; cur_rx->nge_mbuf = NULL; total_len = NGE_RXBYTES(cur_rx); NGE_INC(i, NGE_RX_LIST_CNT); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (!(rxstat & NGE_CMDSTS_PKT_OK)) { ifp->if_ierrors++; nge_newbuf(sc, cur_rx, m); continue; } /* * Ok. NatSemi really screwed up here. This is the * only gigE chip I know of with alignment constraints * on receive buffers. RX buffers must be 64-bit aligned. */ #ifdef __i386__ /* * By popular demand, ignore the alignment problems * on the Intel x86 platform. 
The performance hit * incurred due to unaligned accesses is much smaller * than the hit produced by forcing buffer copies all * the time, especially with jumbo frames. We still * need to fix up the alignment everywhere else though. */ if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) { #endif m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); nge_newbuf(sc, cur_rx, m); if (m0 == NULL) { printf("nge%d: no receive buffers " "available -- packet dropped!\n", sc->nge_unit); ifp->if_ierrors++; continue; } m = m0; #ifdef __i386__ } else { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } #endif ifp->if_ipackets++; /* Do IP checksum checking. */ if (extsts & NGE_RXEXTSTS_IPPKT) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(extsts & NGE_RXEXTSTS_IPCSUMERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((extsts & NGE_RXEXTSTS_TCPPKT && !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) || (extsts & NGE_RXEXTSTS_UDPPKT && !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } /* * If we received a packet with a vlan tag, pass it * to vlan_input() instead of ether_input(). */ if (extsts & NGE_RXEXTSTS_VLANPKT) { VLAN_INPUT_TAG(ifp, m, ntohs(extsts & NGE_RXEXTSTS_VTCI), continue); } (*ifp->if_input)(ifp, m); } sc->nge_cdata.nge_rx_prod = i; return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void nge_txeof(sc) struct nge_softc *sc; { struct nge_desc *cur_tx; struct ifnet *ifp; u_int32_t idx; ifp = &sc->arpcom.ac_if; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. 
*/ idx = sc->nge_cdata.nge_tx_cons; while (idx != sc->nge_cdata.nge_tx_prod) { cur_tx = &sc->nge_ldata->nge_tx_list[idx]; if (NGE_OWNDESC(cur_tx)) break; if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) { sc->nge_cdata.nge_tx_cnt--; NGE_INC(idx, NGE_TX_LIST_CNT); continue; } if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) { ifp->if_oerrors++; if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS) ifp->if_collisions++; if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL) ifp->if_collisions++; } ifp->if_collisions += (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16; ifp->if_opackets++; if (cur_tx->nge_mbuf != NULL) { m_freem(cur_tx->nge_mbuf); cur_tx->nge_mbuf = NULL; ifp->if_flags &= ~IFF_OACTIVE; } sc->nge_cdata.nge_tx_cnt--; NGE_INC(idx, NGE_TX_LIST_CNT); } sc->nge_cdata.nge_tx_cons = idx; if (idx == sc->nge_cdata.nge_tx_prod) ifp->if_timer = 0; return; } static void nge_tick(xsc) void *xsc; { struct nge_softc *sc; struct mii_data *mii; struct ifnet *ifp; int s; s = splimp(); sc = xsc; ifp = &sc->arpcom.ac_if; if (sc->nge_tbi) { if (!sc->nge_link) { if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { printf("nge%d: gigabit link up\n", sc->nge_unit); nge_miibus_statchg(sc->nge_miibus); sc->nge_link++; if (ifp->if_snd.ifq_head != NULL) nge_start(ifp); } } } else { mii = device_get_softc(sc->nge_miibus); mii_tick(mii); if (!sc->nge_link) { if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->nge_link++; if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) printf("nge%d: gigabit link up\n", sc->nge_unit); if (ifp->if_snd.ifq_head != NULL) nge_start(ifp); } } } sc->nge_stat_ch = timeout(nge_tick, sc, hz); splx(s); return; } #ifdef DEVICE_POLLING static poll_handler_t nge_poll; static void nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct nge_softc *sc = ifp->if_softc; if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts 
*/ CSR_WRITE_4(sc, NGE_IER, 1); return; } /* * On the nge, reading the status register also clears it. * So before returning to intr mode we must make sure that all * possible pending sources of interrupts have been served. * In practice this means run to completion the *eof routines, * and then call the interrupt routine */ sc->rxcycles = count; nge_rxeof(sc); nge_txeof(sc); if (ifp->if_snd.ifq_head != NULL) nge_start(ifp); if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { u_int32_t status; /* Reading the ISR register clears all interrupts. */ status = CSR_READ_4(sc, NGE_ISR); if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) nge_rxeof(sc); if (status & (NGE_ISR_RX_IDLE)) NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); if (status & NGE_ISR_SYSERR) { nge_reset(sc); nge_init(sc); } } } #endif /* DEVICE_POLLING */ static void nge_intr(arg) void *arg; { struct nge_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = &sc->arpcom.ac_if; #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) return; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */ CSR_WRITE_4(sc, NGE_IER, 0); nge_poll(ifp, 0, 1); return; } #endif /* DEVICE_POLLING */ /* Supress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { nge_stop(sc); return; } /* Disable interrupts. */ CSR_WRITE_4(sc, NGE_IER, 0); /* Data LED on for TBI mode */ if(sc->nge_tbi) CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT); for (;;) { /* Reading the ISR register clears all interrupts. 
*/ status = CSR_READ_4(sc, NGE_ISR); if ((status & NGE_INTRS) == 0) break; if ((status & NGE_ISR_TX_DESC_OK) || (status & NGE_ISR_TX_ERR) || (status & NGE_ISR_TX_OK) || (status & NGE_ISR_TX_IDLE)) nge_txeof(sc); if ((status & NGE_ISR_RX_DESC_OK) || (status & NGE_ISR_RX_ERR) || (status & NGE_ISR_RX_OFLOW) || (status & NGE_ISR_RX_FIFO_OFLOW) || (status & NGE_ISR_RX_IDLE) || (status & NGE_ISR_RX_OK)) nge_rxeof(sc); if ((status & NGE_ISR_RX_IDLE)) NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); if (status & NGE_ISR_SYSERR) { nge_reset(sc); ifp->if_flags &= ~IFF_RUNNING; nge_init(sc); } #if 0 /* * XXX: nge_tick() is not ready to be called this way * it screws up the aneg timeout because mii_tick() is * only to be called once per second. */ if (status & NGE_IMR_PHY_INTR) { sc->nge_link = 0; nge_tick(sc); } #endif } /* Re-enable interrupts. */ CSR_WRITE_4(sc, NGE_IER, 1); if (ifp->if_snd.ifq_head != NULL) nge_start(ifp); /* Data LED off for TBI mode */ if(sc->nge_tbi) CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int nge_encap(sc, m_head, txidx) struct nge_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct nge_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0; struct m_tag *mtag; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ m = m_head; cur = frag = *txidx; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if ((NGE_TX_LIST_CNT - (sc->nge_cdata.nge_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc->nge_ldata->nge_tx_list[frag]; f->nge_ctl = NGE_CMDSTS_MORE | m->m_len; f->nge_ptr = vtophys(mtod(m, vm_offset_t)); if (cnt != 0) f->nge_ctl |= NGE_CMDSTS_OWN; cur = frag; NGE_INC(frag, NGE_TX_LIST_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0; if (m_head->m_pkthdr.csum_flags) { if (m_head->m_pkthdr.csum_flags & CSUM_IP) sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= NGE_TXEXTSTS_IPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= NGE_TXEXTSTS_TCPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_UDP) sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= NGE_TXEXTSTS_UDPCSUM; } mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m); if (mtag != NULL) { sc->nge_ldata->nge_tx_list[cur].nge_extsts |= (NGE_TXEXTSTS_VLANPKT|htons(VLAN_TAG_VALUE(mtag))); } sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head; sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE; sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN; sc->nge_cdata.nge_tx_cnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
*/ static void nge_start(ifp) struct ifnet *ifp; { struct nge_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx; sc = ifp->if_softc; if (!sc->nge_link) return; idx = sc->nge_cdata.nge_tx_prod; if (ifp->if_flags & IFF_OACTIVE) return; while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (nge_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } /* Transmit */ sc->nge_cdata.nge_tx_prod = idx; NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; return; } static void nge_init(xsc) void *xsc; { struct nge_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; int s; if (ifp->if_flags & IFF_RUNNING) return; s = splimp(); /* * Cancel pending I/O and free all RX/TX buffers. */ nge_stop(sc); if (sc->nge_tbi) { mii = NULL; } else { mii = device_get_softc(sc->nge_miibus); } /* Set MAC address */ CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); CSR_WRITE_4(sc, NGE_RXFILT_DATA, ((u_int16_t *)sc->arpcom.ac_enaddr)[0]); CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); CSR_WRITE_4(sc, NGE_RXFILT_DATA, ((u_int16_t *)sc->arpcom.ac_enaddr)[1]); CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); CSR_WRITE_4(sc, NGE_RXFILT_DATA, ((u_int16_t *)sc->arpcom.ac_enaddr)[2]); /* Init circular RX list. */ if (nge_list_rx_init(sc) == ENOBUFS) { printf("nge%d: initialization failed: no " "memory for rx buffers\n", sc->nge_unit); nge_stop(sc); (void)splx(s); return; } /* * Init tx descriptors. */ nge_list_tx_init(sc); /* * For the NatSemi chip, we have to explicitly enable the * reception of ARP frames, as well as turn on the 'perfect * match' filter where we store the station address, otherwise * we won't receive unicasts meant for this host. 
*/ NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP); NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); } else { NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); } /* * Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); } else { NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); } /* * Load the multicast filter. */ nge_setmulti(sc); /* Turn the receive filter on */ NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE); /* * Load the address of the RX and TX lists. */ CSR_WRITE_4(sc, NGE_RX_LISTPTR, vtophys(&sc->nge_ldata->nge_rx_list[0])); CSR_WRITE_4(sc, NGE_TX_LISTPTR, vtophys(&sc->nge_ldata->nge_tx_list[0])); /* Set RX configuration */ CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); /* * Enable hardware checksum validation for all IPv4 * packets, do not reject packets with bad checksums. */ CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); /* * Tell the chip to detect and strip VLAN tag info from * received frames. The tag will be provided in the extsts * field in the RX descriptors. */ NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB); /* Set TX configuration */ CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); /* * Enable TX IPv4 checksumming on a per-packet basis. */ CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); /* * Tell the chip to insert VLAN tags on a per-packet basis as * dictated by the code in the frame encapsulation routine. */ NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); /* Set full/half duplex mode. 
*/ if (sc->nge_tbi) { if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX) { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } } else { if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } } nge_tick(sc); /* * Enable the delivery of PHY interrupts based on * link/speed/duplex status changes. Also enable the * extsts field in the DMA descriptors (needed for * TCP/IP checksum offload on transmit). */ NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD| NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB); /* * Configure interrupt holdoff (moderation). We can * have the chip delay interrupt delivery for a certain * period. Units are in 100us, and the max setting * is 25500us (0xFF x 100us). Default is a 100us holdoff. */ CSR_WRITE_4(sc, NGE_IHR, 0x01); /* * Enable interrupts. */ CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); #ifdef DEVICE_POLLING /* * ... only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_4(sc, NGE_IER, 0); else #endif /* DEVICE_POLLING */ CSR_WRITE_4(sc, NGE_IER, 1); /* Enable receiver and transmitter. */ NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); nge_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; (void)splx(s); return; } /* * Set media options. 
*/
/*
 * ifmedia "change" callback: program the selected media.  For TBI
 * (fiber) interfaces the TBI registers are written directly; for
 * copper interfaces the request is handed to the MII layer after
 * resetting every attached PHY.  Always returns 0.
 */
static int
nge_ifmedia_upd(struct ifnet *ifp)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;

	if (sc->nge_tbi) {
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		    == IFM_AUTO) {
			/* Advertise everything and (re)start autoneg. */
			CSR_WRITE_4(sc, NGE_TBI_ANAR,
				CSR_READ_4(sc, NGE_TBI_ANAR)
					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
				| NGE_TBIBMCR_RESTART_ANEG);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
			   == IFM_FDX) {
			/* Forced full duplex, autoneg off. */
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		} else {
			/* Forced half duplex, autoneg off. */
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		}

		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);
	} else {
		mii = device_get_softc(sc->nge_miibus);
		sc->nge_link = 0;
		if (mii->mii_instance) {
			for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
			    miisc = LIST_NEXT(miisc, mii_list))
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}

	return (0);
}

/*
 * Report current media status.
*/ static void nge_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct nge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->nge_tbi) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { ifmr->ifm_status |= IFM_ACTIVE; } if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK) ifmr->ifm_active |= IFM_LOOP; if (!CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status = 0; return; } ifmr->ifm_active |= IFM_1000_SX; if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) { ifmr->ifm_active |= IFM_AUTO; if (CSR_READ_4(sc, NGE_TBI_ANLPAR) & NGE_TBIANAR_FDX) { ifmr->ifm_active |= IFM_FDX; }else if (CSR_READ_4(sc, NGE_TBI_ANLPAR) & NGE_TBIANAR_HDX) { ifmr->ifm_active |= IFM_HDX; } } else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else { mii = device_get_softc(sc->nge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } return; } static int nge_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct nge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int s, error = 0; s = splimp(); switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu > NGE_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; /* * Workaround: if the MTU is larger than * 8152 (TX FIFO size minus 64 minus 18), turn off * TX checksum offloading. 
*/ if (ifr->ifr_mtu >= 8152) { ifp->if_capenable &= ~IFCAP_TXCSUM; ifp->if_hwassist = 0; } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist = NGE_CSUM_FEATURES; } } break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->nge_if_flags & IFF_PROMISC)) { NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS| NGE_RXFILTCTL_ALLMULTI); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->nge_if_flags & IFF_PROMISC) { NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); if (!(ifp->if_flags & IFF_ALLMULTI)) NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); } else { ifp->if_flags &= ~IFF_RUNNING; nge_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) nge_stop(sc); } sc->nge_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: nge_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->nge_tbi) { error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia, command); } else { mii = device_get_softc(sc->nge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: ifp->if_capenable &= ~IFCAP_POLLING; ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; break; default: error = ether_ioctl(ifp, command, data); break; } (void)splx(s); return(error); } static void nge_watchdog(ifp) struct ifnet *ifp; { struct nge_softc *sc; sc = ifp->if_softc; ifp->if_oerrors++; printf("nge%d: watchdog timeout\n", sc->nge_unit); nge_stop(sc); nge_reset(sc); ifp->if_flags &= ~IFF_RUNNING; nge_init(sc); if (ifp->if_snd.ifq_head != NULL) nge_start(ifp); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
*/ static void nge_stop(sc) struct nge_softc *sc; { register int i; struct ifnet *ifp; struct mii_data *mii; ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; if (sc->nge_tbi) { mii = NULL; } else { mii = device_get_softc(sc->nge_miibus); } untimeout(nge_tick, sc, sc->nge_stat_ch); #ifdef DEVICE_POLLING ether_poll_deregister(ifp); #endif CSR_WRITE_4(sc, NGE_IER, 0); CSR_WRITE_4(sc, NGE_IMR, 0); NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); DELAY(1000); CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0); CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0); if (!sc->nge_tbi) mii_down(mii); sc->nge_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < NGE_RX_LIST_CNT; i++) { if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) { m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf); sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL; } } bzero((char *)&sc->nge_ldata->nge_rx_list, sizeof(sc->nge_ldata->nge_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < NGE_TX_LIST_CNT; i++) { if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) { m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf); sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL; } } bzero((char *)&sc->nge_ldata->nge_tx_list, sizeof(sc->nge_ldata->nge_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void nge_shutdown(dev) device_t dev; { struct nge_softc *sc; sc = device_get_softc(dev); nge_reset(sc); nge_stop(sc); return; } Index: head/sys/dev/nmdm/nmdm.c =================================================================== --- head/sys/dev/nmdm/nmdm.c (revision 129878) +++ head/sys/dev/nmdm/nmdm.c (revision 129879) @@ -1,616 +1,617 @@ /* * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); /* * Pseudo-nulmodem driver * Mighty handy for use with serial console in Vmware */ #include "opt_compat.h" #include "opt_tty.h" #include #include #if defined(COMPAT_43) || defined(COMPAT_SUNOS) #include #endif #include #include #include #include #include #include +#include #include #include #include MALLOC_DEFINE(M_NLMDM, "nullmodem", "nullmodem data structures"); static void nmdmstart(struct tty *tp); static void nmdmstop(struct tty *tp, int rw); static void wakeup_other(struct tty *tp, int flag); static void nmdminit(dev_t dev); static d_open_t nmdmopen; static d_close_t nmdmclose; static d_read_t nmdmread; static d_write_t nmdmwrite; static d_ioctl_t nmdmioctl; static struct cdevsw nmdm_cdevsw = { .d_version = D_VERSION, .d_open = nmdmopen, .d_close = nmdmclose, .d_read = nmdmread, .d_write = nmdmwrite, .d_ioctl = nmdmioctl, .d_name = "nmdn", .d_flags = D_TTY | D_PSEUDO | D_NEEDGIANT, }; #define BUFSIZ 100 /* Chunk size iomoved to/from user */ #define NMDM_MAX_NUM 128 /* Artificially limit # devices. 
*/ #define PF_STOPPED 0x10 /* user told stopped */ #define BFLAG CLONE_FLAG0 struct softpart { struct tty nm_tty; dev_t dev; int modemsignals; /* bits defined in sys/ttycom.h */ int gotbreak; }; struct nm_softc { TAILQ_ENTRY(nm_softc) pt_list; int pt_flags; struct softpart part1, part2; struct prison *pt_prison; }; static struct clonedevs *nmdmclones; static TAILQ_HEAD(,nm_softc) nmdmhead = TAILQ_HEAD_INITIALIZER(nmdmhead); static void nmdm_clone(void *arg, char *name, int nameen, dev_t *dev) { int i, unit; char *p; dev_t d1, d2; if (*dev != NODEV) return; if (strcmp(name, "nmdm") == 0) { p = NULL; unit = -1; } else { i = dev_stdclone(name, &p, "nmdm", &unit); if (i == 0) return; if (p[0] != '\0' && p[0] != 'A' && p[0] != 'B') return; else if (p[0] != '\0' && p[1] != '\0') return; } i = clone_create(&nmdmclones, &nmdm_cdevsw, &unit, &d1, 0); if (i) { d1 = make_dev(&nmdm_cdevsw, unit2minor(unit), 0, 0, 0666, "nmdm%dA", unit); if (d1 == NULL) return; d2 = make_dev(&nmdm_cdevsw, unit2minor(unit) | BFLAG, 0, 0, 0666, "nmdm%dB", unit); if (d2 == NULL) { destroy_dev(d1); return; } d2->si_drv2 = d1; d1->si_drv2 = d2; dev_depends(d1, d2); dev_depends(d2, d1); d1->si_flags |= SI_CHEAPCLONE; d2->si_flags |= SI_CHEAPCLONE; } if (p != NULL && p[0] == 'B') *dev = d1->si_drv2; else *dev = d1; } static void nmdm_crossover(struct nm_softc *pti, struct softpart *ourpart, struct softpart *otherpart); #define GETPARTS(tp, ourpart, otherpart) \ do { \ struct nm_softc *pti = tp->t_dev->si_drv1; \ if (tp == &pti->part1.nm_tty) { \ ourpart = &pti->part1; \ otherpart = &pti->part2; \ } else { \ ourpart = &pti->part2; \ otherpart = &pti->part1; \ } \ } while (0) /* * This function creates and initializes a pair of ttys. 
*/ static void nmdminit(dev_t dev1) { dev_t dev2; struct nm_softc *pt; dev2 = dev1->si_drv2; dev1->si_flags &= ~SI_CHEAPCLONE; dev2->si_flags &= ~SI_CHEAPCLONE; pt = malloc(sizeof(*pt), M_NLMDM, M_WAITOK | M_ZERO); TAILQ_INSERT_TAIL(&nmdmhead, pt, pt_list); dev1->si_drv1 = dev2->si_drv1 = pt; pt->part1.dev = dev1; pt->part2.dev = dev2; dev1->si_tty = &pt->part1.nm_tty; dev2->si_tty = &pt->part2.nm_tty; ttyregister(&pt->part1.nm_tty); ttyregister(&pt->part2.nm_tty); pt->part1.nm_tty.t_oproc = nmdmstart; pt->part2.nm_tty.t_oproc = nmdmstart; pt->part1.nm_tty.t_stop = nmdmstop; pt->part2.nm_tty.t_stop = nmdmstop; pt->part2.nm_tty.t_dev = dev1; pt->part1.nm_tty.t_dev = dev2; } /* * Device opened from userland */ static int nmdmopen(dev_t dev, int flag, int devtype, struct thread *td) { register struct tty *tp, *tp2; int error; struct nm_softc *pti; struct softpart *ourpart, *otherpart; if (dev->si_drv1 == NULL) nmdminit(dev); pti = dev->si_drv1; if (minor(dev) & BFLAG) tp = &pti->part2.nm_tty; else tp = &pti->part1.nm_tty; GETPARTS(tp, ourpart, otherpart); tp2 = &otherpart->nm_tty; ourpart->modemsignals |= TIOCM_LE; if ((tp->t_state & TS_ISOPEN) == 0) { ttychars(tp); /* Set up default chars */ tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; tp->t_lflag = TTYDEF_LFLAG; tp->t_cflag = TTYDEF_CFLAG; tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; } else if (tp->t_state & TS_XCLUDE && suser(td)) { return (EBUSY); } else if (pti->pt_prison != td->td_ucred->cr_prison) { return (EBUSY); } /* * If the other side is open we have carrier */ if (tp2->t_state & TS_ISOPEN) { (void)(*linesw[tp->t_line].l_modem)(tp, 1); } /* * And the other side gets carrier as we are now open. */ (void)(*linesw[tp2->t_line].l_modem)(tp2, 1); /* External processing makes no sense here */ tp->t_lflag &= ~EXTPROC; /* * Wait here if we don't have carrier. 
*/ #if 0 while ((tp->t_state & TS_CARR_ON) == 0) { if (flag & FNONBLOCK) break; error = ttysleep(tp, TSA_CARR_ON(tp), TTIPRI | PCATCH, "nmdopn", 0); if (error) return (error); } #endif /* * Give the line disciplin a chance to set this end up. */ error = (*linesw[tp->t_line].l_open)(dev, tp); /* * Wake up the other side. * Theoretically not needed. */ ourpart->modemsignals |= TIOCM_DTR; nmdm_crossover(pti, ourpart, otherpart); if (error == 0) wakeup_other(tp, FREAD|FWRITE); /* XXX */ return (error); } /* * Device closed again */ static int nmdmclose(dev_t dev, int flag, int mode, struct thread *td) { register struct tty *tp, *tp2; int err; struct softpart *ourpart, *otherpart; /* * let the other end know that the game is up */ tp = dev->si_tty; GETPARTS(tp, ourpart, otherpart); tp2 = &otherpart->nm_tty; (void)(*linesw[tp2->t_line].l_modem)(tp2, 0); /* * XXX MDMBUF makes no sense for nmdms but would inhibit the above * l_modem(). CLOCAL makes sense but isn't supported. Special * l_modem()s that ignore carrier drop make no sense for nmdms but * may be in use because other parts of the line discipline make * sense for nmdms. Recover by doing everything that a normal * ttymodem() would have done except for sending a SIGHUP. 
*/ if (tp2->t_state & TS_ISOPEN) { tp2->t_state &= ~(TS_CARR_ON | TS_CONNECTED); tp2->t_state |= TS_ZOMBIE; ttyflush(tp2, FREAD | FWRITE); } err = (*linesw[tp->t_line].l_close)(tp, flag); ourpart->modemsignals &= ~TIOCM_DTR; nmdm_crossover(dev->si_drv1, ourpart, otherpart); nmdmstop(tp, FREAD|FWRITE); (void) ttyclose(tp); return (err); } /* * handle read(2) request from userland */ static int nmdmread(dev_t dev, struct uio *uio, int flag) { int error = 0; struct tty *tp, *tp2; struct softpart *ourpart, *otherpart; tp = dev->si_tty; GETPARTS(tp, ourpart, otherpart); tp2 = &otherpart->nm_tty; #if 0 if (tp2->t_state & TS_ISOPEN) { error = (*linesw[tp->t_line].l_read)(tp, uio, flag); wakeup_other(tp, FWRITE); } else { if (flag & IO_NDELAY) { return (EWOULDBLOCK); } error = tsleep(TSA_PTC_READ(tp), TTIPRI | PCATCH, "nmdout", 0); } } #else if ((error = (*linesw[tp->t_line].l_read)(tp, uio, flag)) == 0) wakeup_other(tp, FWRITE); #endif return (error); } /* * Write to pseudo-tty. * Wakeups of controlling tty will happen * indirectly, when tty driver calls nmdmstart. */ static int nmdmwrite(dev_t dev, struct uio *uio, int flag) { register u_char *cp = 0; register int cc = 0; u_char locbuf[BUFSIZ]; int cnt = 0; int error = 0; struct tty *tp1, *tp; struct softpart *ourpart, *otherpart; tp1 = dev->si_tty; /* * Get the other tty struct. * basically we are writing into the INPUT side of the other device. 
*/ GETPARTS(tp1, ourpart, otherpart); tp = &otherpart->nm_tty; again: if ((tp->t_state & TS_ISOPEN) == 0) return (EIO); while (uio->uio_resid > 0 || cc > 0) { /* * Fill up the buffer if it's empty */ if (cc == 0) { cc = min(uio->uio_resid, BUFSIZ); cp = locbuf; error = uiomove((caddr_t)cp, cc, uio); if (error) return (error); /* check again for safety */ if ((tp->t_state & TS_ISOPEN) == 0) { /* adjust for data copied in but not written */ uio->uio_resid += cc; return (EIO); } } while (cc > 0) { if (((tp->t_rawq.c_cc + tp->t_canq.c_cc) >= (TTYHOG-2)) && ((tp->t_canq.c_cc > 0) || !(tp->t_iflag&ICANON))) { /* * Come here to wait for space in outq, * or space in rawq, or an empty canq. */ wakeup(TSA_HUP_OR_INPUT(tp)); if ((tp->t_state & TS_CONNECTED) == 0) { /* * Data piled up because not connected. * Adjust for data copied in but * not written. */ uio->uio_resid += cc; return (EIO); } if (flag & IO_NDELAY) { /* * Don't wait if asked not to. * Adjust for data copied in but * not written. */ uio->uio_resid += cc; if (cnt == 0) return (EWOULDBLOCK); return (0); } error = tsleep(TSA_PTC_WRITE(tp), TTOPRI | PCATCH, "nmdout", 0); if (error) { /* * Tsleep returned (signal?). * Go find out what the user wants. * adjust for data copied in but * not written */ uio->uio_resid += cc; return (error); } goto again; } (*linesw[tp->t_line].l_rint)(*cp++, tp); cnt++; cc--; } cc = 0; } return (0); } /* * Start output on pseudo-tty. * Wake up process selecting or sleeping for input from controlling tty. 
*/ static void nmdmstart(struct tty *tp) { register struct nm_softc *pti = tp->t_dev->si_drv1; if (tp->t_state & TS_TTSTOP) return; pti->pt_flags &= ~PF_STOPPED; wakeup_other(tp, FREAD); } /* Wakes up the OTHER tty;*/ static void wakeup_other(struct tty *tp, int flag) { struct softpart *ourpart, *otherpart; GETPARTS(tp, ourpart, otherpart); if (flag & FREAD) { selwakeuppri(&otherpart->nm_tty.t_rsel, TTIPRI); wakeup(TSA_PTC_READ((&otherpart->nm_tty))); } if (flag & FWRITE) { selwakeuppri(&otherpart->nm_tty.t_wsel, TTOPRI); wakeup(TSA_PTC_WRITE((&otherpart->nm_tty))); } } /* * stopped output on tty, called when device is closed */ static void nmdmstop(register struct tty *tp, int flush) { struct nm_softc *pti = tp->t_dev->si_drv1; int flag; /* note: FLUSHREAD and FLUSHWRITE already ok */ if (flush == 0) { flush = TIOCPKT_STOP; pti->pt_flags |= PF_STOPPED; } else pti->pt_flags &= ~PF_STOPPED; /* change of perspective */ flag = 0; if (flush & FREAD) flag |= FWRITE; if (flush & FWRITE) flag |= FREAD; wakeup_other(tp, flag); } /* * handle ioctl(2) request from userland */ static int nmdmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { register struct tty *tp = dev->si_tty; struct nm_softc *pti = dev->si_drv1; int error, s; register struct tty *tp2; struct softpart *ourpart, *otherpart; s = spltty(); GETPARTS(tp, ourpart, otherpart); tp2 = &otherpart->nm_tty; error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, td); if (error == ENOIOCTL) error = ttioctl(tp, cmd, data, flag); if (error == ENOIOCTL) { switch (cmd) { case TIOCSBRK: otherpart->gotbreak = 1; break; case TIOCCBRK: break; case TIOCSDTR: ourpart->modemsignals |= TIOCM_DTR; break; case TIOCCDTR: ourpart->modemsignals &= TIOCM_DTR; break; case TIOCMSET: ourpart->modemsignals = *(int *)data; otherpart->modemsignals = *(int *)data; break; case TIOCMBIS: ourpart->modemsignals |= *(int *)data; break; case TIOCMBIC: ourpart->modemsignals &= ~(*(int *)data); otherpart->modemsignals &= 
~(*(int *)data); break; case TIOCMGET: *(int *)data = ourpart->modemsignals; break; case TIOCMSDTRWAIT: break; case TIOCMGDTRWAIT: *(int *)data = 0; break; case TIOCTIMESTAMP: /* FALLTHROUGH */ case TIOCDCDTIMESTAMP: default: splx(s); error = ENOTTY; return (error); } error = 0; nmdm_crossover(pti, ourpart, otherpart); } splx(s); return (error); } static void nmdm_crossover(struct nm_softc *pti, struct softpart *ourpart, struct softpart *otherpart) { otherpart->modemsignals &= ~(TIOCM_CTS|TIOCM_CAR); if (ourpart->modemsignals & TIOCM_RTS) otherpart->modemsignals |= TIOCM_CTS; if (ourpart->modemsignals & TIOCM_DTR) otherpart->modemsignals |= TIOCM_CAR; } /* * Module handling */ static int nmdm_modevent(module_t mod, int type, void *data) { static eventhandler_tag tag; struct nm_softc *pt, *tpt; int error = 0; switch(type) { case MOD_LOAD: clone_setup(&nmdmclones); tag = EVENTHANDLER_REGISTER(dev_clone, nmdm_clone, 0, 1000); if (tag == NULL) return (ENOMEM); break; case MOD_SHUTDOWN: /* FALLTHROUGH */ case MOD_UNLOAD: EVENTHANDLER_DEREGISTER(dev_clone, tag); TAILQ_FOREACH_SAFE(pt, &nmdmhead, pt_list, tpt) { destroy_dev(pt->part1.dev); TAILQ_REMOVE(&nmdmhead, pt, pt_list); free(pt, M_NLMDM); } clone_cleanup(&nmdmclones); break; default: error = EOPNOTSUPP; } return (error); } DEV_MODULE(nmdm, nmdm_modevent, NULL); Index: head/sys/dev/pci/eisa_pci.c =================================================================== --- head/sys/dev/pci/eisa_pci.c (revision 129878) +++ head/sys/dev/pci/eisa_pci.c (revision 129879) @@ -1,127 +1,128 @@ /*- * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * PCI:EISA bridge support */ #include #include +#include #include #include #include static int eisab_probe(device_t dev); static int eisab_attach(device_t dev); static device_method_t eisab_methods[] = { /* Device interface */ DEVMETHOD(device_probe, eisab_probe), DEVMETHOD(device_attach, eisab_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t eisab_driver = { "eisab", eisab_methods, 0, }; static devclass_t eisab_devclass; DRIVER_MODULE(eisab, pci, eisab_driver, eisab_devclass, 0, 0); static int eisab_probe(device_t dev) { int matched = 0; /* * Generic match by class/subclass. */ if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_EISA)) matched = 1; /* * Some bridges don't correctly report their class. */ switch (pci_get_devid(dev)) { case 0x04828086: /* may show up as PCI-HOST or 0:0 */ matched = 1; break; default: break; } if (matched) { device_set_desc(dev, "PCI-EISA bridge"); return(-10000); } return(ENXIO); } static int eisab_attach(device_t dev) { /* * Attach an EISA bus. Note that we can only have one EISA bus. */ if (!devclass_get_device(devclass_find("eisa"), 0)) device_add_child(dev, "eisa", -1); /* * Attach an ISA bus as well, since the EISA bus may have ISA * cards installed, and we may have no EISA support in the system. 
*/ if (!devclass_get_device(devclass_find("isa"), 0)) device_add_child(dev, "isa", -1); bus_generic_attach(dev); return(0); } Index: head/sys/dev/ppbus/pcfclock.c =================================================================== --- head/sys/dev/ppbus/pcfclock.c (revision 129878) +++ head/sys/dev/ppbus/pcfclock.c (revision 129879) @@ -1,340 +1,341 @@ /* * Copyright (c) 2000 Sascha Schumann. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY SASCHA SCHUMANN ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * */ #include __FBSDID("$FreeBSD$"); #include "opt_pcfclock.h" #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #define PCFCLOCK_NAME "pcfclock" struct pcfclock_data { int count; }; #define DEVTOSOFTC(dev) \ ((struct pcfclock_data *)device_get_softc(dev)) #define UNITOSOFTC(unit) \ ((struct pcfclock_data *)devclass_get_softc(pcfclock_devclass, (unit))) #define UNITODEVICE(unit) \ (devclass_get_device(pcfclock_devclass, (unit))) static devclass_t pcfclock_devclass; static d_open_t pcfclock_open; static d_close_t pcfclock_close; static d_read_t pcfclock_read; static struct cdevsw pcfclock_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = pcfclock_open, .d_close = pcfclock_close, .d_read = pcfclock_read, .d_name = PCFCLOCK_NAME, }; #ifndef PCFCLOCK_MAX_RETRIES #define PCFCLOCK_MAX_RETRIES 10 #endif #define AFC_HI 0 #define AFC_LO AUTOFEED /* AUTO FEED is used as clock */ #define AUTOFEED_CLOCK(val) \ ctr = (ctr & ~(AUTOFEED)) ^ (val); ppb_wctr(ppbus, ctr) /* SLCT is used as clock */ #define CLOCK_OK \ ((ppb_rstr(ppbus) & SELECT) == (i & 1 ? 
SELECT : 0)) /* PE is used as data */ #define BIT_SET (ppb_rstr(ppbus)&PERROR) /* the first byte sent as reply must be 00001001b */ #define PCFCLOCK_CORRECT_SYNC(buf) (buf[0] == 9) #define NR(buf, off) (buf[off+1]*10+buf[off]) /* check for correct input values */ #define PCFCLOCK_CORRECT_FORMAT(buf) (\ NR(buf, 14) <= 99 && \ NR(buf, 12) <= 12 && \ NR(buf, 10) <= 31 && \ NR(buf, 6) <= 23 && \ NR(buf, 4) <= 59 && \ NR(buf, 2) <= 59) #define PCFCLOCK_BATTERY_STATUS_LOW(buf) (buf[8] & 4) #define PCFCLOCK_CMD_TIME 0 /* send current time */ #define PCFCLOCK_CMD_COPY 7 /* copy received signal to PC */ static void pcfclock_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, PCFCLOCK_NAME, 0); if (!dev) BUS_ADD_CHILD(parent, 0, PCFCLOCK_NAME, -1); } static int pcfclock_probe(device_t dev) { struct pcfclock_data *sc; device_set_desc(dev, "PCF-1.0"); sc = DEVTOSOFTC(dev); bzero(sc, sizeof(struct pcfclock_data)); return (0); } static int pcfclock_attach(device_t dev) { int unit; unit = device_get_unit(dev); make_dev(&pcfclock_cdevsw, unit, UID_ROOT, GID_WHEEL, 0400, PCFCLOCK_NAME "%d", unit); return (0); } static int pcfclock_open(dev_t dev, int flag, int fms, struct thread *td) { u_int unit = minor(dev); struct pcfclock_data *sc = UNITOSOFTC(unit); device_t pcfclockdev = UNITODEVICE(unit); device_t ppbus = device_get_parent(pcfclockdev); int res; if (!sc) return (ENXIO); if ((res = ppb_request_bus(ppbus, pcfclockdev, (flag & O_NONBLOCK) ? 
PPB_DONTWAIT : PPB_WAIT))) return (res); sc->count++; return (0); } static int pcfclock_close(dev_t dev, int flags, int fmt, struct thread *td) { u_int unit = minor(dev); struct pcfclock_data *sc = UNITOSOFTC(unit); device_t pcfclockdev = UNITODEVICE(unit); device_t ppbus = device_get_parent(pcfclockdev); sc->count--; if (sc->count == 0) ppb_release_bus(ppbus, pcfclockdev); return (0); } static void pcfclock_write_cmd(dev_t dev, unsigned char command) { u_int unit = minor(dev); device_t ppidev = UNITODEVICE(unit); device_t ppbus = device_get_parent(ppidev); unsigned char ctr = 14; char i; for (i = 0; i <= 7; i++) { ppb_wdtr(ppbus, i); AUTOFEED_CLOCK(i & 1 ? AFC_HI : AFC_LO); DELAY(3000); } ppb_wdtr(ppbus, command); AUTOFEED_CLOCK(AFC_LO); DELAY(3000); AUTOFEED_CLOCK(AFC_HI); } static void pcfclock_display_data(dev_t dev, char buf[18]) { u_int unit = minor(dev); #ifdef PCFCLOCK_VERBOSE int year; year = NR(buf, 14); if (year < 70) year += 100; printf(PCFCLOCK_NAME "%d: %02d.%02d.%4d %02d:%02d:%02d, " "battery status: %s\n", unit, NR(buf, 10), NR(buf, 12), 1900 + year, NR(buf, 6), NR(buf, 4), NR(buf, 2), PCFCLOCK_BATTERY_STATUS_LOW(buf) ? "LOW" : "ok"); #else if (PCFCLOCK_BATTERY_STATUS_LOW(buf)) printf(PCFCLOCK_NAME "%d: BATTERY STATUS LOW ON\n", unit); #endif } static int pcfclock_read_data(dev_t dev, char *buf, ssize_t bits) { u_int unit = minor(dev); device_t ppidev = UNITODEVICE(unit); device_t ppbus = device_get_parent(ppidev); int i; char waitfor; int offset; /* one byte per four bits */ bzero(buf, ((bits + 3) >> 2) + 1); waitfor = 100; for (i = 0; i <= bits; i++) { /* wait for clock, maximum (waitfor*100) usec */ while(!CLOCK_OK && --waitfor > 0) DELAY(100); /* timed out? 
*/ if (!waitfor) return (EIO); waitfor = 100; /* reload */ /* give it some time */ DELAY(500); /* calculate offset into buffer */ offset = i >> 2; buf[offset] <<= 1; if (BIT_SET) buf[offset] |= 1; } return (0); } static int pcfclock_read_dev(dev_t dev, char *buf, int maxretries) { u_int unit = minor(dev); device_t ppidev = UNITODEVICE(unit); device_t ppbus = device_get_parent(ppidev); int error = 0; ppb_set_mode(ppbus, PPB_COMPATIBLE); while (--maxretries > 0) { pcfclock_write_cmd(dev, PCFCLOCK_CMD_TIME); if (pcfclock_read_data(dev, buf, 68)) continue; if (!PCFCLOCK_CORRECT_SYNC(buf)) continue; if (!PCFCLOCK_CORRECT_FORMAT(buf)) continue; break; } if (!maxretries) error = EIO; return (error); } static int pcfclock_read(dev_t dev, struct uio *uio, int ioflag) { u_int unit = minor(dev); char buf[18]; int error = 0; if (uio->uio_resid < 18) return (ERANGE); error = pcfclock_read_dev(dev, buf, PCFCLOCK_MAX_RETRIES); if (error) { printf(PCFCLOCK_NAME "%d: no PCF found\n", unit); } else { pcfclock_display_data(dev, buf); uiomove(buf, 18, uio); } return (error); } static device_method_t pcfclock_methods[] = { /* device interface */ DEVMETHOD(device_identify, pcfclock_identify), DEVMETHOD(device_probe, pcfclock_probe), DEVMETHOD(device_attach, pcfclock_attach), { 0, 0 } }; static driver_t pcfclock_driver = { PCFCLOCK_NAME, pcfclock_methods, sizeof(struct pcfclock_data), }; DRIVER_MODULE(pcfclock, ppbus, pcfclock_driver, pcfclock_devclass, 0, 0); Index: head/sys/dev/ppc/ppc.c =================================================================== --- head/sys/dev/ppc/ppc.c (revision 129878) +++ head/sys/dev/ppc/ppc.c (revision 129879) @@ -1,2154 +1,2155 @@ /*- * Copyright (c) 1997-2000 Nicolas Souchu * Copyright (c) 2001 Alcove - Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ppc.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" static int ppc_isa_probe(device_t dev); static void ppcintr(void *arg); #define LOG_PPC(function, ppc, string) \ if (bootverbose) printf("%s: %s\n", function, string) #define DEVTOSOFTC(dev) ((struct ppc_data *)device_get_softc(dev)) devclass_t ppc_devclass; static device_method_t ppc_methods[] = { /* device interface */ DEVMETHOD(device_probe, ppc_isa_probe), DEVMETHOD(device_attach, ppc_attach), /* bus interface */ DEVMETHOD(bus_read_ivar, ppc_read_ivar), DEVMETHOD(bus_setup_intr, ppc_setup_intr), DEVMETHOD(bus_teardown_intr, ppc_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), /* ppbus interface */ DEVMETHOD(ppbus_io, ppc_io), DEVMETHOD(ppbus_exec_microseq, ppc_exec_microseq), DEVMETHOD(ppbus_reset_epp, ppc_reset_epp), DEVMETHOD(ppbus_setmode, ppc_setmode), DEVMETHOD(ppbus_ecp_sync, ppc_ecp_sync), DEVMETHOD(ppbus_read, ppc_read), DEVMETHOD(ppbus_write, ppc_write), { 0, 0 } }; static driver_t ppc_driver = { "ppc", ppc_methods, sizeof(struct ppc_data), }; static char *ppc_models[] = { "SMC-like", "SMC FDC37C665GT", "SMC FDC37C666GT", "PC87332", "PC87306", "82091AA", "Generic", "W83877F", "W83877AF", "Winbond", "PC87334", "SMC FDC37C935", "PC87303", 0 }; /* list of available modes */ static char *ppc_avms[] = { "COMPATIBLE", "NIBBLE-only", "PS2-only", "PS2/NIBBLE", "EPP-only", "EPP/NIBBLE", "EPP/PS2", "EPP/PS2/NIBBLE", "ECP-only", "ECP/NIBBLE", "ECP/PS2", "ECP/PS2/NIBBLE", "ECP/EPP", "ECP/EPP/NIBBLE", "ECP/EPP/PS2", "ECP/EPP/PS2/NIBBLE", 0 }; /* list of current executing modes * Note that few modes do not actually exist. 
*/ static char *ppc_modes[] = { "COMPATIBLE", "NIBBLE", "PS/2", "PS/2", "EPP", "EPP", "EPP", "EPP", "ECP", "ECP", "ECP+PS2", "ECP+PS2", "ECP+EPP", "ECP+EPP", "ECP+EPP", "ECP+EPP", 0 }; static char *ppc_epp_protocol[] = { " (EPP 1.9)", " (EPP 1.7)", 0 }; #ifdef __i386__ /* * BIOS printer list - used by BIOS probe. */ #define BIOS_PPC_PORTS 0x408 #define BIOS_PORTS (short *)(KERNBASE+BIOS_PPC_PORTS) #define BIOS_MAX_PPC 4 #endif /* * ppc_ecp_sync() XXX */ void ppc_ecp_sync(device_t dev) { int i, r; struct ppc_data *ppc = DEVTOSOFTC(dev); if (!(ppc->ppc_avm & PPB_ECP) && !(ppc->ppc_dtm & PPB_ECP)) return; r = r_ecr(ppc); if ((r & 0xe0) != PPC_ECR_EPP) return; for (i = 0; i < 100; i++) { r = r_ecr(ppc); if (r & 0x1) return; DELAY(100); } printf("ppc%d: ECP sync failed as data still " \ "present in FIFO.\n", ppc->ppc_unit); return; } /* * ppc_detect_fifo() * * Detect parallel port FIFO */ static int ppc_detect_fifo(struct ppc_data *ppc) { char ecr_sav; char ctr_sav, ctr, cc; short i; /* save registers */ ecr_sav = r_ecr(ppc); ctr_sav = r_ctr(ppc); /* enter ECP configuration mode, no interrupt, no DMA */ w_ecr(ppc, 0xf4); /* read PWord size - transfers in FIFO mode must be PWord aligned */ ppc->ppc_pword = (r_cnfgA(ppc) & PPC_PWORD_MASK); /* XXX 16 and 32 bits implementations not supported */ if (ppc->ppc_pword != PPC_PWORD_8) { LOG_PPC(__func__, ppc, "PWord not supported"); goto error; } w_ecr(ppc, 0x34); /* byte mode, no interrupt, no DMA */ ctr = r_ctr(ppc); w_ctr(ppc, ctr | PCD); /* set direction to 1 */ /* enter ECP test mode, no interrupt, no DMA */ w_ecr(ppc, 0xd4); /* flush the FIFO */ for (i=0; i<1024; i++) { if (r_ecr(ppc) & PPC_FIFO_EMPTY) break; cc = r_fifo(ppc); } if (i >= 1024) { LOG_PPC(__func__, ppc, "can't flush FIFO"); goto error; } /* enable interrupts, no DMA */ w_ecr(ppc, 0xd0); /* determine readIntrThreshold * fill the FIFO until serviceIntr is set */ for (i=0; i<1024; i++) { w_fifo(ppc, (char)i); if (!ppc->ppc_rthr && (r_ecr(ppc) & 
PPC_SERVICE_INTR)) { /* readThreshold reached */ ppc->ppc_rthr = i+1; } if (r_ecr(ppc) & PPC_FIFO_FULL) { ppc->ppc_fifo = i+1; break; } } if (i >= 1024) { LOG_PPC(__func__, ppc, "can't fill FIFO"); goto error; } w_ecr(ppc, 0xd4); /* test mode, no interrupt, no DMA */ w_ctr(ppc, ctr & ~PCD); /* set direction to 0 */ w_ecr(ppc, 0xd0); /* enable interrupts */ /* determine writeIntrThreshold * empty the FIFO until serviceIntr is set */ for (i=ppc->ppc_fifo; i>0; i--) { if (r_fifo(ppc) != (char)(ppc->ppc_fifo-i)) { LOG_PPC(__func__, ppc, "invalid data in FIFO"); goto error; } if (r_ecr(ppc) & PPC_SERVICE_INTR) { /* writeIntrThreshold reached */ ppc->ppc_wthr = ppc->ppc_fifo - i+1; } /* if FIFO empty before the last byte, error */ if (i>1 && (r_ecr(ppc) & PPC_FIFO_EMPTY)) { LOG_PPC(__func__, ppc, "data lost in FIFO"); goto error; } } /* FIFO must be empty after the last byte */ if (!(r_ecr(ppc) & PPC_FIFO_EMPTY)) { LOG_PPC(__func__, ppc, "can't empty the FIFO"); goto error; } w_ctr(ppc, ctr_sav); w_ecr(ppc, ecr_sav); return (0); error: w_ctr(ppc, ctr_sav); w_ecr(ppc, ecr_sav); return (EINVAL); } static int ppc_detect_port(struct ppc_data *ppc) { w_ctr(ppc, 0x0c); /* To avoid missing PS2 ports */ w_dtr(ppc, 0xaa); if (r_dtr(ppc) != 0xaa) return (0); return (1); } /* * EPP timeout, according to the PC87332 manual * Semantics of clearing EPP timeout bit. * PC87332 - reading SPP_STR does it... * SMC - write 1 to EPP timeout bit XXX * Others - (?) 
write 0 to EPP timeout bit */ static void ppc_reset_epp_timeout(struct ppc_data *ppc) { register char r; r = r_str(ppc); w_str(ppc, r | 0x1); w_str(ppc, r & 0xfe); return; } static int ppc_check_epp_timeout(struct ppc_data *ppc) { ppc_reset_epp_timeout(ppc); return (!(r_str(ppc) & TIMEOUT)); } /* * Configure current operating mode */ static int ppc_generic_setmode(struct ppc_data *ppc, int mode) { u_char ecr = 0; /* check if mode is available */ if (mode && !(ppc->ppc_avm & mode)) return (EINVAL); /* if ECP mode, configure ecr register */ if ((ppc->ppc_avm & PPB_ECP) || (ppc->ppc_dtm & PPB_ECP)) { /* return to byte mode (keeping direction bit), * no interrupt, no DMA to be able to change to * ECP */ w_ecr(ppc, PPC_ECR_RESET); ecr = PPC_DISABLE_INTR; if (mode & PPB_EPP) return (EINVAL); else if (mode & PPB_ECP) /* select ECP mode */ ecr |= PPC_ECR_ECP; else if (mode & PPB_PS2) /* select PS2 mode with ECP */ ecr |= PPC_ECR_PS2; else /* select COMPATIBLE/NIBBLE mode */ ecr |= PPC_ECR_STD; w_ecr(ppc, ecr); } ppc->ppc_mode = mode; return (0); } /* * The ppc driver is free to choose options like FIFO or DMA * if ECP mode is available. * * The 'RAW' option allows the upper drivers to force the ppc mode * even with FIFO, DMA available. 
*/ static int ppc_smclike_setmode(struct ppc_data *ppc, int mode) { u_char ecr = 0; /* check if mode is available */ if (mode && !(ppc->ppc_avm & mode)) return (EINVAL); /* if ECP mode, configure ecr register */ if ((ppc->ppc_avm & PPB_ECP) || (ppc->ppc_dtm & PPB_ECP)) { /* return to byte mode (keeping direction bit), * no interrupt, no DMA to be able to change to * ECP or EPP mode */ w_ecr(ppc, PPC_ECR_RESET); ecr = PPC_DISABLE_INTR; if (mode & PPB_EPP) /* select EPP mode */ ecr |= PPC_ECR_EPP; else if (mode & PPB_ECP) /* select ECP mode */ ecr |= PPC_ECR_ECP; else if (mode & PPB_PS2) /* select PS2 mode with ECP */ ecr |= PPC_ECR_PS2; else /* select COMPATIBLE/NIBBLE mode */ ecr |= PPC_ECR_STD; w_ecr(ppc, ecr); } ppc->ppc_mode = mode; return (0); } #ifdef PPC_PROBE_CHIPSET /* * ppc_pc873xx_detect * * Probe for a Natsemi PC873xx-family part. * * References in this function are to the National Semiconductor * PC87332 datasheet TL/C/11930, May 1995 revision. */ static int pc873xx_basetab[] = {0x0398, 0x026e, 0x015c, 0x002e, 0}; static int pc873xx_porttab[] = {0x0378, 0x03bc, 0x0278, 0}; static int pc873xx_irqtab[] = {5, 7, 5, 0}; static int pc873xx_regstab[] = { PC873_FER, PC873_FAR, PC873_PTR, PC873_FCR, PC873_PCR, PC873_PMC, PC873_TUP, PC873_SID, PC873_PNP0, PC873_PNP1, PC873_LPTBA, -1 }; static char *pc873xx_rnametab[] = { "FER", "FAR", "PTR", "FCR", "PCR", "PMC", "TUP", "SID", "PNP0", "PNP1", "LPTBA", NULL }; static int ppc_pc873xx_detect(struct ppc_data *ppc, int chipset_mode) /* XXX mode never forced */ { static int index = 0; int idport, irq; int ptr, pcr, val, i; while ((idport = pc873xx_basetab[index++])) { /* XXX should check first to see if this location is already claimed */ /* * Pull the 873xx through the power-on ID cycle (2.2,1.). * We can't use this to locate the chip as it may already have * been used by the BIOS. */ (void)inb(idport); (void)inb(idport); (void)inb(idport); (void)inb(idport); /* * Read the SID byte. 
Possible values are : * * 01010xxx PC87334 * 0001xxxx PC87332 * 01110xxx PC87306 * 00110xxx PC87303 */ outb(idport, PC873_SID); val = inb(idport + 1); if ((val & 0xf0) == 0x10) { ppc->ppc_model = NS_PC87332; } else if ((val & 0xf8) == 0x70) { ppc->ppc_model = NS_PC87306; } else if ((val & 0xf8) == 0x50) { ppc->ppc_model = NS_PC87334; } else if ((val & 0xf8) == 0x40) { /* Should be 0x30 by the documentation, but probing yielded 0x40... */ ppc->ppc_model = NS_PC87303; } else { if (bootverbose && (val != 0xff)) printf("PC873xx probe at 0x%x got unknown ID 0x%x\n", idport, val); continue ; /* not recognised */ } /* print registers */ if (bootverbose) { printf("PC873xx"); for (i=0; pc873xx_regstab[i] != -1; i++) { outb(idport, pc873xx_regstab[i]); printf(" %s=0x%x", pc873xx_rnametab[i], inb(idport + 1) & 0xff); } printf("\n"); } /* * We think we have one. Is it enabled and where we want it to be? */ outb(idport, PC873_FER); val = inb(idport + 1); if (!(val & PC873_PPENABLE)) { if (bootverbose) printf("PC873xx parallel port disabled\n"); continue; } outb(idport, PC873_FAR); val = inb(idport + 1); /* XXX we should create a driver instance for every port found */ if (pc873xx_porttab[val & 0x3] != ppc->ppc_base) { /* First try to change the port address to that requested... */ switch(ppc->ppc_base) { case 0x378: val &= 0xfc; break; case 0x3bc: val &= 0xfd; break; case 0x278: val &= 0xfe; break; default: val &= 0xfd; break; } outb(idport, PC873_FAR); outb(idport + 1, val); outb(idport + 1, val); /* Check for success by reading back the value we supposedly wrote and comparing...*/ outb(idport, PC873_FAR); val = inb(idport + 1) & 0x3; /* If we fail, report the failure... */ if (pc873xx_porttab[val] != ppc->ppc_base) { if (bootverbose) printf("PC873xx at 0x%x not for driver at port 0x%x\n", pc873xx_porttab[val], ppc->ppc_base); } continue; } outb(idport, PC873_PTR); ptr = inb(idport + 1); /* get irq settings */ if (ppc->ppc_base == 0x378) irq = (ptr & PC873_LPTBIRQ7) ? 
7 : 5; else irq = pc873xx_irqtab[val]; if (bootverbose) printf("PC873xx irq %d at 0x%x\n", irq, ppc->ppc_base); /* * Check if irq settings are correct */ if (irq != ppc->ppc_irq) { /* * If the chipset is not locked and base address is 0x378, * we have another chance */ if (ppc->ppc_base == 0x378 && !(ptr & PC873_CFGLOCK)) { if (ppc->ppc_irq == 7) { outb(idport + 1, (ptr | PC873_LPTBIRQ7)); outb(idport + 1, (ptr | PC873_LPTBIRQ7)); } else { outb(idport + 1, (ptr & ~PC873_LPTBIRQ7)); outb(idport + 1, (ptr & ~PC873_LPTBIRQ7)); } if (bootverbose) printf("PC873xx irq set to %d\n", ppc->ppc_irq); } else { if (bootverbose) printf("PC873xx sorry, can't change irq setting\n"); } } else { if (bootverbose) printf("PC873xx irq settings are correct\n"); } outb(idport, PC873_PCR); pcr = inb(idport + 1); if ((ptr & PC873_CFGLOCK) || !chipset_mode) { if (bootverbose) printf("PC873xx %s", (ptr & PC873_CFGLOCK)?"locked":"unlocked"); ppc->ppc_avm |= PPB_NIBBLE; if (bootverbose) printf(", NIBBLE"); if (pcr & PC873_EPPEN) { ppc->ppc_avm |= PPB_EPP; if (bootverbose) printf(", EPP"); if (pcr & PC873_EPP19) ppc->ppc_epp = EPP_1_9; else ppc->ppc_epp = EPP_1_7; if ((ppc->ppc_model == NS_PC87332) && bootverbose) { outb(idport, PC873_PTR); ptr = inb(idport + 1); if (ptr & PC873_EPPRDIR) printf(", Regular mode"); else printf(", Automatic mode"); } } else if (pcr & PC873_ECPEN) { ppc->ppc_avm |= PPB_ECP; if (bootverbose) printf(", ECP"); if (pcr & PC873_ECPCLK) { /* XXX */ ppc->ppc_avm |= PPB_PS2; if (bootverbose) printf(", PS/2"); } } else { outb(idport, PC873_PTR); ptr = inb(idport + 1); if (ptr & PC873_EXTENDED) { ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(", SPP"); } } } else { if (bootverbose) printf("PC873xx unlocked"); if (chipset_mode & PPB_ECP) { if ((chipset_mode & PPB_EPP) && bootverbose) printf(", ECP+EPP not supported"); pcr &= ~PC873_EPPEN; pcr |= (PC873_ECPEN | PC873_ECPCLK); /* XXX */ outb(idport + 1, pcr); outb(idport + 1, pcr); if (bootverbose) printf(", ECP"); } else if 
(chipset_mode & PPB_EPP) { pcr &= ~(PC873_ECPEN | PC873_ECPCLK); pcr |= (PC873_EPPEN | PC873_EPP19); outb(idport + 1, pcr); outb(idport + 1, pcr); ppc->ppc_epp = EPP_1_9; /* XXX */ if (bootverbose) printf(", EPP1.9"); /* enable automatic direction turnover */ if (ppc->ppc_model == NS_PC87332) { outb(idport, PC873_PTR); ptr = inb(idport + 1); ptr &= ~PC873_EPPRDIR; outb(idport + 1, ptr); outb(idport + 1, ptr); if (bootverbose) printf(", Automatic mode"); } } else { pcr &= ~(PC873_ECPEN | PC873_ECPCLK | PC873_EPPEN); outb(idport + 1, pcr); outb(idport + 1, pcr); /* configure extended bit in PTR */ outb(idport, PC873_PTR); ptr = inb(idport + 1); if (chipset_mode & PPB_PS2) { ptr |= PC873_EXTENDED; if (bootverbose) printf(", PS/2"); } else { /* default to NIBBLE mode */ ptr &= ~PC873_EXTENDED; if (bootverbose) printf(", NIBBLE"); } outb(idport + 1, ptr); outb(idport + 1, ptr); } ppc->ppc_avm = chipset_mode; } if (bootverbose) printf("\n"); ppc->ppc_type = PPC_TYPE_GENERIC; ppc_generic_setmode(ppc, chipset_mode); return(chipset_mode); } return(-1); } /* * ppc_smc37c66xgt_detect * * SMC FDC37C66xGT configuration. */ static int ppc_smc37c66xgt_detect(struct ppc_data *ppc, int chipset_mode) { int s, i; u_char r; int type = -1; int csr = SMC66x_CSR; /* initial value is 0x3F0 */ int port_address[] = { -1 /* disabled */ , 0x3bc, 0x378, 0x278 }; #define cio csr+1 /* config IO port is either 0x3F1 or 0x371 */ /* * Detection: enter configuration mode and read CRD register. */ s = splhigh(); outb(csr, SMC665_iCODE); outb(csr, SMC665_iCODE); splx(s); outb(csr, 0xd); if (inb(cio) == 0x65) { type = SMC_37C665GT; goto config; } for (i = 0; i < 2; i++) { s = splhigh(); outb(csr, SMC666_iCODE); outb(csr, SMC666_iCODE); splx(s); outb(csr, 0xd); if (inb(cio) == 0x66) { type = SMC_37C666GT; break; } /* Another chance, CSR may be hard-configured to be at 0x370 */ csr = SMC666_CSR; } config: /* * If chipset not found, do not continue. 
*/ if (type == -1) return (-1); /* select CR1 */ outb(csr, 0x1); /* read the port's address: bits 0 and 1 of CR1 */ r = inb(cio) & SMC_CR1_ADDR; if (port_address[(int)r] != ppc->ppc_base) return (-1); ppc->ppc_model = type; /* * CR1 and CR4 registers bits 3 and 0/1 for mode configuration * If SPP mode is detected, try to set ECP+EPP mode */ if (bootverbose) { outb(csr, 0x1); printf("ppc%d: SMC registers CR1=0x%x", ppc->ppc_unit, inb(cio) & 0xff); outb(csr, 0x4); printf(" CR4=0x%x", inb(cio) & 0xff); } /* select CR1 */ outb(csr, 0x1); if (!chipset_mode) { /* autodetect mode */ /* 666GT is ~certainly~ hardwired to an extended ECP+EPP mode */ if (type == SMC_37C666GT) { ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; if (bootverbose) printf(" configuration hardwired, supposing " \ "ECP+EPP SPP"); } else if ((inb(cio) & SMC_CR1_MODE) == 0) { /* already in extended parallel port mode, read CR4 */ outb(csr, 0x4); r = (inb(cio) & SMC_CR4_EMODE); switch (r) { case SMC_SPP: ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(" SPP"); break; case SMC_EPPSPP: ppc->ppc_avm |= PPB_EPP | PPB_SPP; if (bootverbose) printf(" EPP SPP"); break; case SMC_ECP: ppc->ppc_avm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP SPP"); break; case SMC_ECPEPP: ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; if (bootverbose) printf(" ECP+EPP SPP"); break; } } else { /* not an extended port mode */ ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(" SPP"); } } else { /* mode forced */ ppc->ppc_avm = chipset_mode; /* 666GT is ~certainly~ hardwired to an extended ECP+EPP mode */ if (type == SMC_37C666GT) goto end_detect; r = inb(cio); if ((chipset_mode & (PPB_ECP | PPB_EPP)) == 0) { /* do not use ECP when the mode is not forced to */ outb(cio, r | SMC_CR1_MODE); if (bootverbose) printf(" SPP"); } else { /* an extended mode is selected */ outb(cio, r & ~SMC_CR1_MODE); /* read CR4 register and reset mode field */ outb(csr, 0x4); r = inb(cio) & ~SMC_CR4_EMODE; if (chipset_mode & PPB_ECP) { if (chipset_mode & 
PPB_EPP) { outb(cio, r | SMC_ECPEPP); if (bootverbose) printf(" ECP+EPP"); } else { outb(cio, r | SMC_ECP); if (bootverbose) printf(" ECP"); } } else { /* PPB_EPP is set */ outb(cio, r | SMC_EPPSPP); if (bootverbose) printf(" EPP SPP"); } } ppc->ppc_avm = chipset_mode; } /* set FIFO threshold to 16 */ if (ppc->ppc_avm & PPB_ECP) { /* select CRA */ outb(csr, 0xa); outb(cio, 16); } end_detect: if (bootverbose) printf ("\n"); if (ppc->ppc_avm & PPB_EPP) { /* select CR4 */ outb(csr, 0x4); r = inb(cio); /* * Set the EPP protocol... * Low=EPP 1.9 (1284 standard) and High=EPP 1.7 */ if (ppc->ppc_epp == EPP_1_9) outb(cio, (r & ~SMC_CR4_EPPTYPE)); else outb(cio, (r | SMC_CR4_EPPTYPE)); } /* end config mode */ outb(csr, 0xaa); ppc->ppc_type = PPC_TYPE_SMCLIKE; ppc_smclike_setmode(ppc, chipset_mode); return (chipset_mode); } /* * SMC FDC37C935 configuration * Found on many Alpha machines */ static int ppc_smc37c935_detect(struct ppc_data *ppc, int chipset_mode) { int s; int type = -1; s = splhigh(); outb(SMC935_CFG, 0x55); /* enter config mode */ outb(SMC935_CFG, 0x55); splx(s); outb(SMC935_IND, SMC935_ID); /* check device id */ if (inb(SMC935_DAT) == 0x2) type = SMC_37C935; if (type == -1) { outb(SMC935_CFG, 0xaa); /* exit config mode */ return (-1); } ppc->ppc_model = type; outb(SMC935_IND, SMC935_LOGDEV); /* select parallel port, */ outb(SMC935_DAT, 3); /* which is logical device 3 */ /* set io port base */ outb(SMC935_IND, SMC935_PORTHI); outb(SMC935_DAT, (u_char)((ppc->ppc_base & 0xff00) >> 8)); outb(SMC935_IND, SMC935_PORTLO); outb(SMC935_DAT, (u_char)(ppc->ppc_base & 0xff)); if (!chipset_mode) ppc->ppc_avm = PPB_COMPATIBLE; /* default mode */ else { ppc->ppc_avm = chipset_mode; outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_CENT); /* start in compatible mode */ /* SPP + EPP or just plain SPP */ if (chipset_mode & (PPB_SPP)) { if (chipset_mode & PPB_EPP) { if (ppc->ppc_epp == EPP_1_9) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_EPP19SPP); } if 
(ppc->ppc_epp == EPP_1_7) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_EPP17SPP); } } else { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_SPP); } } /* ECP + EPP or just plain ECP */ if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { if (ppc->ppc_epp == EPP_1_9) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECPEPP19); } if (ppc->ppc_epp == EPP_1_7) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECPEPP17); } } else { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECP); } } } outb(SMC935_CFG, 0xaa); /* exit config mode */ ppc->ppc_type = PPC_TYPE_SMCLIKE; ppc_smclike_setmode(ppc, chipset_mode); return (chipset_mode); } /* * Winbond W83877F stuff * * EFER: extended function enable register * EFIR: extended function index register * EFDR: extended function data register */ #define efir ((efer == 0x250) ? 0x251 : 0x3f0) #define efdr ((efer == 0x250) ? 0x252 : 0x3f1) static int w83877f_efers[] = { 0x250, 0x3f0, 0x3f0, 0x250 }; static int w83877f_keys[] = { 0x89, 0x86, 0x87, 0x88 }; static int w83877f_keyiter[] = { 1, 2, 2, 1 }; static int w83877f_hefs[] = { WINB_HEFERE, WINB_HEFRAS, WINB_HEFERE | WINB_HEFRAS, 0 }; static int ppc_w83877f_detect(struct ppc_data *ppc, int chipset_mode) { int i, j, efer; unsigned char r, hefere, hefras; for (i = 0; i < 4; i ++) { /* first try to enable configuration registers */ efer = w83877f_efers[i]; /* write the key to the EFER */ for (j = 0; j < w83877f_keyiter[i]; j ++) outb (efer, w83877f_keys[i]); /* then check HEFERE and HEFRAS bits */ outb (efir, 0x0c); hefere = inb(efdr) & WINB_HEFERE; outb (efir, 0x16); hefras = inb(efdr) & WINB_HEFRAS; /* * HEFRAS HEFERE * 0 1 write 89h to 250h (power-on default) * 1 0 write 86h twice to 3f0h * 1 1 write 87h twice to 3f0h * 0 0 write 88h to 250h */ if ((hefere | hefras) == w83877f_hefs[i]) goto found; } return (-1); /* failed */ found: /* check base port address - read from CR23 */ outb(efir, 0x23); if (ppc->ppc_base != 
inb(efdr) * 4) /* 4 bytes boundaries */ return (-1); /* read CHIP ID from CR9/bits0-3 */ outb(efir, 0x9); switch (inb(efdr) & WINB_CHIPID) { case WINB_W83877F_ID: ppc->ppc_model = WINB_W83877F; break; case WINB_W83877AF_ID: ppc->ppc_model = WINB_W83877AF; break; default: ppc->ppc_model = WINB_UNKNOWN; } if (bootverbose) { /* dump of registers */ printf("ppc%d: 0x%x - ", ppc->ppc_unit, w83877f_keys[i]); for (i = 0; i <= 0xd; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } for (i = 0x10; i <= 0x17; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } outb(efir, 0x1e); printf("0x%x ", inb(efdr)); for (i = 0x20; i <= 0x29; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } printf("\n"); printf("ppc%d:", ppc->ppc_unit); } ppc->ppc_type = PPC_TYPE_GENERIC; if (!chipset_mode) { /* autodetect mode */ /* select CR0 */ outb(efir, 0x0); r = inb(efdr) & (WINB_PRTMODS0 | WINB_PRTMODS1); /* select CR9 */ outb(efir, 0x9); r |= (inb(efdr) & WINB_PRTMODS2); switch (r) { case WINB_W83757: if (bootverbose) printf("ppc%d: W83757 compatible mode\n", ppc->ppc_unit); return (-1); /* generic or SMC-like */ case WINB_EXTFDC: case WINB_EXTADP: case WINB_EXT2FDD: case WINB_JOYSTICK: if (bootverbose) printf(" not in parallel port mode\n"); return (-1); case (WINB_PARALLEL | WINB_EPP_SPP): ppc->ppc_avm |= PPB_EPP | PPB_SPP; if (bootverbose) printf(" EPP SPP"); break; case (WINB_PARALLEL | WINB_ECP): ppc->ppc_avm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP SPP"); break; case (WINB_PARALLEL | WINB_ECP_EPP): ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; ppc->ppc_type = PPC_TYPE_SMCLIKE; if (bootverbose) printf(" ECP+EPP SPP"); break; default: printf("%s: unknown case (0x%x)!\n", __func__, r); } } else { /* mode forced */ /* select CR9 and set PRTMODS2 bit */ outb(efir, 0x9); outb(efdr, inb(efdr) & ~WINB_PRTMODS2); /* select CR0 and reset PRTMODSx bits */ outb(efir, 0x0); outb(efdr, inb(efdr) & ~(WINB_PRTMODS0 | WINB_PRTMODS1)); if (chipset_mode & PPB_ECP) { if (chipset_mode & 
PPB_EPP) { outb(efdr, inb(efdr) | WINB_ECP_EPP); if (bootverbose) printf(" ECP+EPP"); ppc->ppc_type = PPC_TYPE_SMCLIKE; } else { outb(efdr, inb(efdr) | WINB_ECP); if (bootverbose) printf(" ECP"); } } else { /* select EPP_SPP otherwise */ outb(efdr, inb(efdr) | WINB_EPP_SPP); if (bootverbose) printf(" EPP SPP"); } ppc->ppc_avm = chipset_mode; } if (bootverbose) printf("\n"); /* exit configuration mode */ outb(efer, 0xaa); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: ppc_smclike_setmode(ppc, chipset_mode); break; default: ppc_generic_setmode(ppc, chipset_mode); break; } return (chipset_mode); } #endif /* * ppc_generic_detect */ static int ppc_generic_detect(struct ppc_data *ppc, int chipset_mode) { /* default to generic */ ppc->ppc_type = PPC_TYPE_GENERIC; if (bootverbose) printf("ppc%d:", ppc->ppc_unit); /* first, check for ECP */ w_ecr(ppc, PPC_ECR_PS2); if ((r_ecr(ppc) & 0xe0) == PPC_ECR_PS2) { ppc->ppc_dtm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP SPP"); /* search for SMC style ECP+EPP mode */ w_ecr(ppc, PPC_ECR_EPP); } /* try to reset EPP timeout bit */ if (ppc_check_epp_timeout(ppc)) { ppc->ppc_dtm |= PPB_EPP; if (ppc->ppc_dtm & PPB_ECP) { /* SMC like chipset found */ ppc->ppc_model = SMC_LIKE; ppc->ppc_type = PPC_TYPE_SMCLIKE; if (bootverbose) printf(" ECP+EPP"); } else { if (bootverbose) printf(" EPP"); } } else { /* restore to standard mode */ w_ecr(ppc, PPC_ECR_STD); } /* XXX try to detect NIBBLE and PS2 modes */ ppc->ppc_dtm |= PPB_NIBBLE; if (bootverbose) printf(" SPP"); if (chipset_mode) ppc->ppc_avm = chipset_mode; else ppc->ppc_avm = ppc->ppc_dtm; if (bootverbose) printf("\n"); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: ppc_smclike_setmode(ppc, chipset_mode); break; default: ppc_generic_setmode(ppc, chipset_mode); break; } return (chipset_mode); } /* * ppc_detect() * * mode is the mode suggested at boot */ static int ppc_detect(struct ppc_data *ppc, int chipset_mode) { #ifdef PPC_PROBE_CHIPSET int i, mode; /* list of supported 
chipsets */ int (*chipset_detect[])(struct ppc_data *, int) = { ppc_pc873xx_detect, ppc_smc37c66xgt_detect, ppc_w83877f_detect, ppc_smc37c935_detect, ppc_generic_detect, NULL }; #endif /* if can't find the port and mode not forced return error */ if (!ppc_detect_port(ppc) && chipset_mode == 0) return (EIO); /* failed, port not present */ /* assume centronics compatible mode is supported */ ppc->ppc_avm = PPB_COMPATIBLE; #ifdef PPC_PROBE_CHIPSET /* we have to differenciate available chipset modes, * chipset running modes and IEEE-1284 operating modes * * after detection, the port must support running in compatible mode */ if (ppc->ppc_flags & 0x40) { if (bootverbose) printf("ppc: chipset forced to generic\n"); #endif ppc->ppc_mode = ppc_generic_detect(ppc, chipset_mode); #ifdef PPC_PROBE_CHIPSET } else { for (i=0; chipset_detect[i] != NULL; i++) { if ((mode = chipset_detect[i](ppc, chipset_mode)) != -1) { ppc->ppc_mode = mode; break; } } } #endif /* configure/detect ECP FIFO */ if ((ppc->ppc_avm & PPB_ECP) && !(ppc->ppc_flags & 0x80)) ppc_detect_fifo(ppc); return (0); } /* * ppc_exec_microseq() * * Execute a microsequence. * Microsequence mechanism is supposed to handle fast I/O operations. 
*/ int ppc_exec_microseq(device_t dev, struct ppb_microseq **p_msq) { struct ppc_data *ppc = DEVTOSOFTC(dev); struct ppb_microseq *mi; char cc, *p; int i, iter, len; int error; register int reg; register char mask; register int accum = 0; register char *ptr = 0; struct ppb_microseq *stack = 0; /* microsequence registers are equivalent to PC-like port registers */ #define r_reg(register,ppc) (bus_space_read_1((ppc)->bst, (ppc)->bsh, register)) #define w_reg(register, ppc, byte) (bus_space_write_1((ppc)->bst, (ppc)->bsh, register, byte)) #define INCR_PC (mi ++) /* increment program counter */ mi = *p_msq; for (;;) { switch (mi->opcode) { case MS_OP_RSET: cc = r_reg(mi->arg[0].i, ppc); cc &= (char)mi->arg[2].i; /* clear mask */ cc |= (char)mi->arg[1].i; /* assert mask */ w_reg(mi->arg[0].i, ppc, cc); INCR_PC; break; case MS_OP_RASSERT_P: reg = mi->arg[1].i; ptr = ppc->ppc_ptr; if ((len = mi->arg[0].i) == MS_ACCUM) { accum = ppc->ppc_accum; for (; accum; accum--) w_reg(reg, ppc, *ptr++); ppc->ppc_accum = accum; } else for (i=0; ippc_ptr = ptr; INCR_PC; break; case MS_OP_RFETCH_P: reg = mi->arg[1].i; mask = (char)mi->arg[2].i; ptr = ppc->ppc_ptr; if ((len = mi->arg[0].i) == MS_ACCUM) { accum = ppc->ppc_accum; for (; accum; accum--) *ptr++ = r_reg(reg, ppc) & mask; ppc->ppc_accum = accum; } else for (i=0; ippc_ptr = ptr; INCR_PC; break; case MS_OP_RFETCH: *((char *) mi->arg[2].p) = r_reg(mi->arg[0].i, ppc) & (char)mi->arg[1].i; INCR_PC; break; case MS_OP_RASSERT: case MS_OP_DELAY: /* let's suppose the next instr. 
is the same */ prefetch: for (;mi->opcode == MS_OP_RASSERT; INCR_PC) w_reg(mi->arg[0].i, ppc, (char)mi->arg[1].i); if (mi->opcode == MS_OP_DELAY) { DELAY(mi->arg[0].i); INCR_PC; goto prefetch; } break; case MS_OP_ADELAY: if (mi->arg[0].i) tsleep(NULL, PPBPRI, "ppbdelay", mi->arg[0].i * (hz/1000)); INCR_PC; break; case MS_OP_TRIG: reg = mi->arg[0].i; iter = mi->arg[1].i; p = (char *)mi->arg[2].p; /* XXX delay limited to 255 us */ for (i=0; ippc_accum = mi->arg[0].i; INCR_PC; break; case MS_OP_DBRA: if (--ppc->ppc_accum > 0) mi += mi->arg[0].i; INCR_PC; break; case MS_OP_BRSET: cc = r_str(ppc); if ((cc & (char)mi->arg[0].i) == (char)mi->arg[0].i) mi += mi->arg[1].i; INCR_PC; break; case MS_OP_BRCLEAR: cc = r_str(ppc); if ((cc & (char)mi->arg[0].i) == 0) mi += mi->arg[1].i; INCR_PC; break; case MS_OP_BRSTAT: cc = r_str(ppc); if ((cc & ((char)mi->arg[0].i | (char)mi->arg[1].i)) == (char)mi->arg[0].i) mi += mi->arg[2].i; INCR_PC; break; case MS_OP_C_CALL: /* * If the C call returns !0 then end the microseq. 
* The current state of ptr is passed to the C function */ if ((error = mi->arg[0].f(mi->arg[1].p, ppc->ppc_ptr))) return (error); INCR_PC; break; case MS_OP_PTR: ppc->ppc_ptr = (char *)mi->arg[0].p; INCR_PC; break; case MS_OP_CALL: if (stack) panic("%s: too much calls", __func__); if (mi->arg[0].p) { /* store the state of the actual * microsequence */ stack = mi; /* jump to the new microsequence */ mi = (struct ppb_microseq *)mi->arg[0].p; } else INCR_PC; break; case MS_OP_SUBRET: /* retrieve microseq and pc state before the call */ mi = stack; /* reset the stack */ stack = 0; /* XXX return code */ INCR_PC; break; case MS_OP_PUT: case MS_OP_GET: case MS_OP_RET: /* can't return to ppb level during the execution * of a submicrosequence */ if (stack) panic("%s: can't return to ppb level", __func__); /* update pc for ppb level of execution */ *p_msq = mi; /* return to ppb level of execution */ return (0); default: panic("%s: unknown microsequence opcode 0x%x", __func__, mi->opcode); } } /* unreached */ } static void ppcintr(void *arg) { device_t dev = (device_t)arg; struct ppc_data *ppc = (struct ppc_data *)device_get_softc(dev); u_char ctr, ecr, str; str = r_str(ppc); ctr = r_ctr(ppc); ecr = r_ecr(ppc); #if PPC_DEBUG > 1 printf("![%x/%x/%x]", ctr, ecr, str); #endif /* don't use ecp mode with IRQENABLE set */ if (ctr & IRQENABLE) { return; } /* interrupts are generated by nFault signal * only in ECP mode */ if ((str & nFAULT) && (ppc->ppc_mode & PPB_ECP)) { /* check if ppc driver has programmed the * nFault interrupt */ if (ppc->ppc_irqstat & PPC_IRQ_nFAULT) { w_ecr(ppc, ecr | PPC_nFAULT_INTR); ppc->ppc_irqstat &= ~PPC_IRQ_nFAULT; } else { /* shall be handled by underlying layers XXX */ return; } } if (ppc->ppc_irqstat & PPC_IRQ_DMA) { /* disable interrupts (should be done by hardware though) */ w_ecr(ppc, ecr | PPC_SERVICE_INTR); ppc->ppc_irqstat &= ~PPC_IRQ_DMA; ecr = r_ecr(ppc); /* check if DMA completed */ if ((ppc->ppc_avm & PPB_ECP) && (ecr & PPC_ENABLE_DMA)) { 
#ifdef PPC_DEBUG printf("a"); #endif /* stop DMA */ w_ecr(ppc, ecr & ~PPC_ENABLE_DMA); ecr = r_ecr(ppc); if (ppc->ppc_dmastat == PPC_DMA_STARTED) { #ifdef PPC_DEBUG printf("d"); #endif isa_dmadone( ppc->ppc_dmaflags, ppc->ppc_dmaddr, ppc->ppc_dmacnt, ppc->ppc_dmachan); ppc->ppc_dmastat = PPC_DMA_COMPLETE; /* wakeup the waiting process */ wakeup(ppc); } } } else if (ppc->ppc_irqstat & PPC_IRQ_FIFO) { /* classic interrupt I/O */ ppc->ppc_irqstat &= ~PPC_IRQ_FIFO; } return; } int ppc_read(device_t dev, char *buf, int len, int mode) { return (EINVAL); } /* * Call this function if you want to send data in any advanced mode * of your parallel port: FIFO, DMA * * If what you want is not possible (no ECP, no DMA...), * EINVAL is returned */ int ppc_write(device_t dev, char *buf, int len, int how) { struct ppc_data *ppc = DEVTOSOFTC(dev); char ecr, ecr_sav, ctr, ctr_sav; int s, error = 0; int spin; #ifdef PPC_DEBUG printf("w"); #endif ecr_sav = r_ecr(ppc); ctr_sav = r_ctr(ppc); /* * Send buffer with DMA, FIFO and interrupts */ if ((ppc->ppc_avm & PPB_ECP) && (ppc->ppc_registered)) { if (ppc->ppc_dmachan > 0) { /* byte mode, no intr, no DMA, dir=0, flush fifo */ ecr = PPC_ECR_STD | PPC_DISABLE_INTR; w_ecr(ppc, ecr); /* disable nAck interrupts */ ctr = r_ctr(ppc); ctr &= ~IRQENABLE; w_ctr(ppc, ctr); ppc->ppc_dmaflags = 0; ppc->ppc_dmaddr = (caddr_t)buf; ppc->ppc_dmacnt = (u_int)len; switch (ppc->ppc_mode) { case PPB_COMPATIBLE: /* compatible mode with FIFO, no intr, DMA, dir=0 */ ecr = PPC_ECR_FIFO | PPC_DISABLE_INTR | PPC_ENABLE_DMA; break; case PPB_ECP: ecr = PPC_ECR_ECP | PPC_DISABLE_INTR | PPC_ENABLE_DMA; break; default: error = EINVAL; goto error; } w_ecr(ppc, ecr); ecr = r_ecr(ppc); /* enter splhigh() not to be preempted * by the dma interrupt, we may miss * the wakeup otherwise */ s = splhigh(); ppc->ppc_dmastat = PPC_DMA_INIT; /* enable interrupts */ ecr &= ~PPC_SERVICE_INTR; ppc->ppc_irqstat = PPC_IRQ_DMA; w_ecr(ppc, ecr); isa_dmastart( ppc->ppc_dmaflags, 
ppc->ppc_dmaddr, ppc->ppc_dmacnt, ppc->ppc_dmachan); #ifdef PPC_DEBUG printf("s%d", ppc->ppc_dmacnt); #endif ppc->ppc_dmastat = PPC_DMA_STARTED; /* Wait for the DMA completed interrupt. We hope we won't * miss it, otherwise a signal will be necessary to unlock the * process. */ do { /* release CPU */ error = tsleep(ppc, PPBPRI | PCATCH, "ppcdma", 0); } while (error == EWOULDBLOCK); splx(s); if (error) { #ifdef PPC_DEBUG printf("i"); #endif /* stop DMA */ isa_dmadone( ppc->ppc_dmaflags, ppc->ppc_dmaddr, ppc->ppc_dmacnt, ppc->ppc_dmachan); /* no dma, no interrupt, flush the fifo */ w_ecr(ppc, PPC_ECR_RESET); ppc->ppc_dmastat = PPC_DMA_INTERRUPTED; goto error; } /* wait for an empty fifo */ while (!(r_ecr(ppc) & PPC_FIFO_EMPTY)) { for (spin=100; spin; spin--) if (r_ecr(ppc) & PPC_FIFO_EMPTY) goto fifo_empty; #ifdef PPC_DEBUG printf("Z"); #endif error = tsleep(ppc, PPBPRI | PCATCH, "ppcfifo", hz/100); if (error != EWOULDBLOCK) { #ifdef PPC_DEBUG printf("I"); #endif /* no dma, no interrupt, flush the fifo */ w_ecr(ppc, PPC_ECR_RESET); ppc->ppc_dmastat = PPC_DMA_INTERRUPTED; error = EINTR; goto error; } } fifo_empty: /* no dma, no interrupt, flush the fifo */ w_ecr(ppc, PPC_ECR_RESET); } else error = EINVAL; /* XXX we should FIFO and * interrupts */ } else error = EINVAL; error: /* PDRQ must be kept unasserted until nPDACK is * deasserted for a minimum of 350ns (SMC datasheet) * * Consequence may be a FIFO that never empty */ DELAY(1); w_ecr(ppc, ecr_sav); w_ctr(ppc, ctr_sav); return (error); } void ppc_reset_epp(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); ppc_reset_epp_timeout(ppc); return; } int ppc_setmode(device_t dev, int mode) { struct ppc_data *ppc = DEVTOSOFTC(dev); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: return (ppc_smclike_setmode(ppc, mode)); break; case PPC_TYPE_GENERIC: default: return (ppc_generic_setmode(ppc, mode)); break; } /* not reached */ return (ENXIO); } static struct isa_pnp_id lpc_ids[] = { { 0x0004d041, "Standard parallel 
printer port" }, /* PNP0400 */ { 0x0104d041, "ECP parallel printer port" }, /* PNP0401 */ { 0 } }; static int ppc_isa_probe(device_t dev) { device_t parent; int error; parent = device_get_parent(dev); error = ISA_PNP_PROBE(parent, dev, lpc_ids); if (error == ENXIO) return (ENXIO); else if (error != 0) /* XXX shall be set after detection */ device_set_desc(dev, "Parallel port"); return(ppc_probe(dev)); } int ppc_probe(device_t dev) { #ifdef __i386__ static short next_bios_ppc = 0; #endif struct ppc_data *ppc; int error; u_long port; /* * Allocate the ppc_data structure. */ ppc = DEVTOSOFTC(dev); bzero(ppc, sizeof(struct ppc_data)); ppc->rid_irq = ppc->rid_drq = ppc->rid_ioport = 0; ppc->res_irq = ppc->res_drq = ppc->res_ioport = 0; /* retrieve ISA parameters */ error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &port, NULL); #ifdef __i386__ /* * If port not specified, use bios list. */ if (error) { if((next_bios_ppc < BIOS_MAX_PPC) && (*(BIOS_PORTS+next_bios_ppc) != 0) ) { port = *(BIOS_PORTS+next_bios_ppc++); if (bootverbose) device_printf(dev, "parallel port found at 0x%x\n", (int) port); } else { device_printf(dev, "parallel port not found.\n"); return ENXIO; } bus_set_resource(dev, SYS_RES_IOPORT, 0, port, IO_LPTSIZE_EXTENDED); } #endif #ifdef __alpha__ /* * There isn't a bios list on alpha. Put it in the usual place. */ if (error) { bus_set_resource(dev, SYS_RES_IOPORT, 0, 0x3bc, IO_LPTSIZE_NORMAL); } #endif /* IO port is mandatory */ /* Try "extended" IO port range...*/ ppc->res_ioport = bus_alloc_resource(dev, SYS_RES_IOPORT, &ppc->rid_ioport, 0, ~0, IO_LPTSIZE_EXTENDED, RF_ACTIVE); if (ppc->res_ioport != 0) { if (bootverbose) device_printf(dev, "using extended I/O port range\n"); } else { /* Failed? If so, then try the "normal" IO port range... 
*/ ppc->res_ioport = bus_alloc_resource(dev, SYS_RES_IOPORT, &ppc->rid_ioport, 0, ~0, IO_LPTSIZE_NORMAL, RF_ACTIVE); if (ppc->res_ioport != 0) { if (bootverbose) device_printf(dev, "using normal I/O port range\n"); } else { device_printf(dev, "cannot reserve I/O port range\n"); goto error; } } ppc->ppc_base = rman_get_start(ppc->res_ioport); ppc->bsh = rman_get_bushandle(ppc->res_ioport); ppc->bst = rman_get_bustag(ppc->res_ioport); ppc->ppc_flags = device_get_flags(dev); if (!(ppc->ppc_flags & 0x20)) { ppc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ppc->rid_irq, RF_SHAREABLE); ppc->res_drq = bus_alloc_resource_any(dev, SYS_RES_DRQ, &ppc->rid_drq, RF_ACTIVE); } if (ppc->res_irq) ppc->ppc_irq = rman_get_start(ppc->res_irq); if (ppc->res_drq) ppc->ppc_dmachan = rman_get_start(ppc->res_drq); ppc->ppc_unit = device_get_unit(dev); ppc->ppc_model = GENERIC; ppc->ppc_mode = PPB_COMPATIBLE; ppc->ppc_epp = (ppc->ppc_flags & 0x10) >> 4; ppc->ppc_type = PPC_TYPE_GENERIC; /* * Try to detect the chipset and its mode. */ if (ppc_detect(ppc, ppc->ppc_flags & 0xf)) goto error; return (0); error: if (ppc->res_irq != 0) { bus_release_resource(dev, SYS_RES_IRQ, ppc->rid_irq, ppc->res_irq); } if (ppc->res_ioport != 0) { bus_deactivate_resource(dev, SYS_RES_IOPORT, ppc->rid_ioport, ppc->res_ioport); bus_release_resource(dev, SYS_RES_IOPORT, ppc->rid_ioport, ppc->res_ioport); } if (ppc->res_drq != 0) { bus_deactivate_resource(dev, SYS_RES_DRQ, ppc->rid_drq, ppc->res_drq); bus_release_resource(dev, SYS_RES_DRQ, ppc->rid_drq, ppc->res_drq); } return (ENXIO); } int ppc_attach(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); device_t ppbus; device_t parent = device_get_parent(dev); device_printf(dev, "%s chipset (%s) in %s mode%s\n", ppc_models[ppc->ppc_model], ppc_avms[ppc->ppc_avm], ppc_modes[ppc->ppc_mode], (PPB_IS_EPP(ppc->ppc_mode)) ? 
ppc_epp_protocol[ppc->ppc_epp] : ""); if (ppc->ppc_fifo) device_printf(dev, "FIFO with %d/%d/%d bytes threshold\n", ppc->ppc_fifo, ppc->ppc_wthr, ppc->ppc_rthr); if ((ppc->ppc_avm & PPB_ECP) && (ppc->ppc_dmachan > 0)) { /* acquire the DMA channel forever */ /* XXX */ isa_dma_acquire(ppc->ppc_dmachan); isa_dmainit(ppc->ppc_dmachan, 1024); /* nlpt.BUFSIZE */ } /* add ppbus as a child of this isa to parallel bridge */ ppbus = device_add_child(dev, "ppbus", -1); /* * Probe the ppbus and attach devices found. */ device_probe_and_attach(ppbus); /* register the ppc interrupt handler as default */ if (ppc->res_irq) { /* default to the tty mask for registration */ /* XXX */ if (BUS_SETUP_INTR(parent, dev, ppc->res_irq, INTR_TYPE_TTY, ppcintr, dev, &ppc->intr_cookie) == 0) { /* remember the ppcintr is registered */ ppc->ppc_registered = 1; } } return (0); } u_char ppc_io(device_t ppcdev, int iop, u_char *addr, int cnt, u_char byte) { struct ppc_data *ppc = DEVTOSOFTC(ppcdev); switch (iop) { case PPB_OUTSB_EPP: bus_space_write_multi_1(ppc->bst, ppc->bsh, PPC_EPP_DATA, addr, cnt); break; case PPB_OUTSW_EPP: bus_space_write_multi_2(ppc->bst, ppc->bsh, PPC_EPP_DATA, (u_int16_t *)addr, cnt); break; case PPB_OUTSL_EPP: bus_space_write_multi_4(ppc->bst, ppc->bsh, PPC_EPP_DATA, (u_int32_t *)addr, cnt); break; case PPB_INSB_EPP: bus_space_read_multi_1(ppc->bst, ppc->bsh, PPC_EPP_DATA, addr, cnt); break; case PPB_INSW_EPP: bus_space_read_multi_2(ppc->bst, ppc->bsh, PPC_EPP_DATA, (u_int16_t *)addr, cnt); break; case PPB_INSL_EPP: bus_space_read_multi_4(ppc->bst, ppc->bsh, PPC_EPP_DATA, (u_int32_t *)addr, cnt); break; case PPB_RDTR: return (r_dtr(ppc)); case PPB_RSTR: return (r_str(ppc)); case PPB_RCTR: return (r_ctr(ppc)); case PPB_REPP_A: return (r_epp_A(ppc)); case PPB_REPP_D: return (r_epp_D(ppc)); case PPB_RECR: return (r_ecr(ppc)); case PPB_RFIFO: return (r_fifo(ppc)); case PPB_WDTR: w_dtr(ppc, byte); break; case PPB_WSTR: w_str(ppc, byte); break; case PPB_WCTR: w_ctr(ppc, byte); 
break; case PPB_WEPP_A: w_epp_A(ppc, byte); break; case PPB_WEPP_D: w_epp_D(ppc, byte); break; case PPB_WECR: w_ecr(ppc, byte); break; case PPB_WFIFO: w_fifo(ppc, byte); break; default: panic("%s: unknown I/O operation", __func__); break; } return (0); /* not significative */ } int ppc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val) { struct ppc_data *ppc = (struct ppc_data *)device_get_softc(bus); switch (index) { case PPC_IVAR_EPP_PROTO: *val = (u_long)ppc->ppc_epp; break; case PPC_IVAR_IRQ: *val = (u_long)ppc->ppc_irq; break; default: return (ENOENT); } return (0); } /* * Resource is useless here since ppbus devices' interrupt handlers are * multiplexed to the same resource initially allocated by ppc */ int ppc_setup_intr(device_t bus, device_t child, struct resource *r, int flags, void (*ihand)(void *), void *arg, void **cookiep) { int error; struct ppc_data *ppc = DEVTOSOFTC(bus); if (ppc->ppc_registered) { /* XXX refuse registration if DMA is in progress */ /* first, unregister the default interrupt handler */ if ((error = BUS_TEARDOWN_INTR(device_get_parent(bus), bus, ppc->res_irq, ppc->intr_cookie))) return (error); /* bus_deactivate_resource(bus, SYS_RES_IRQ, ppc->rid_irq, */ /* ppc->res_irq); */ /* DMA/FIFO operation won't be possible anymore */ ppc->ppc_registered = 0; } /* pass registration to the upper layer, ignore the incoming resource */ return (BUS_SETUP_INTR(device_get_parent(bus), child, r, flags, ihand, arg, cookiep)); } /* * When no underlying device has a registered interrupt, register the ppc * layer one */ int ppc_teardown_intr(device_t bus, device_t child, struct resource *r, void *ih) { int error; struct ppc_data *ppc = DEVTOSOFTC(bus); device_t parent = device_get_parent(bus); /* pass unregistration to the upper layer */ if ((error = BUS_TEARDOWN_INTR(parent, child, r, ih))) return (error); /* default to the tty mask for registration */ /* XXX */ if (ppc->ppc_irq && !(error = BUS_SETUP_INTR(parent, bus, ppc->res_irq, 
INTR_TYPE_TTY, ppcintr, bus, &ppc->intr_cookie))) { /* remember the ppcintr is registered */ ppc->ppc_registered = 1; } return (error); } DRIVER_MODULE(ppc, isa, ppc_driver, ppc_devclass, 0, 0); DRIVER_MODULE(ppc, acpi, ppc_driver, ppc_devclass, 0, 0); Index: head/sys/dev/ppc/ppc_puc.c =================================================================== --- head/sys/dev/ppc/ppc_puc.c (revision 129878) +++ head/sys/dev/ppc/ppc_puc.c (revision 129879) @@ -1,83 +1,84 @@ /*- * Copyright (c) 1997-2000 Nicolas Souchu * Copyright (c) 2001 Alcove - Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ * */ #include #include +#include #include #include #include #include #include #include #include "ppbus_if.h" static int ppc_puc_probe(device_t dev); static device_method_t ppc_puc_methods[] = { /* device interface */ DEVMETHOD(device_probe, ppc_puc_probe), DEVMETHOD(device_attach, ppc_attach), /* bus interface */ DEVMETHOD(bus_read_ivar, ppc_read_ivar), DEVMETHOD(bus_setup_intr, ppc_setup_intr), DEVMETHOD(bus_teardown_intr, ppc_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), /* ppbus interface */ DEVMETHOD(ppbus_io, ppc_io), DEVMETHOD(ppbus_exec_microseq, ppc_exec_microseq), DEVMETHOD(ppbus_reset_epp, ppc_reset_epp), DEVMETHOD(ppbus_setmode, ppc_setmode), DEVMETHOD(ppbus_ecp_sync, ppc_ecp_sync), DEVMETHOD(ppbus_read, ppc_read), DEVMETHOD(ppbus_write, ppc_write), { 0, 0 } }; static driver_t ppc_puc_driver = { "ppc", ppc_puc_methods, sizeof(struct ppc_data), }; static int ppc_puc_probe(dev) device_t dev; { device_set_desc(dev, "Parallel port"); return (ppc_probe(dev)); } DRIVER_MODULE(ppc, puc, ppc_puc_driver, ppc_devclass, 0, 0); Index: head/sys/dev/puc/puc_pccard.c =================================================================== --- head/sys/dev/puc/puc_pccard.c (revision 129878) +++ head/sys/dev/puc/puc_pccard.c (revision 129879) @@ -1,111 +1,112 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_puc.h" #include #include #include +#include #include #include #include #include #include #include #define PUC_ENTRAILS 1 #include #include #include const struct puc_device_description rscom_devices = { "ARGOSY SP320 Dual port serial PCMCIA", /* http://www.argosy.com.tw/product/sp320.htm */ { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { { PUC_PORT_TYPE_COM, 0x0, 0x00, DEFAULT_RCLK, 0x100000 }, { PUC_PORT_TYPE_COM, 0x1, 0x00, DEFAULT_RCLK, 0 }, } }; static int puc_pccard_probe(device_t dev) { const char *vendor, *product; int error; error = pccard_get_vendor_str(dev, &vendor); if (error) return(error); error = pccard_get_product_str(dev, &product); if (error) return(error); if (!strcmp(vendor, "PCMCIA") && !strcmp(product, "RS-COM 2P")) { device_set_desc(dev, rscom_devices.name); return (0); } return (ENXIO); } static int puc_pccard_attach(device_t dev) { return (puc_attach(dev, &rscom_devices)); } static device_method_t puc_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, puc_pccard_probe), DEVMETHOD(device_attach, puc_pccard_attach), DEVMETHOD(bus_alloc_resource, puc_alloc_resource), DEVMETHOD(bus_release_resource, puc_release_resource), DEVMETHOD(bus_get_resource, puc_get_resource), 
DEVMETHOD(bus_read_ivar, puc_read_ivar), DEVMETHOD(bus_setup_intr, puc_setup_intr), DEVMETHOD(bus_teardown_intr, puc_teardown_intr), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t puc_pccard_driver = { "puc", puc_pccard_methods, sizeof(struct puc_softc), }; DRIVER_MODULE(puc, pccard, puc_pccard_driver, puc_devclass, 0, 0); Index: head/sys/dev/puc/puc_pci.c =================================================================== --- head/sys/dev/puc/puc_pci.c (revision 129878) +++ head/sys/dev/puc/puc_pci.c (revision 129879) @@ -1,293 +1,294 @@ /* $NetBSD: puc.c,v 1.7 2000/07/29 17:43:38 jlam Exp $ */ /*- * Copyright (c) 2002 JF Hay. All rights reserved. * Copyright (c) 2000 M. Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Copyright (c) 1996, 1998, 1999 * Christopher G. Demetriou. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christopher G. Demetriou * for the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_puc.h" #include #include #include +#include #include #include #include #include #include #include #include #include #define PUC_ENTRAILS 1 #include extern const struct puc_device_description puc_devices[]; int puc_config_win877(struct puc_softc *); static const struct puc_device_description * puc_find_description(uint32_t vend, uint32_t prod, uint32_t svend, uint32_t sprod) { int i; #define checkreg(val, index) \ (((val) & puc_devices[i].rmask[(index)]) == puc_devices[i].rval[(index)]) for (i = 0; puc_devices[i].name != NULL; i++) { if (checkreg(vend, PUC_REG_VEND) && checkreg(prod, PUC_REG_PROD) && checkreg(svend, PUC_REG_SVEND) && checkreg(sprod, PUC_REG_SPROD)) return (&puc_devices[i]); } #undef checkreg return (NULL); } static int puc_pci_probe(device_t dev) { uint32_t v1, v2, d1, d2; const struct puc_device_description *desc; if ((pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) != 0) return (ENXIO); v1 = pci_read_config(dev, PCIR_VENDOR, 2); d1 = pci_read_config(dev, PCIR_DEVICE, 2); v2 = pci_read_config(dev, PCIR_SUBVEND_0, 2); d2 = pci_read_config(dev, PCIR_SUBDEV_0, 2); desc = puc_find_description(v1, d1, v2, d2); if (desc == NULL) return (ENXIO); device_set_desc(dev, desc->name); return (0); } static int puc_pci_attach(device_t dev) { uint32_t v1, v2, d1, d2; v1 = pci_read_config(dev, PCIR_VENDOR, 2); d1 = pci_read_config(dev, PCIR_DEVICE, 2); v2 = pci_read_config(dev, PCIR_SUBVEND_0, 2); d2 = 
pci_read_config(dev, PCIR_SUBDEV_0, 2); return (puc_attach(dev, puc_find_description(v1, d1, v2, d2))); } static device_method_t puc_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, puc_pci_probe), DEVMETHOD(device_attach, puc_pci_attach), DEVMETHOD(bus_alloc_resource, puc_alloc_resource), DEVMETHOD(bus_release_resource, puc_release_resource), DEVMETHOD(bus_get_resource, puc_get_resource), DEVMETHOD(bus_read_ivar, puc_read_ivar), DEVMETHOD(bus_setup_intr, puc_setup_intr), DEVMETHOD(bus_teardown_intr, puc_teardown_intr), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t puc_pci_driver = { "puc", puc_pci_methods, sizeof(struct puc_softc), }; DRIVER_MODULE(puc, pci, puc_pci_driver, puc_devclass, 0, 0); DRIVER_MODULE(puc, cardbus, puc_pci_driver, puc_devclass, 0, 0); #define rdspio(indx) (bus_space_write_1(bst, bsh, efir, indx), \ bus_space_read_1(bst, bsh, efdr)) #define wrspio(indx,data) (bus_space_write_1(bst, bsh, efir, indx), \ bus_space_write_1(bst, bsh, efdr, data)) #ifdef PUC_DEBUG static void puc_print_win877(bus_space_tag_t bst, bus_space_handle_t bsh, u_int efir, u_int efdr) { u_char cr00, cr01, cr04, cr09, cr0d, cr14, cr15, cr16, cr17; u_char cr18, cr19, cr24, cr25, cr28, cr2c, cr31, cr32; cr00 = rdspio(0x00); cr01 = rdspio(0x01); cr04 = rdspio(0x04); cr09 = rdspio(0x09); cr0d = rdspio(0x0d); cr14 = rdspio(0x14); cr15 = rdspio(0x15); cr16 = rdspio(0x16); cr17 = rdspio(0x17); cr18 = rdspio(0x18); cr19 = rdspio(0x19); cr24 = rdspio(0x24); cr25 = rdspio(0x25); cr28 = rdspio(0x28); cr2c = rdspio(0x2c); cr31 = rdspio(0x31); cr32 = rdspio(0x32); printf("877T: cr00 %x, cr01 %x, cr04 %x, cr09 %x, cr0d %x, cr14 %x, " "cr15 %x, cr16 %x, cr17 %x, cr18 %x, cr19 %x, cr24 %x, cr25 %x, " "cr28 %x, cr2c %x, cr31 %x, cr32 %x\n", cr00, cr01, cr04, cr09, cr0d, cr14, cr15, cr16, cr17, cr18, cr19, cr24, cr25, cr28, cr2c, cr31, cr32); } #endif int puc_config_win877(struct 
puc_softc *sc) { u_char val; u_int efir, efdr; bus_space_tag_t bst; bus_space_handle_t bsh; struct resource *res; res = sc->sc_bar_mappings[0].res; bst = rman_get_bustag(res); bsh = rman_get_bushandle(res); /* configure the first W83877TF */ bus_space_write_1(bst, bsh, 0x250, 0x89); efir = 0x251; efdr = 0x252; val = rdspio(0x09) & 0x0f; if (val != 0x0c) { printf("conf_win877: Oops not a W83877TF\n"); return (ENXIO); } #ifdef PUC_DEBUG printf("before: "); puc_print_win877(bst, bsh, efir, efdr); #endif val = rdspio(0x16); val |= 0x04; wrspio(0x16, val); val &= ~0x04; wrspio(0x16, val); wrspio(0x24, 0x2e8 >> 2); wrspio(0x25, 0x2f8 >> 2); wrspio(0x17, 0x03); wrspio(0x28, 0x43); #ifdef PUC_DEBUG printf("after: "); puc_print_win877(bst, bsh, efir, efdr); #endif bus_space_write_1(bst, bsh, 0x250, 0xaa); /* configure the second W83877TF */ bus_space_write_1(bst, bsh, 0x3f0, 0x87); bus_space_write_1(bst, bsh, 0x3f0, 0x87); efir = 0x3f0; efdr = 0x3f1; val = rdspio(0x09) & 0x0f; if (val != 0x0c) { printf("conf_win877: Oops not a W83877TF\n"); return(ENXIO); } #ifdef PUC_DEBUG printf("before: "); puc_print_win877(bst, bsh, efir, efdr); #endif val = rdspio(0x16); val |= 0x04; wrspio(0x16, val); val &= ~0x04; wrspio(0x16, val); wrspio(0x24, 0x3e8 >> 2); wrspio(0x25, 0x3f8 >> 2); wrspio(0x17, 0x03); wrspio(0x28, 0x43); #ifdef PUC_DEBUG printf("after: "); puc_print_win877(bst, bsh, efir, efdr); #endif bus_space_write_1(bst, bsh, 0x3f0, 0xaa); return (0); } #undef rdspio #undef wrspio Index: head/sys/dev/ray/if_ray.c =================================================================== --- head/sys/dev/ray/if_ray.c (revision 129878) +++ head/sys/dev/ray/if_ray.c (revision 129879) @@ -1,3804 +1,3805 @@ /* * Copyright (C) 2000 * Dr. Duncan McLennan Barclay, dmlb@ragnet.demon.co.uk. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DUNCAN BARCLAY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL DUNCAN BARCLAY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #include __FBSDID("$FreeBSD$"); /* $NetBSD: if_ray.c,v 1.12 2000/02/07 09:36:27 augustss Exp $ */ /* * Copyright (c) 2000 Christian E. Hopps * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Card configuration * ================== * * This card is unusual in that it uses both common and attribute * memory whilst working. It should use common memory and an IO port. * * The bus resource allocations need to work around the brain deadness * of pccardd (where it reads the CIS for common memory, sets it all * up and then throws it all away assuming the card is an ed * driver...). Note that this could be dangerous (because it doesn't * interact with pccardd) if you use other memory mapped cards in the * same pccard slot as currently old mappings are not cleaned up very well * by the bus_release_resource methods or pccardd. * * There is no support for running this driver on 4.0. * * Ad-hoc and infra-structure modes * ================================ * * The driver supports ad-hoc mode for V4 firmware and infrastructure * mode for V5 firmware. V5 firmware in ad-hoc mode is untested and should * work. * * The Linux driver also seems to have the capability to act as an AP. 
* I wonder what facilities the "AP" can provide within a driver? We can * probably use the BRIDGE code to form an ESS but I don't think * power saving etc. is easy. * * * Packet framing/encapsulation/translation * ======================================== * * Currently we support the Webgear encapsulation: * 802.11 header struct ieee80211_frame * 802.3 header struct ether_header * IP/ARP payload * * and RFC1042 encapsulation of IP datagrams (translation): * 802.11 header struct ieee80211_frame * 802.2 LLC header * 802.2 SNAP header * 802.3 Ethertype * IP/ARP payload * * Framing should be selected via if_media stuff or link types but * is currently hardcoded to: * V4 encapsulation * V5 translation * * * Authentication * ============== * * 802.11 provides two authentication mechanisms. The first is a very * simple host based mechanism (like xhost) called Open System and the * second is a more complex challenge/response called Shared Key built * ontop of WEP. * * This driver only supports Open System and does not implement any * host based control lists. In otherwords authentication is always * granted to hosts wanting to authenticate with this station. This is * the only sensible behaviour as the Open System mechanism uses MAC * addresses to identify hosts. Send me patches if you need it! */ /* * ***check all XXX_INFRA code - reassoc not done well at all! * ***watchdog to catch screwed up removals? * ***error handling of RAY_COM_RUNQ * ***error handling of ECF command completions * ***can't seem to create a n/w that Win95 wants to see. * ***remove panic in ray_com_ecf by re-quing or timeout * ***use new ioctl stuff - probably need to change RAY_COM_FCHKRUNNING things? 
* consider user doing: * ifconfig ray0 192.168.200.38 -bssid "freed" * ifconfig ray0 192.168.200.38 -bssid "fred" * here the second one would be missed in this code * check that v5 needs timeouts on ecf commands * write up driver structure in comments above * UPDATE_PARAMS seems to return via an interrupt - maybe the timeout * is needed for wrong values? * proper setting of mib_hop_seq_len with country code for v4 firmware * best done with raycontrol? * countrycode setting is broken I think * userupdate should trap and do via startjoin etc. * fragmentation when rx level drops? * v5 might not need download * defaults are as documented apart from hop_seq_length * settings are sane for ad-hoc not infra * * driver state * most state is implied by the sequence of commands in the runq * but in fact any of the rx and tx path that uses variables * in the sc_c are potentially going to get screwed? * * infra mode stuff * proper handling of the basic rate set - see the manual * all ray_sj, ray_assoc sequencues need a "nicer" solution as we * remember association and authentication * need to consider WEP * acting as ap - should be able to get working from the manual * need to finish RAY_ECMD_REJOIN_DONE * finish authenitcation code, it doesn't handle errors/timeouts/ * REJOIN etc. * * ray_nw_param * promisc in here too? - done * should be able to update the parameters before we download to the * device. This means we must attach a desired struct to the * runq entry and maybe have another big case statement to * move these desired into current when not running. * init must then use the current settings (pre-loaded * in attach now!) and pass to download. But we can't access * current nw params outside of the runq - ahhh * differeniate between parameters set in attach and init * sc_station_addr in here too (for changing mac address) * move desired into the command structure? * take downloaded MIB from a complete nw_param? 
* longer term need to attach a desired nw params to the runq entry * * * RAY_COM_RUNQ errors * * if sleeping in ccs_alloc with eintr/erestart/enxio/enodev * erestart try again from the top * XXX do not malloc more comqs * XXX ccs allocation hard * eintr clean up and return * enxio clean up and return - done in macro * * if sleeping in runq_arr itself with eintr/erestart/enxio/enodev * erestart try again from the top * XXX do not malloc more comqs * XXX ccs allocation hard * XXX reinsert comqs at head of list * eintr clean up and return * enxio clean up and return - done in macro */ #define XXX 0 #define XXX_ACTING_AP 0 #define XXX_INFRA 0 #define RAY_DEBUG ( \ /* RAY_DBG_AUTH | */ \ /* RAY_DBG_SUBR | */ \ /* RAY_DBG_BOOTPARAM | */ \ /* RAY_DBG_STARTJOIN | */ \ /* RAY_DBG_CCS | */ \ /* RAY_DBG_IOCTL | */ \ /* RAY_DBG_MBUF | */ \ /* RAY_DBG_RX | */ \ /* RAY_DBG_CM | */ \ /* RAY_DBG_COM | */ \ /* RAY_DBG_STOP | */ \ /* RAY_DBG_CTL | */ \ /* RAY_DBG_MGT | */ \ /* RAY_DBG_TX | */ \ /* RAY_DBG_DCOM | */ \ 0 \ ) /* * XXX build options - move to LINT */ #define RAY_CM_RID 0 /* pccardd abuses windows 0 and 1 */ #define RAY_AM_RID 3 /* pccardd abuses windows 0 and 1 */ #define RAY_COM_TIMEOUT (hz/2) /* Timeout for CCS commands */ #define RAY_TX_TIMEOUT (hz/2) /* Timeout for rescheduling TX */ #define RAY_ECF_SPIN_DELAY 1000 /* Wait 1ms before checking ECF ready */ #define RAY_ECF_SPIN_TRIES 10 /* Wait this many times for ECF ready */ /* * XXX build options - move to LINT */ #ifndef RAY_DEBUG #define RAY_DEBUG 0x0000 #endif /* RAY_DEBUG */ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "card_if.h" #include #include #include #include /* * Prototyping */ static int ray_attach (device_t); static int ray_ccs_alloc (struct ray_softc *sc, size_t *ccsp, char *wmesg); static void ray_ccs_fill (struct ray_softc *sc, size_t ccs, u_int 
cmd); static void ray_ccs_free (struct ray_softc *sc, size_t ccs); static int ray_ccs_tx (struct ray_softc *sc, size_t *ccsp, size_t *bufpp); static void ray_com_ecf (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_com_ecf_done (struct ray_softc *sc); static void ray_com_ecf_timo (void *xsc); static struct ray_comq_entry * ray_com_init (struct ray_comq_entry *com, ray_comqfn_t function, int flags, char *mesg); static struct ray_comq_entry * ray_com_malloc (ray_comqfn_t function, int flags, char *mesg); static void ray_com_runq (struct ray_softc *sc); static int ray_com_runq_add (struct ray_softc *sc, struct ray_comq_entry *com[], int ncom, char *wmesg); static void ray_com_runq_done (struct ray_softc *sc); static int ray_detach (device_t); static void ray_init (void *xsc); static int ray_init_user (struct ray_softc *sc); static void ray_init_assoc (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_assoc_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_init_auth (struct ray_softc *sc, struct ray_comq_entry *com); static int ray_init_auth_send (struct ray_softc *sc, u_int8_t *dst, int sequence); static void ray_init_auth_done (struct ray_softc *sc, u_int8_t status); static void ray_init_download (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_download_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_init_download_v4 (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_download_v5 (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_mcast (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_sj (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_sj_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_intr (void *xsc); static void ray_intr_ccs (struct ray_softc *sc, u_int8_t cmd, u_int8_t status, size_t ccs); static void ray_intr_rcs (struct ray_softc *sc, 
u_int8_t cmd, size_t ccs); static void ray_intr_updt_errcntrs (struct ray_softc *sc); static int ray_ioctl (struct ifnet *ifp, u_long command, caddr_t data); static void ray_mcast (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_mcast_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static int ray_mcast_user (struct ray_softc *sc); static int ray_probe (device_t); static void ray_promisc (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_repparams (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_repparams_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static int ray_repparams_user (struct ray_softc *sc, struct ray_param_req *pr); static int ray_repstats_user (struct ray_softc *sc, struct ray_stats_req *sr); static int ray_res_alloc_am (struct ray_softc *sc); static int ray_res_alloc_cm (struct ray_softc *sc); static int ray_res_alloc_irq (struct ray_softc *sc); static void ray_res_release (struct ray_softc *sc); static void ray_rx (struct ray_softc *sc, size_t rcs); static void ray_rx_ctl (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_data (struct ray_softc *sc, struct mbuf *m0, u_int8_t siglev, u_int8_t antenna); static void ray_rx_mgt (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_auth (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_beacon (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_info (struct ray_softc *sc, struct mbuf *m0, struct ieee80211_information *elements); static void ray_rx_update_cache (struct ray_softc *sc, u_int8_t *src, u_int8_t siglev, u_int8_t antenna); static void ray_stop (struct ray_softc *sc, struct ray_comq_entry *com); static int ray_stop_user (struct ray_softc *sc); static void ray_tx (struct ifnet *ifp); static void ray_tx_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_tx_timo (void *xsc); static int ray_tx_send (struct ray_softc *sc, size_t ccs, int pktlen, u_int8_t 
*dst); static size_t ray_tx_wrhdr (struct ray_softc *sc, size_t bufp, u_int8_t type, u_int8_t fc1, u_int8_t *addr1, u_int8_t *addr2, u_int8_t *addr3); static void ray_upparams (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_upparams_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static int ray_upparams_user (struct ray_softc *sc, struct ray_param_req *pr); static void ray_watchdog (struct ifnet *ifp); static u_int8_t ray_tx_best_antenna (struct ray_softc *sc, u_int8_t *dst); #if RAY_DEBUG & RAY_DBG_COM static void ray_com_ecf_check (struct ray_softc *sc, size_t ccs, char *mesg); #endif /* RAY_DEBUG & RAY_DBG_COM */ #if RAY_DEBUG & RAY_DBG_MBUF static void ray_dump_mbuf (struct ray_softc *sc, struct mbuf *m, char *s); #endif /* RAY_DEBUG & RAY_DBG_MBUF */ /* * PC-Card (PCMCIA) driver definition */ static device_method_t ray_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ray_probe), DEVMETHOD(device_attach, ray_attach), DEVMETHOD(device_detach, ray_detach), { 0, 0 } }; static driver_t ray_driver = { "ray", ray_methods, sizeof(struct ray_softc) }; static devclass_t ray_devclass; DRIVER_MODULE(ray, pccard, ray_driver, ray_devclass, 0, 0); /* * Probe for the card by checking its startup results. * * Fixup any bugs/quirks for different firmware. */ static int ray_probe(device_t dev) { struct ray_softc *sc = device_get_softc(dev); struct ray_ecf_startup_v5 *ep = &sc->sc_ecf_startup; int error; sc->dev = dev; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); /* * Read startup results from the card. */ error = ray_res_alloc_cm(sc); if (error) return (error); error = ray_res_alloc_am(sc); if (error) { ray_res_release(sc); return (error); } RAY_MAP_CM(sc); SRAM_READ_REGION(sc, RAY_ECF_TO_HOST_BASE, ep, sizeof(sc->sc_ecf_startup)); ray_res_release(sc); /* * Check the card is okay and work out what version we are using. 
*/ if (ep->e_status != RAY_ECFS_CARD_OK) { RAY_PRINTF(sc, "card failed self test 0x%b", ep->e_status, RAY_ECFS_PRINTFB); return (ENXIO); } if (sc->sc_version != RAY_ECFS_BUILD_4 && sc->sc_version != RAY_ECFS_BUILD_5) { RAY_PRINTF(sc, "unsupported firmware version 0x%0x", ep->e_fw_build_string); return (ENXIO); } RAY_DPRINTF(sc, RAY_DBG_BOOTPARAM, "found a card"); sc->sc_gone = 0; /* * Fixup tib size to be correct - on build 4 it is garbage */ if (sc->sc_version == RAY_ECFS_BUILD_4 && sc->sc_tibsize == 0x55) sc->sc_tibsize = sizeof(struct ray_tx_tib); return (0); } /* * Attach the card into the kernel */ static int ray_attach(device_t dev) { struct ray_softc *sc = device_get_softc(dev); struct ray_ecf_startup_v5 *ep = &sc->sc_ecf_startup; struct ifnet *ifp = &sc->arpcom.ac_if; size_t ccs; int i, error; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); if ((sc == NULL) || (sc->sc_gone)) return (ENXIO); /* * Grab the resources I need */ error = ray_res_alloc_cm(sc); if (error) return (error); error = ray_res_alloc_am(sc); if (error) { ray_res_release(sc); return (error); } error = ray_res_alloc_irq(sc); if (error) { ray_res_release(sc); return (error); } /* * Reset any pending interrupts */ RAY_HCS_CLEAR_INTR(sc); /* * Set the parameters that will survive stop/init and * reset a few things on the card. 
* * Do not update these in ray_init_download's parameter setup * */ RAY_MAP_CM(sc); bzero(&sc->sc_d, sizeof(struct ray_nw_param)); bzero(&sc->sc_c, sizeof(struct ray_nw_param)); /* Clear statistics counters */ sc->sc_rxoverflow = 0; sc->sc_rxcksum = 0; sc->sc_rxhcksum = 0; sc->sc_rxnoise = 0; /* Clear signal and antenna cache */ bzero(sc->sc_siglevs, sizeof(sc->sc_siglevs)); /* Set all ccs to be free */ bzero(sc->sc_ccsinuse, sizeof(sc->sc_ccsinuse)); ccs = RAY_CCS_ADDRESS(0); for (i = 0; i < RAY_CCS_LAST; ccs += RAY_CCS_SIZE, i++) RAY_CCS_FREE(sc, ccs); /* * Initialise the network interface structure */ bcopy((char *)&ep->e_station_addr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_timer = 0; ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ifp->if_hdrlen = sizeof(struct ieee80211_frame) + sizeof(struct ether_header); ifp->if_baudrate = 1000000; /* Is this baud or bps ;-) */ ifp->if_start = ray_tx; ifp->if_ioctl = ray_ioctl; ifp->if_watchdog = ray_watchdog; ifp->if_init = ray_init; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; ether_ifattach(ifp, ep->e_station_addr); /* * Initialise the timers and driver */ callout_handle_init(&sc->com_timerh); callout_handle_init(&sc->tx_timerh); TAILQ_INIT(&sc->sc_comq); /* * Print out some useful information */ if (bootverbose || (RAY_DEBUG & RAY_DBG_BOOTPARAM)) { RAY_PRINTF(sc, "start up results"); if (sc->sc_version == RAY_ECFS_BUILD_4) printf(". Firmware version 4\n"); else printf(". Firmware version 5\n"); printf(". Status 0x%b\n", ep->e_status, RAY_ECFS_PRINTFB); printf(". Ether address %6D\n", ep->e_station_addr, ":"); if (sc->sc_version == RAY_ECFS_BUILD_4) { printf(". Program checksum %0x\n", ep->e_resv0); printf(". CIS checksum %0x\n", ep->e_rates[0]); } else { printf(". (reserved word) %0x\n", ep->e_resv0); printf(". Supported rates %8D\n", ep->e_rates, ":"); } printf(". 
Japan call sign %12D\n", ep->e_japan_callsign, ":"); if (sc->sc_version == RAY_ECFS_BUILD_5) { printf(". Program checksum %0x\n", ep->e_prg_cksum); printf(". CIS checksum %0x\n", ep->e_cis_cksum); printf(". Firmware version %0x\n", ep->e_fw_build_string); printf(". Firmware revision %0x\n", ep->e_fw_build); printf(". (reserved word) %0x\n", ep->e_fw_resv); printf(". ASIC version %0x\n", ep->e_asic_version); printf(". TIB size %0x\n", ep->e_tibsize); } } return (0); } /* * Detach the card * * This is usually called when the card is ejected, but * can be caused by a modunload of a controller driver. * The idea is to reset the driver's view of the device * and ensure that any driver entry points such as * read and write do not hang. */ static int ray_detach(device_t dev) { struct ray_softc *sc = device_get_softc(dev); struct ifnet *ifp = &sc->arpcom.ac_if; struct ray_comq_entry *com; int s; s = splimp(); RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, ""); if ((sc == NULL) || (sc->sc_gone)) return (0); /* * Mark as not running and detach the interface. * * N.B. if_detach can trigger ioctls so we do it first and * then clean the runq. */ sc->sc_gone = 1; sc->sc_c.np_havenet = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ether_ifdetach(ifp); /* * Stop the runq and wake up anyone sleeping for us. */ untimeout(ray_com_ecf_timo, sc, sc->com_timerh); untimeout(ray_tx_timo, sc, sc->tx_timerh); com = TAILQ_FIRST(&sc->sc_comq); TAILQ_FOREACH(com, &sc->sc_comq, c_chain) { com->c_flags |= RAY_COM_FDETACHED; com->c_retval = 0; RAY_DPRINTF(sc, RAY_DBG_STOP, "looking at com %p %b", com, com->c_flags, RAY_COM_FLAGS_PRINTFB); if (com->c_flags & RAY_COM_FWOK) { RAY_DPRINTF(sc, RAY_DBG_STOP, "waking com %p", com); wakeup(com->c_wakeup); } } /* * Release resources */ ray_res_release(sc); RAY_DPRINTF(sc, RAY_DBG_STOP, "unloading complete"); splx(s); return (0); } /* * Network ioctl request. 
*/ static int ray_ioctl(register struct ifnet *ifp, u_long command, caddr_t data) { struct ray_softc *sc = ifp->if_softc; struct ray_param_req pr; struct ray_stats_req sr; struct ifreq *ifr = (struct ifreq *)data; int s, error, error2; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_IOCTL, ""); if ((sc == NULL) || (sc->sc_gone)) return (ENXIO); error = error2 = 0; s = splimp(); switch (command) { case SIOCSIFFLAGS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SIFFLAGS 0x%0x", ifp->if_flags); /* * If the interface is marked up we call ray_init_user. * This will deal with mcast and promisc flags as well as * initialising the hardware if it needs it. */ if (ifp->if_flags & IFF_UP) error = ray_init_user(sc); else error = ray_stop_user(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "ADDMULTI/DELMULTI"); error = ray_mcast_user(sc); break; case SIOCSRAYPARAM: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SRAYPARAM"); if ((error = copyin(ifr->ifr_data, &pr, sizeof(pr)))) break; error = ray_upparams_user(sc, &pr); error2 = copyout(&pr, ifr->ifr_data, sizeof(pr)); error = error2 ? error2 : error; break; case SIOCGRAYPARAM: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYPARAM"); if ((error = copyin(ifr->ifr_data, &pr, sizeof(pr)))) break; error = ray_repparams_user(sc, &pr); error2 = copyout(&pr, ifr->ifr_data, sizeof(pr)); error = error2 ? error2 : error; break; case SIOCGRAYSTATS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYSTATS"); error = ray_repstats_user(sc, &sr); error2 = copyout(&sr, ifr->ifr_data, sizeof(sr)); error = error2 ? 
error2 : error; break; case SIOCGRAYSIGLEV: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYSIGLEV"); error = copyout(sc->sc_siglevs, ifr->ifr_data, sizeof(sc->sc_siglevs)); break; case SIOCGIFFLAGS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFFLAGS"); error = EINVAL; break; case SIOCGIFMETRIC: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMETRIC"); error = EINVAL; break; case SIOCGIFMTU: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMTU"); error = EINVAL; break; case SIOCGIFPHYS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFPYHS"); error = EINVAL; break; case SIOCSIFMEDIA: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SIFMEDIA"); error = EINVAL; break; case SIOCGIFMEDIA: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMEDIA"); error = EINVAL; break; default: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "OTHER (pass to ether)"); error = ether_ioctl(ifp, command, data); break; } splx(s); return (error); } /* * Ethernet layer entry to ray_init - discard errors */ static void ray_init(void *xsc) { struct ray_softc *sc = (struct ray_softc *)xsc; ray_init_user(sc); } /* * User land entry to network initialisation and changes in interface flags. * * We do a very little work here, just creating runq entries to * processes the actions needed to cope with interface flags. We do it * this way in case there are runq entries outstanding from earlier * ioctls that modify the interface flags. * * Returns values are either 0 for success, a varity of resource allocation * failures or errors in the command sent to the card. * * Note, IFF_RUNNING is eventually set by init_sj_done or init_assoc_done */ static int ray_init_user(struct ray_softc *sc) { struct ray_comq_entry *com[6]; int error, ncom; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); /* * Create the following runq entries to bring the card up. 
* * init_download - download the network to the card * init_mcast - reset multicast list * init_sj - find or start a BSS * init_auth - authenticate with an ESSID if needed * init_assoc - associate with an ESSID if needed * * They are only actually executed if the card is not running. * We may enter this routine from a simple change of IP * address and do not need to get the card to do these things. * However, we cannot perform the check here as there may be * commands in the runq that change the IFF_RUNNING state of * the interface. */ ncom = 0; com[ncom++] = RAY_COM_MALLOC(ray_init_download, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_mcast, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_sj, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_auth, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_assoc, RAY_COM_FCHKRUNNING); /* * Create runq entries to process flags * * promisc - set/reset PROMISC and ALLMULTI flags * * They are only actually executed if the card is running */ com[ncom++] = RAY_COM_MALLOC(ray_promisc, 0); RAY_COM_RUNQ(sc, com, ncom, "rayinit", error); /* XXX no real error processing from anything yet! */ RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry for resetting driver and downloading start up structures to card */ static void ray_init_download(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); /* If the card already running we might not need to download */ RAY_COM_CHKRUNNING(sc, com, ifp); /* * Reset instance variables * * The first set are network parameters that are read back when * the card starts or joins the network. * * The second set are network parameters that are downloaded to * the card. * * The third set are driver parameters. * * All of the variables in these sets can be updated by the * card or ioctls. 
* */ sc->sc_d.np_upd_param = 0; bzero(sc->sc_d.np_bss_id, ETHER_ADDR_LEN); sc->sc_d.np_inited = 0; sc->sc_d.np_def_txrate = RAY_MIB_BASIC_RATE_SET_DEFAULT; sc->sc_d.np_encrypt = 0; bzero(sc->sc_d.np_ssid, IEEE80211_NWID_LEN); if (sc->sc_version == RAY_ECFS_BUILD_4) { sc->sc_d.np_net_type = RAY_MIB_NET_TYPE_V4; strncpy(sc->sc_d.np_ssid, RAY_MIB_SSID_V4, IEEE80211_NWID_LEN); sc->sc_d.np_ap_status = RAY_MIB_AP_STATUS_V4; sc->sc_d.np_framing = RAY_FRAMING_ENCAPSULATION; } else { sc->sc_d.np_net_type = RAY_MIB_NET_TYPE_V5; strncpy(sc->sc_d.np_ssid, RAY_MIB_SSID_V5, IEEE80211_NWID_LEN); sc->sc_d.np_ap_status = RAY_MIB_AP_STATUS_V5; sc->sc_d.np_framing = RAY_FRAMING_TRANSLATION; } sc->sc_d.np_priv_start = RAY_MIB_PRIVACY_MUST_START_DEFAULT; sc->sc_d.np_priv_join = RAY_MIB_PRIVACY_CAN_JOIN_DEFAULT; sc->sc_d.np_promisc = !!(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)); /* XXX this is a hack whilst I transition the code. The instance * XXX variables above should be set somewhere else. This is needed for * XXX start_join */ bcopy(&sc->sc_d, &com->c_desired, sizeof(struct ray_nw_param)); /* * Download the right firmware defaults */ if (sc->sc_version == RAY_ECFS_BUILD_4) ray_init_download_v4(sc, com); else ray_init_download_v5(sc, com); /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_DOWNLOAD_PARAMS); ray_com_ecf(sc, com); } #define PUT2(p, v) \ do { (p)[0] = ((v >> 8) & 0xff); (p)[1] = (v & 0xff); } while(0) /* * Firmware version 4 defaults - see if_raymib.h for details */ static void ray_init_download_v4(struct ray_softc *sc, struct ray_comq_entry *com) { struct ray_mib_4 ray_mib_4_default; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); #define MIB4(m) ray_mib_4_default.m MIB4(mib_net_type) = com->c_desired.np_net_type; MIB4(mib_ap_status) = com->c_desired.np_ap_status; bcopy(com->c_desired.np_ssid, MIB4(mib_ssid), IEEE80211_NWID_LEN); MIB4(mib_scan_mode) = RAY_MIB_SCAN_MODE_V4; MIB4(mib_apm_mode) = RAY_MIB_APM_MODE_V4; 
bcopy(sc->sc_station_addr, MIB4(mib_mac_addr), ETHER_ADDR_LEN); PUT2(MIB4(mib_frag_thresh), RAY_MIB_FRAG_THRESH_V4); PUT2(MIB4(mib_dwell_time), RAY_MIB_DWELL_TIME_V4); PUT2(MIB4(mib_beacon_period), RAY_MIB_BEACON_PERIOD_V4); MIB4(mib_dtim_interval) = RAY_MIB_DTIM_INTERVAL_V4; MIB4(mib_max_retry) = RAY_MIB_MAX_RETRY_V4; MIB4(mib_ack_timo) = RAY_MIB_ACK_TIMO_V4; MIB4(mib_sifs) = RAY_MIB_SIFS_V4; MIB4(mib_difs) = RAY_MIB_DIFS_V4; MIB4(mib_pifs) = RAY_MIB_PIFS_V4; PUT2(MIB4(mib_rts_thresh), RAY_MIB_RTS_THRESH_V4); PUT2(MIB4(mib_scan_dwell), RAY_MIB_SCAN_DWELL_V4); PUT2(MIB4(mib_scan_max_dwell), RAY_MIB_SCAN_MAX_DWELL_V4); MIB4(mib_assoc_timo) = RAY_MIB_ASSOC_TIMO_V4; MIB4(mib_adhoc_scan_cycle) = RAY_MIB_ADHOC_SCAN_CYCLE_V4; MIB4(mib_infra_scan_cycle) = RAY_MIB_INFRA_SCAN_CYCLE_V4; MIB4(mib_infra_super_scan_cycle) = RAY_MIB_INFRA_SUPER_SCAN_CYCLE_V4; MIB4(mib_promisc) = com->c_desired.np_promisc; PUT2(MIB4(mib_uniq_word), RAY_MIB_UNIQ_WORD_V4); MIB4(mib_slot_time) = RAY_MIB_SLOT_TIME_V4; MIB4(mib_roam_low_snr_thresh) = RAY_MIB_ROAM_LOW_SNR_THRESH_V4; MIB4(mib_low_snr_count) = RAY_MIB_LOW_SNR_COUNT_V4; MIB4(mib_infra_missed_beacon_count) = RAY_MIB_INFRA_MISSED_BEACON_COUNT_V4; MIB4(mib_adhoc_missed_beacon_count) = RAY_MIB_ADHOC_MISSED_BEACON_COUNT_V4; MIB4(mib_country_code) = RAY_MIB_COUNTRY_CODE_V4; MIB4(mib_hop_seq) = RAY_MIB_HOP_SEQ_V4; MIB4(mib_hop_seq_len) = RAY_MIB_HOP_SEQ_LEN_V4; MIB4(mib_cw_max) = RAY_MIB_CW_MAX_V4; MIB4(mib_cw_min) = RAY_MIB_CW_MIN_V4; MIB4(mib_noise_filter_gain) = RAY_MIB_NOISE_FILTER_GAIN_DEFAULT; MIB4(mib_noise_limit_offset) = RAY_MIB_NOISE_LIMIT_OFFSET_DEFAULT; MIB4(mib_rssi_thresh_offset) = RAY_MIB_RSSI_THRESH_OFFSET_DEFAULT; MIB4(mib_busy_thresh_offset) = RAY_MIB_BUSY_THRESH_OFFSET_DEFAULT; MIB4(mib_sync_thresh) = RAY_MIB_SYNC_THRESH_DEFAULT; MIB4(mib_test_mode) = RAY_MIB_TEST_MODE_DEFAULT; MIB4(mib_test_min_chan) = RAY_MIB_TEST_MIN_CHAN_DEFAULT; MIB4(mib_test_max_chan) = RAY_MIB_TEST_MAX_CHAN_DEFAULT; #undef MIB4 SRAM_WRITE_REGION(sc, 
RAY_HOST_TO_ECF_BASE, &ray_mib_4_default, sizeof(ray_mib_4_default)); } /* * Firmware version 5 defaults - see if_raymib.h for details */ static void ray_init_download_v5(struct ray_softc *sc, struct ray_comq_entry *com) { struct ray_mib_5 ray_mib_5_default; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); #define MIB5(m) ray_mib_5_default.m MIB5(mib_net_type) = com->c_desired.np_net_type; MIB5(mib_ap_status) = com->c_desired.np_ap_status; bcopy(com->c_desired.np_ssid, MIB5(mib_ssid), IEEE80211_NWID_LEN); MIB5(mib_scan_mode) = RAY_MIB_SCAN_MODE_V5; MIB5(mib_apm_mode) = RAY_MIB_APM_MODE_V5; bcopy(sc->sc_station_addr, MIB5(mib_mac_addr), ETHER_ADDR_LEN); PUT2(MIB5(mib_frag_thresh), RAY_MIB_FRAG_THRESH_V5); PUT2(MIB5(mib_dwell_time), RAY_MIB_DWELL_TIME_V5); PUT2(MIB5(mib_beacon_period), RAY_MIB_BEACON_PERIOD_V5); MIB5(mib_dtim_interval) = RAY_MIB_DTIM_INTERVAL_V5; MIB5(mib_max_retry) = RAY_MIB_MAX_RETRY_V5; MIB5(mib_ack_timo) = RAY_MIB_ACK_TIMO_V5; MIB5(mib_sifs) = RAY_MIB_SIFS_V5; MIB5(mib_difs) = RAY_MIB_DIFS_V5; MIB5(mib_pifs) = RAY_MIB_PIFS_V5; PUT2(MIB5(mib_rts_thresh), RAY_MIB_RTS_THRESH_V5); PUT2(MIB5(mib_scan_dwell), RAY_MIB_SCAN_DWELL_V5); PUT2(MIB5(mib_scan_max_dwell), RAY_MIB_SCAN_MAX_DWELL_V5); MIB5(mib_assoc_timo) = RAY_MIB_ASSOC_TIMO_V5; MIB5(mib_adhoc_scan_cycle) = RAY_MIB_ADHOC_SCAN_CYCLE_V5; MIB5(mib_infra_scan_cycle) = RAY_MIB_INFRA_SCAN_CYCLE_V5; MIB5(mib_infra_super_scan_cycle) = RAY_MIB_INFRA_SUPER_SCAN_CYCLE_V5; MIB5(mib_promisc) = com->c_desired.np_promisc; PUT2(MIB5(mib_uniq_word), RAY_MIB_UNIQ_WORD_V5); MIB5(mib_slot_time) = RAY_MIB_SLOT_TIME_V5; MIB5(mib_roam_low_snr_thresh) = RAY_MIB_ROAM_LOW_SNR_THRESH_V5; MIB5(mib_low_snr_count) = RAY_MIB_LOW_SNR_COUNT_V5; MIB5(mib_infra_missed_beacon_count) = RAY_MIB_INFRA_MISSED_BEACON_COUNT_V5; MIB5(mib_adhoc_missed_beacon_count) = RAY_MIB_ADHOC_MISSED_BEACON_COUNT_V5; MIB5(mib_country_code) = RAY_MIB_COUNTRY_CODE_V5; MIB5(mib_hop_seq) = RAY_MIB_HOP_SEQ_V5; MIB5(mib_hop_seq_len) = 
RAY_MIB_HOP_SEQ_LEN_V5; PUT2(MIB5(mib_cw_max), RAY_MIB_CW_MAX_V5); PUT2(MIB5(mib_cw_min), RAY_MIB_CW_MIN_V5); MIB5(mib_noise_filter_gain) = RAY_MIB_NOISE_FILTER_GAIN_DEFAULT; MIB5(mib_noise_limit_offset) = RAY_MIB_NOISE_LIMIT_OFFSET_DEFAULT; MIB5(mib_rssi_thresh_offset) = RAY_MIB_RSSI_THRESH_OFFSET_DEFAULT; MIB5(mib_busy_thresh_offset) = RAY_MIB_BUSY_THRESH_OFFSET_DEFAULT; MIB5(mib_sync_thresh) = RAY_MIB_SYNC_THRESH_DEFAULT; MIB5(mib_test_mode) = RAY_MIB_TEST_MODE_DEFAULT; MIB5(mib_test_min_chan) = RAY_MIB_TEST_MIN_CHAN_DEFAULT; MIB5(mib_test_max_chan) = RAY_MIB_TEST_MAX_CHAN_DEFAULT; MIB5(mib_allow_probe_resp) = RAY_MIB_ALLOW_PROBE_RESP_DEFAULT; MIB5(mib_privacy_must_start) = com->c_desired.np_priv_start; MIB5(mib_privacy_can_join) = com->c_desired.np_priv_join; MIB5(mib_basic_rate_set[0]) = com->c_desired.np_def_txrate; #undef MIB5 SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &ray_mib_5_default, sizeof(ray_mib_5_default)); } #undef PUT2 /* * Download completion routine */ static void ray_init_download_done(struct ray_softc *sc, u_int8_t status, size_t ccs) { RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_COM_CHECK(sc, ccs); RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */ ray_com_ecf_done(sc); } /* * Runq entry to empty the multicast filter list */ static void ray_init_mcast(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); /* If the card already running we might not need to reset the list */ RAY_COM_CHKRUNNING(sc, com, ifp); /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_MCAST); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update_mcast, c_nmcast, 0); ray_com_ecf(sc, com); } /* * Runq entry to starting or joining a network */ static void ray_init_sj(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ray_net_params np; int update; RAY_DPRINTF(sc, RAY_DBG_SUBR | 
RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); /* If the card already running we might not need to start the n/w */ RAY_COM_CHKRUNNING(sc, com, ifp); /* * Set up the right start or join command and determine * whether we should tell the card about a change in operating * parameters. */ sc->sc_c.np_havenet = 0; if (sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_ADHOC) ray_ccs_fill(sc, com->c_ccs, RAY_CMD_START_NET); else ray_ccs_fill(sc, com->c_ccs, RAY_CMD_JOIN_NET); update = 0; if (sc->sc_c.np_net_type != sc->sc_d.np_net_type) update++; if (bcmp(sc->sc_c.np_ssid, sc->sc_d.np_ssid, IEEE80211_NWID_LEN)) update++; if (sc->sc_c.np_priv_join != sc->sc_d.np_priv_join) update++; if (sc->sc_c.np_priv_start != sc->sc_d.np_priv_start) update++; RAY_DPRINTF(sc, RAY_DBG_STARTJOIN, "%s updating nw params", update?"is":"not"); if (update) { bzero(&np, sizeof(np)); np.p_net_type = sc->sc_d.np_net_type; bcopy(sc->sc_d.np_ssid, np.p_ssid, IEEE80211_NWID_LEN); np.p_privacy_must_start = sc->sc_d.np_priv_start; np.p_privacy_can_join = sc->sc_d.np_priv_join; SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &np, sizeof(np)); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_net, c_upd_param, 1); } else SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_net, c_upd_param, 0); /* * Kick the card */ ray_com_ecf(sc, com); } /* * Complete start command or intermediate step in assoc command */ static void ray_init_sj_done(struct ray_softc *sc, u_int8_t status, size_t ccs) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); RAY_COM_CHECK(sc, ccs); RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */ /* * Read back network parameters that the ECF sets */ SRAM_READ_REGION(sc, ccs, &sc->sc_c.p_1, sizeof(struct ray_cmd_net)); /* Adjust values for buggy firmware */ if (sc->sc_c.np_inited == 0x55) sc->sc_c.np_inited = 0; if (sc->sc_c.np_def_txrate == 0x55) sc->sc_c.np_def_txrate = sc->sc_d.np_def_txrate; if (sc->sc_c.np_encrypt == 0x55) sc->sc_c.np_encrypt = 
sc->sc_d.np_encrypt; /* * Update our local state if we updated the network parameters * when the START_NET or JOIN_NET was issued. */ if (sc->sc_c.np_upd_param) { RAY_DPRINTF(sc, RAY_DBG_STARTJOIN, "updated parameters"); SRAM_READ_REGION(sc, RAY_HOST_TO_ECF_BASE, &sc->sc_c.p_2, sizeof(struct ray_net_params)); } /* * Hurrah! The network is now active. * * Clearing IFF_OACTIVE will ensure that the system will send us * packets. Just before we return from the interrupt context * we check to see if packets have been queued. */ if (SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_cmd) == RAY_CMD_START_NET) { sc->sc_c.np_havenet = 1; sc->sc_c.np_framing = sc->sc_d.np_framing; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; } ray_com_ecf_done(sc); } /* * Runq entry to authenticate with an access point or another station */ static void ray_init_auth(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN | RAY_DBG_AUTH, ""); /* If card already running we might not need to authenticate */ RAY_COM_CHKRUNNING(sc, com, ifp); /* * Don't do anything if we are not in a managed network * * XXX V4 adhoc does not need this, V5 adhoc unknown */ if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_INFRA) { ray_com_runq_done(sc); return; } /* * XXX_AUTH need to think of run queue when doing auths from request i.e. would * XXX_AUTH need to have auth at top of runq? * XXX_AUTH ditto for sending any auth response packets...what about timeouts? */ /* * Kick the card */ /* XXX_AUTH check exit status and retry or fail as we can't associate without this */ ray_init_auth_send(sc, sc->sc_c.np_bss_id, IEEE80211_AUTH_OPEN_REQUEST); } /* * Build and send an authentication packet * * If an error occurs, returns 1 else returns 0. 
 */
static int
ray_init_auth_send(struct ray_softc *sc, u_int8_t *dst, int sequence)
{
	size_t ccs, bufp;
	int pktlen = 0;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN | RAY_DBG_AUTH, "");

	/* Get a control block */
	if (ray_ccs_tx(sc, &ccs, &bufp)) {
		RAY_RECERR(sc, "could not obtain a ccs");
		return (1);
	}

	/* Fill the header in */
	bufp = ray_tx_wrhdr(sc, bufp,
	    IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_AUTH,
	    IEEE80211_FC1_DIR_NODS,
	    dst,
	    sc->arpcom.ac_enaddr,
	    sc->sc_c.np_bss_id);

	/*
	 * Management frame body.  Each 16-bit field of the
	 * authentication frame is written one octet at a time,
	 * low octet first.
	 */
	/* Add algorithm number */
	SRAM_WRITE_1(sc, bufp + pktlen++, IEEE80211_AUTH_ALG_OPEN);
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);

	/* Add sequence number */
	SRAM_WRITE_1(sc, bufp + pktlen++, sequence);
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);

	/* Add status code */
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);

	/* Account for the 802.11 header written by ray_tx_wrhdr() above */
	pktlen += sizeof(struct ieee80211_frame);

	return (ray_tx_send(sc, ccs, pktlen, dst));
}

/*
 * Complete authentication runq
 *
 * Called from ray_rx_mgt_auth() when an open system authentication
 * response arrives.  Logs a failure status and releases the runq slot
 * in either case.
 */
static void
ray_init_auth_done(struct ray_softc *sc, u_int8_t status)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN | RAY_DBG_AUTH, "");

	if (status != IEEE80211_STATUS_SUCCESS)
		RAY_RECERR(sc, "authentication failed with status %d", status);
	/*
	 * XXX_AUTH retry? if not just recall ray_init_auth_send and dont
	 * XXX_AUTH clear runq?
	 * XXX_AUTH association requires that authentication is successful
	 * XXX_AUTH before we associate, and the runq is the only way to halt
	 * XXX_AUTH the progress of associate.
	 * XXX_AUTH In this case I might not need the RAY_AUTH_NEEDED state
	 */
	ray_com_runq_done(sc);
}

/*
 * Runq entry to starting an association with an access point
 *
 * Only meaningful in infrastructure mode; for other network types the
 * runq entry is completed immediately without touching the card.
 */
static void
ray_init_assoc(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");

	/* If the card already running we might not need to associate */
	RAY_COM_CHKRUNNING(sc, com, ifp);

	/*
	 * Don't do anything if we are not in a managed network
	 */
	if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_INFRA) {
		ray_com_runq_done(sc);
		return;
	}

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_START_ASSOC);
	ray_com_ecf(sc, com);
}

/*
 * Complete association
 *
 * Interrupt-context completion for RAY_CMD_START_ASSOC; marks the
 * network up and re-enables transmission.
 */
static void
ray_init_assoc_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_COM_CHECK(sc, ccs);
	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	/*
	 * Hurrah! The network is now active.
	 *
	 * Clearing IFF_OACTIVE will ensure that the system will send us
	 * packets. Just before we return from the interrupt context
	 * we check to see if packets have been queued.
	 */
	sc->sc_c.np_havenet = 1;
	sc->sc_c.np_framing = sc->sc_d.np_framing;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	ray_com_ecf_done(sc);
}

/*
 * Network stop.
 *
 * Inhibit card - if we can't prevent reception then do not worry;
 * stopping a NIC only guarantees no TX.
 *
 * The change to the interface flags is done via the runq so that any
 * existing commands can execute normally.
 */
static int
ray_stop_user(struct ray_softc *sc)
{
	struct ray_comq_entry *com[1];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, "");

	/*
	 * Schedule the real stop routine (ray_stop) on the runq and
	 * wait for it to complete.
	 */
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_stop, 0);

	RAY_COM_RUNQ(sc, com, ncom, "raystop", error);

	/* XXX no real error processing from anything yet!
*/ RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry for stopping the interface activity */ static void ray_stop(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; struct mbuf *m; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, ""); /* * Mark as not running and drain output queue */ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ifp->if_timer = 0; for (;;) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; m_freem(m); } ray_com_runq_done(sc); } static void ray_watchdog(struct ifnet *ifp) { struct ray_softc *sc = ifp->if_softc; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); if ((sc == NULL) || (sc->sc_gone)) return; RAY_PRINTF(sc, "watchdog timeout"); } /* * Transmit packet handling */ /* * Send a packet. * * We make two assumptions here: * 1) That the current priority is set to splimp _before_ this code * is called *and* is returned to the appropriate priority after * return * 2) That the IFF_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) * * A simple one packet at a time TX routine is used - we don't bother * chaining TX buffers. Performance is sufficient to max out the * wireless link on a P75. * * AST J30 Windows 95A (100MHz Pentium) to * Libretto 50CT FreeBSD-3.1 (75MHz Pentium) 167.37kB/s * Nonname box FreeBSD-3.4 (233MHz AMD K6) 161.82kB/s * * Libretto 50CT FreeBSD-3.1 (75MHz Pentium) to * AST J30 Windows 95A (100MHz Pentium) 167.37kB/s * Nonname box FreeBSD-3.4 (233MHz AMD K6) 161.38kB/s * * Given that 160kB/s is saturating the 2Mb/s wireless link we * are about there. * * In short I'm happy that the added complexity of chaining TX * packets together isn't worth it for my machines. 
*/ static void ray_tx(struct ifnet *ifp) { struct ray_softc *sc = ifp->if_softc; struct mbuf *m0, *m; struct ether_header *eh; struct llc *llc; size_t ccs, bufp; int pktlen, len; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, ""); RAY_MAP_CM(sc); /* * Some simple checks first - some are overkill */ if ((sc == NULL) || (sc->sc_gone)) return; if (!(ifp->if_flags & IFF_RUNNING)) { RAY_RECERR(sc, "cannot transmit - not running"); return; } if (!sc->sc_c.np_havenet) { RAY_RECERR(sc, "cannot transmit - no network"); return; } if (!RAY_ECF_READY(sc)) { /* Can't assume that the ECF is busy because of this driver */ if ((sc->tx_timerh.callout == NULL) || (!callout_active(sc->tx_timerh.callout))) { sc->tx_timerh = timeout(ray_tx_timo, sc, RAY_TX_TIMEOUT); return; } } else untimeout(ray_tx_timo, sc, sc->tx_timerh); /* * We find a ccs before we process the mbuf so that we are sure it * is worthwhile processing the packet. All errors in the mbuf * processing are either errors in the mbuf or gross configuration * errors and the packet wouldn't get through anyway. */ if (ray_ccs_tx(sc, &ccs, &bufp)) { ifp->if_flags |= IFF_OACTIVE; return; } /* * Get the mbuf and process it - we have to remember to free the * ccs if there are any errors. */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) { RAY_CCS_FREE(sc, ccs); return; } pktlen = m0->m_pkthdr.len; if (pktlen > ETHER_MAX_LEN - ETHER_CRC_LEN) { RAY_RECERR(sc, "mbuf too long %d", pktlen); RAY_CCS_FREE(sc, ccs); ifp->if_oerrors++; m_freem(m0); return; } m0 = m_pullup(m0, sizeof(struct ether_header)); if (m0 == NULL) { RAY_RECERR(sc, "could not pullup ether"); RAY_CCS_FREE(sc, ccs); ifp->if_oerrors++; return; } eh = mtod(m0, struct ether_header *); /* * Write the 802.11 header according to network type etc. 
*/ if (sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_ADHOC) bufp = ray_tx_wrhdr(sc, bufp, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC1_DIR_NODS, eh->ether_dhost, eh->ether_shost, sc->sc_c.np_bss_id); else if (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_TERMINAL) bufp = ray_tx_wrhdr(sc, bufp, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC1_DIR_TODS, sc->sc_c.np_bss_id, eh->ether_shost, eh->ether_dhost); else bufp = ray_tx_wrhdr(sc, bufp, IEEE80211_FC0_TYPE_DATA, IEEE80211_FC1_DIR_FROMDS, eh->ether_dhost, sc->sc_c.np_bss_id, eh->ether_shost); /* * Framing * * Add to the mbuf. */ switch (sc->sc_c.np_framing) { case RAY_FRAMING_ENCAPSULATION: /* Nice and easy - nothing! (just add an 802.11 header) */ break; case RAY_FRAMING_TRANSLATION: /* * Drop the first address in the ethernet header and * write an LLC and SNAP header over the second. */ m_adj(m0, ETHER_ADDR_LEN); if (m0 == NULL) { RAY_RECERR(sc, "could not get space for 802.2 header"); RAY_CCS_FREE(sc, ccs); ifp->if_oerrors++; return; } llc = mtod(m0, struct llc *); llc->llc_dsap = LLC_SNAP_LSAP; llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_un.type_snap.org_code[0] = 0; llc->llc_un.type_snap.org_code[1] = 0; llc->llc_un.type_snap.org_code[2] = 0; break; default: RAY_RECERR(sc, "unknown framing type %d", sc->sc_c.np_framing); RAY_CCS_FREE(sc, ccs); ifp->if_oerrors++; m_freem(m0); return; } if (m0 == NULL) { RAY_RECERR(sc, "could not frame packet"); RAY_CCS_FREE(sc, ccs); ifp->if_oerrors++; return; } RAY_MBUF_DUMP(sc, RAY_DBG_TX, m0, "framed packet"); /* * Copy the mbuf to the buffer in common memory * * We drop and don't bother wrapping as Ethernet packets are 1518 * bytes, we checked the mbuf earlier, and our TX buffers are 2048 * bytes. We don't have 530 bytes of headers etc. so something * must be fubar. 
*/ pktlen = sizeof(struct ieee80211_frame); for (m = m0; m != NULL; m = m->m_next) { pktlen += m->m_len; if ((len = m->m_len) == 0) continue; if ((bufp + len) < RAY_TX_END) SRAM_WRITE_REGION(sc, bufp, mtod(m, u_int8_t *), len); else { RAY_RECERR(sc, "tx buffer overflow"); RAY_CCS_FREE(sc, ccs); ifp->if_oerrors++; m_freem(m0); return; } bufp += len; } /* * Send it off */ if (ray_tx_send(sc, ccs, pktlen, eh->ether_dhost)) ifp->if_oerrors++; else ifp->if_opackets++; m_freem(m0); } /* * Start timeout routine. * * Used when card was busy but we needed to send a packet. */ static void ray_tx_timo(void *xsc) { struct ray_softc *sc = (struct ray_softc *)xsc; struct ifnet *ifp = &sc->arpcom.ac_if; int s; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); if (!(ifp->if_flags & IFF_OACTIVE) && (ifp->if_snd.ifq_head != NULL)) { s = splimp(); ray_tx(ifp); splx(s); } } /* * Write an 802.11 header into the Tx buffer space and return the * adjusted buffer pointer. */ static size_t ray_tx_wrhdr(struct ray_softc *sc, size_t bufp, u_int8_t type, u_int8_t fc1, u_int8_t *addr1, u_int8_t *addr2, u_int8_t *addr3) { struct ieee80211_frame header; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, ""); RAY_MAP_CM(sc); bzero(&header, sizeof(struct ieee80211_frame)); header.i_fc[0] = (IEEE80211_FC0_VERSION_0 | type); header.i_fc[1] = fc1; bcopy(addr1, header.i_addr1, ETHER_ADDR_LEN); bcopy(addr2, header.i_addr2, ETHER_ADDR_LEN); bcopy(addr3, header.i_addr3, ETHER_ADDR_LEN); SRAM_WRITE_REGION(sc, bufp, (u_int8_t *)&header, sizeof(struct ieee80211_frame)); return (bufp + sizeof(struct ieee80211_frame)); } /* * Fill in a few loose ends and kick the card to send the packet * * Returns 0 on success, 1 on failure */ static int ray_tx_send(struct ray_softc *sc, size_t ccs, int pktlen, u_int8_t *dst) { int i = 0; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, ""); RAY_MAP_CM(sc); while (!RAY_ECF_READY(sc)) { DELAY(RAY_ECF_SPIN_DELAY); if (++i > RAY_ECF_SPIN_TRIES) { RAY_RECERR(sc, "ECF busy, dropping packet"); RAY_CCS_FREE(sc, 
ccs); return (1); } } if (i != 0) RAY_RECERR(sc, "spun %d times", i); SRAM_WRITE_FIELD_2(sc, ccs, ray_cmd_tx, c_len, pktlen); SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_antenna, ray_tx_best_antenna(sc, dst)); SRAM_WRITE_1(sc, RAY_SCB_CCSI, RAY_CCS_INDEX(ccs)); RAY_ECF_START_CMD(sc); return (0); } /* * Determine best antenna to use from rx level and antenna cache */ static u_int8_t ray_tx_best_antenna(struct ray_softc *sc, u_int8_t *dst) { struct ray_siglev *sl; int i; u_int8_t antenna; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, ""); if (sc->sc_version == RAY_ECFS_BUILD_4) return (0); /* try to find host */ for (i = 0; i < RAY_NSIGLEVRECS; i++) { sl = &sc->sc_siglevs[i]; if (bcmp(sl->rsl_host, dst, ETHER_ADDR_LEN) == 0) goto found; } /* not found, return default setting */ return (0); found: /* This is a simple thresholding scheme that takes the mean * of the best antenna history. This is okay but as it is a * filter, it adds a bit of lag in situations where the * best antenna swaps from one side to the other slowly. Don't know * how likely this is given the horrible fading though. */ antenna = 0; for (i = 0; i < RAY_NANTENNA; i++) { antenna += sl->rsl_antennas[i]; } return (antenna > (RAY_NANTENNA >> 1)); } /* * Transmit now complete so clear ccs and network flags. 
*/ static void ray_tx_done(struct ray_softc *sc, u_int8_t status, size_t ccs) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, ""); RAY_CCSERR(sc, status, if_oerrors); RAY_CCS_FREE(sc, ccs); ifp->if_timer = 0; if (ifp->if_flags & IFF_OACTIVE) ifp->if_flags &= ~IFF_OACTIVE; } /* * Receiver packet handling */ /* * Receive a packet from the card */ static void ray_rx(struct ray_softc *sc, size_t rcs) { struct ieee80211_frame *header; struct ifnet *ifp = &sc->arpcom.ac_if; struct mbuf *m0; size_t pktlen, fraglen, readlen, tmplen; size_t bufp, ebufp; u_int8_t siglev, antenna; u_int first, ni, i; u_int8_t *mp; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); RAY_DPRINTF(sc, RAY_DBG_CCS, "using rcs 0x%x", rcs); m0 = NULL; readlen = 0; /* * Get first part of packet and the length. Do some sanity checks * and get a mbuf. */ first = RAY_CCS_INDEX(rcs); pktlen = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_pktlen); siglev = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_siglev); antenna = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_antenna); if ((pktlen > MCLBYTES) || (pktlen < sizeof(struct ieee80211_frame))) { RAY_RECERR(sc, "packet too big or too small"); ifp->if_ierrors++; goto skip_read; } MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 == NULL) { RAY_RECERR(sc, "MGETHDR failed"); ifp->if_ierrors++; goto skip_read; } if (pktlen > MHLEN) { MCLGET(m0, M_DONTWAIT); if (!(m0->m_flags & M_EXT)) { RAY_RECERR(sc, "MCLGET failed"); ifp->if_ierrors++; m_freem(m0); m0 = NULL; goto skip_read; } } m0->m_pkthdr.rcvif = ifp; m0->m_pkthdr.len = pktlen; m0->m_len = pktlen; mp = mtod(m0, u_int8_t *); /* * Walk the fragment chain to build the complete packet. * * The use of two index variables removes a race with the * hardware. If one index were used the clearing of the CCS would * happen before reading the next pointer and the hardware can get in. * Not my idea but verbatim from the NetBSD driver. 
*/ i = ni = first; while ((i = ni) && (i != RAY_CCS_LINK_NULL)) { rcs = RAY_CCS_ADDRESS(i); ni = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_nextfrag); bufp = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_bufp); fraglen = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_len); if (fraglen + readlen > pktlen) { RAY_RECERR(sc, "bad length current 0x%x pktlen 0x%x", fraglen + readlen, pktlen); ifp->if_ierrors++; m_freem(m0); m0 = NULL; goto skip_read; } if ((i < RAY_RCS_FIRST) || (i > RAY_RCS_LAST)) { RAY_RECERR(sc, "bad rcs index 0x%x", i); ifp->if_ierrors++; m_freem(m0); m0 = NULL; goto skip_read; } ebufp = bufp + fraglen; if (ebufp <= RAY_RX_END) SRAM_READ_REGION(sc, bufp, mp, fraglen); else { SRAM_READ_REGION(sc, bufp, mp, (tmplen = RAY_RX_END - bufp)); SRAM_READ_REGION(sc, RAY_RX_BASE, mp + tmplen, ebufp - RAY_RX_END); } mp += fraglen; readlen += fraglen; } skip_read: /* * Walk the chain again to free the rcss. */ i = ni = first; while ((i = ni) && (i != RAY_CCS_LINK_NULL)) { rcs = RAY_CCS_ADDRESS(i); ni = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_nextfrag); RAY_CCS_FREE(sc, rcs); } if (m0 == NULL) return; /* * Check the 802.11 packet type and hand off to * appropriate functions. 
*/ header = mtod(m0, struct ieee80211_frame *); if ((header->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { RAY_RECERR(sc, "header not version 0 fc0 0x%x", header->i_fc[0]); ifp->if_ierrors++; m_freem(m0); return; } switch (header->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_DATA: ray_rx_data(sc, m0, siglev, antenna); break; case IEEE80211_FC0_TYPE_MGT: ray_rx_mgt(sc, m0); break; case IEEE80211_FC0_TYPE_CTL: ray_rx_ctl(sc, m0); break; default: RAY_RECERR(sc, "unknown packet fc0 0x%x", header->i_fc[0]); ifp->if_ierrors++; m_freem(m0); } } /* * Deal with DATA packet types */ static void ray_rx_data(struct ray_softc *sc, struct mbuf *m0, u_int8_t siglev, u_int8_t antenna) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); struct llc *llc; u_int8_t *sa = NULL, *da = NULL, *ra = NULL, *ta = NULL; int trim = 0; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_RX, ""); /* * Check the the data packet subtype, some packets have * nothing in them so we will drop them here. */ switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_DATA: case IEEE80211_FC0_SUBTYPE_CF_ACK: case IEEE80211_FC0_SUBTYPE_CF_POLL: case IEEE80211_FC0_SUBTYPE_CF_ACPL: RAY_DPRINTF(sc, RAY_DBG_RX, "DATA packet"); break; case IEEE80211_FC0_SUBTYPE_NODATA: case IEEE80211_FC0_SUBTYPE_CFACK: case IEEE80211_FC0_SUBTYPE_CFPOLL: case IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK: RAY_DPRINTF(sc, RAY_DBG_RX, "NULL packet"); m_freem(m0); return; break; default: RAY_RECERR(sc, "reserved DATA packet subtype 0x%x", header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); ifp->if_ierrors++; m_freem(m0); return; } /* * Parse the To DS and From DS fields to determine the length * of the 802.11 header for use later on. * * Additionally, furtle out the right destination and * source MAC addresses for the packet. 
Packets may come via * APs so the MAC addresses of the immediate node may be * different from the node that actually sent us the packet. * * da destination address of final recipient * sa source address of orginator * ra receiver address of immediate recipient * ta transmitter address of immediate orginator * * Address matching is performed on da or sa with the AP or * BSSID in ra and ta. */ RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "(1) packet before framing"); switch (header->i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: da = ra = header->i_addr1; sa = ta = header->i_addr2; trim = sizeof(struct ieee80211_frame); RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D", sa, ":", da, ":"); break; case IEEE80211_FC1_DIR_FROMDS: da = ra = header->i_addr1; ta = header->i_addr2; sa = header->i_addr3; trim = sizeof(struct ieee80211_frame); RAY_DPRINTF(sc, RAY_DBG_RX, "ap %6D from %6D to %6D", ta, ":", sa, ":", da, ":"); break; case IEEE80211_FC1_DIR_TODS: ra = header->i_addr1; sa = ta = header->i_addr2; da = header->i_addr3; trim = sizeof(struct ieee80211_frame); RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D ap %6D", sa, ":", da, ":", ra, ":"); break; case IEEE80211_FC1_DIR_DSTODS: ra = header->i_addr1; ta = header->i_addr2; da = header->i_addr3; sa = (u_int8_t *)header+1; trim = sizeof(struct ieee80211_frame) + ETHER_ADDR_LEN; RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D ap %6D to %6D", sa, ":", da, ":", ta, ":", ra, ":"); break; } /* * Framing * * Each case must leave an Ethernet header and adjust trim. 
*/ switch (sc->sc_c.np_framing) { case RAY_FRAMING_ENCAPSULATION: /* A NOP as the Ethernet header is in the packet */ break; case RAY_FRAMING_TRANSLATION: /* Check that we have an LLC and SNAP sequence */ llc = (struct llc *)((u_int8_t *)header + trim); if (llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP && llc->llc_control == LLC_UI && llc->llc_un.type_snap.org_code[0] == 0 && llc->llc_un.type_snap.org_code[1] == 0 && llc->llc_un.type_snap.org_code[2] == 0) { struct ether_header *eh; /* * This is not magic. RFC1042 header is 8 * bytes, with the last two bytes being the * ether type. So all we need is another * ETHER_ADDR_LEN bytes to write the * destination into. */ trim -= ETHER_ADDR_LEN; eh = (struct ether_header *)((u_int8_t *)header + trim); /* * Copy carefully to avoid mashing the MAC * addresses. The address layout in the .11 header * does make sense, honest, but it is a pain. * * NODS da sa no risk * FROMDS da ta sa sa then da * DSTODS ra ta da sa sa then da * TODS ra sa da da then sa */ if (sa > da) { /* Copy sa first */ bcopy(sa, eh->ether_shost, ETHER_ADDR_LEN); bcopy(da, eh->ether_dhost, ETHER_ADDR_LEN); } else { /* Copy da first */ bcopy(da, eh->ether_dhost, ETHER_ADDR_LEN); bcopy(sa, eh->ether_shost, ETHER_ADDR_LEN); } } else { /* Assume RAY_FRAMING_ENCAPSULATION */ RAY_RECERR(sc, "got encapsulated packet but in translation mode"); } break; default: RAY_RECERR(sc, "unknown framing type %d", sc->sc_c.np_framing); ifp->if_ierrors++; m_freem(m0); return; } RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "(2) packet after framing"); /* * Finally, do a bit of house keeping before sending the packet * up the stack. 
*/ m_adj(m0, trim); RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "(3) packet after trimming"); ifp->if_ipackets++; ray_rx_update_cache(sc, header->i_addr2, siglev, antenna); (*ifp->if_input)(ifp, m0); } /* * Deal with MGT packet types */ static void ray_rx_mgt(struct ray_softc *sc, struct mbuf *m0) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, ""); if ((header->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS) { RAY_RECERR(sc, "MGT TODS/FROMDS wrong fc1 0x%x", header->i_fc[1] & IEEE80211_FC1_DIR_MASK); ifp->if_ierrors++; m_freem(m0); return; } /* * Check the the mgt packet subtype, some packets should be * dropped depending on the mode the station is in. See pg * 52(60) of docs * * P - proccess, J - Junk, E - ECF deals with, I - Illegal * ECF Proccesses * AHDOC procces or junk * INFRA STA process or junk * INFRA AP process or jumk * * +PPP IEEE80211_FC0_SUBTYPE_BEACON * +EEE IEEE80211_FC0_SUBTYPE_PROBE_REQ * +EEE IEEE80211_FC0_SUBTYPE_PROBE_RESP * PPP IEEE80211_FC0_SUBTYPE_AUTH * PPP IEEE80211_FC0_SUBTYPE_DEAUTH * JJP IEEE80211_FC0_SUBTYPE_ASSOC_REQ * JPJ IEEE80211_FC0_SUBTYPE_ASSOC_RESP * JPP IEEE80211_FC0_SUBTYPE_DISASSOC * JJP IEEE80211_FC0_SUBTYPE_REASSOC_REQ * JPJ IEEE80211_FC0_SUBTYPE_REASSOC_RESP * +EEE IEEE80211_FC0_SUBTYPE_ATIM */ RAY_MBUF_DUMP(sc, RAY_DBG_MGT, m0, "MGT packet"); switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_BEACON: RAY_DPRINTF(sc, RAY_DBG_MGT, "BEACON MGT packet"); ray_rx_mgt_beacon(sc, m0); break; case IEEE80211_FC0_SUBTYPE_AUTH: RAY_DPRINTF(sc, RAY_DBG_MGT, "AUTH MGT packet"); ray_rx_mgt_auth(sc, m0); break; case IEEE80211_FC0_SUBTYPE_DEAUTH: RAY_DPRINTF(sc, RAY_DBG_MGT, "DEAUTH MGT packet"); /* XXX ray_rx_mgt_deauth(sc, m0); */ break; case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: RAY_DPRINTF(sc, RAY_DBG_MGT, "(RE)ASSOC_REQ MGT packet"); if ((sc->sc_c.np_net_type 
== RAY_MIB_NET_TYPE_INFRA) && (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_AP)) RAY_RECERR(sc, "can't be an AP yet"); /* XXX_ACTING_AP */ break; case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: RAY_DPRINTF(sc, RAY_DBG_MGT, "(RE)ASSOC_RESP MGT packet"); if ((sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_INFRA) && (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_TERMINAL)) RAY_RECERR(sc, "can't be in INFRA yet"); /* XXX_INFRA */ break; case IEEE80211_FC0_SUBTYPE_DISASSOC: RAY_DPRINTF(sc, RAY_DBG_MGT, "DISASSOC MGT packet"); if (sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_INFRA) RAY_RECERR(sc, "can't be in INFRA yet"); /* XXX_INFRA */ break; case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_RESP: case IEEE80211_FC0_SUBTYPE_ATIM: RAY_RECERR(sc, "unexpected MGT packet subtype 0x%0x", header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); ifp->if_ierrors++; break; default: RAY_RECERR(sc, "reserved MGT packet subtype 0x%x", header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); ifp->if_ierrors++; } m_freem(m0); } /* * Deal with BEACON management packet types * XXX furtle anything interesting out * XXX Note that there are rules governing what beacons to read * XXX see 8802 S7.2.3, S11.1.2.3 * XXX is this actually useful? 
*/ static void ray_rx_mgt_beacon(struct ray_softc *sc, struct mbuf *m0) { struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); ieee80211_mgt_beacon_t beacon = (u_int8_t *)(header+1); struct ieee80211_information elements; u_int64_t *timestamp; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, ""); timestamp = (u_int64_t *)beacon; RAY_DPRINTF(sc, RAY_DBG_MGT, "timestamp\t0x%x", *timestamp); RAY_DPRINTF(sc, RAY_DBG_MGT, "interval\t\t0x%x", IEEE80211_BEACON_INTERVAL(beacon)); RAY_DPRINTF(sc, RAY_DBG_MGT, "capability\t0x%x", IEEE80211_BEACON_CAPABILITY(beacon)); ray_rx_mgt_info(sc, m0, &elements); } static void ray_rx_mgt_info(struct ray_softc *sc, struct mbuf *m0, struct ieee80211_information *elements) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); ieee80211_mgt_beacon_t beacon = (u_int8_t *)(header+1); ieee80211_mgt_beacon_t bp, be; int len; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, ""); bp = beacon + 12; be = mtod(m0, u_int8_t *) + m0->m_len; while (bp < be) { len = *(bp + 1); RAY_DPRINTF(sc, RAY_DBG_MGT, "id 0x%02x length %d", *bp, len); switch (*bp) { case IEEE80211_ELEMID_SSID: if (len > IEEE80211_NWID_LEN) { RAY_RECERR(sc, "bad SSD length: %d from %6D", len, header->i_addr2, ":"); } strncpy(elements->ssid, bp + 2, len); elements->ssid[len] = 0; RAY_DPRINTF(sc, RAY_DBG_MGT, "beacon ssid %s", elements->ssid); break; case IEEE80211_ELEMID_RATES: RAY_DPRINTF(sc, RAY_DBG_MGT, "rates"); break; case IEEE80211_ELEMID_FHPARMS: elements->fh.dwell = bp[2] + (bp[3] << 8); elements->fh.set = bp[4]; elements->fh.pattern = bp[5]; elements->fh.index = bp[6]; RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams dwell\t0x%04x", elements->fh.dwell); RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams set\t0x%02x", elements->fh.set); RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams pattern\t0x%02x", elements->fh.pattern); RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams index\t0x%02x", elements->fh.index); break; case IEEE80211_ELEMID_DSPARMS: 
RAY_RECERR(sc, "got direct sequence params!"); break; case IEEE80211_ELEMID_CFPARMS: RAY_DPRINTF(sc, RAY_DBG_MGT, "cfparams"); break; case IEEE80211_ELEMID_TIM: elements->tim.count = bp[2]; elements->tim.period = bp[3]; elements->tim.bitctl = bp[4]; RAY_DPRINTF(sc, RAY_DBG_MGT, "tim count\t0x%02x", elements->tim.count); RAY_DPRINTF(sc, RAY_DBG_MGT, "tim period\t0x%02x", elements->tim.period); RAY_DPRINTF(sc, RAY_DBG_MGT, "tim bitctl\t0x%02x", elements->tim.bitctl); #if RAY_DEBUG & RAY_DBG_MGT { int i; for (i = 5; i < len + 1; i++) RAY_DPRINTF(sc, RAY_DBG_MGT, "tim pvt[%03d]\t0x%02x", i-5, bp[i]); } #endif /* (RAY_DEBUG & RAY_DBG_MGT) */ break; case IEEE80211_ELEMID_IBSSPARMS: elements->ibss.atim = bp[2] + (bp[3] << 8); RAY_DPRINTF(sc, RAY_DBG_MGT, "ibssparams atim\t0x%02x", elements->ibss.atim); break; case IEEE80211_ELEMID_CHALLENGE: RAY_DPRINTF(sc, RAY_DBG_MGT, "challenge"); break; default: RAY_RECERR(sc, "reserved MGT element id 0x%x", *bp); ifp->if_ierrors++;break; } bp += bp[1] + 2; } } /* * Deal with AUTH management packet types */ static void ray_rx_mgt_auth(struct ray_softc *sc, struct mbuf *m0) { struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); ieee80211_mgt_auth_t auth = (u_int8_t *)(header+1); RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_AUTH, ""); switch (IEEE80211_AUTH_ALGORITHM(auth)) { case IEEE80211_AUTH_ALG_OPEN: RAY_DPRINTF(sc, RAY_DBG_AUTH, "open system authentication sequence number %d", IEEE80211_AUTH_TRANSACTION(auth)); if (IEEE80211_AUTH_TRANSACTION(auth) == IEEE80211_AUTH_OPEN_REQUEST) { /* XXX_AUTH use ray_init_auth_send */ } else if (IEEE80211_AUTH_TRANSACTION(auth) == IEEE80211_AUTH_OPEN_RESPONSE) ray_init_auth_done(sc, IEEE80211_AUTH_STATUS(auth)); break; case IEEE80211_AUTH_ALG_SHARED: RAY_RECERR(sc, "shared key authentication sequence number %d", IEEE80211_AUTH_TRANSACTION(auth)); break; default: RAY_RECERR(sc, "reserved authentication subtype 0x%04hx", IEEE80211_AUTH_ALGORITHM(auth)); break; } } /* * Deal with CTL packet 
types */ static void ray_rx_ctl(struct ray_softc *sc, struct mbuf *m0) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CTL, ""); if ((header->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS) { RAY_RECERR(sc, "CTL TODS/FROMDS wrong fc1 0x%x", header->i_fc[1] & IEEE80211_FC1_DIR_MASK); ifp->if_ierrors++; m_freem(m0); return; } /* * Check the the ctl packet subtype, some packets should be * dropped depending on the mode the station is in. The ECF * should deal with everything but the power save poll to an * AP. See pg 52(60) of docs. */ RAY_MBUF_DUMP(sc, RAY_DBG_CTL, m0, "CTL packet"); switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_PS_POLL: RAY_DPRINTF(sc, RAY_DBG_CTL, "PS_POLL CTL packet"); if ((sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_INFRA) && (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_AP)) RAY_RECERR(sc, "can't be an AP yet"); /* XXX_ACTING_AP */ break; case IEEE80211_FC0_SUBTYPE_RTS: case IEEE80211_FC0_SUBTYPE_CTS: case IEEE80211_FC0_SUBTYPE_ACK: case IEEE80211_FC0_SUBTYPE_CF_END: case IEEE80211_FC0_SUBTYPE_CF_END_ACK: RAY_RECERR(sc, "unexpected CTL packet subtype 0x%0x", header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); ifp->if_ierrors++; break; default: RAY_RECERR(sc, "reserved CTL packet subtype 0x%x", header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); ifp->if_ierrors++; } m_freem(m0); } /* * Update rx level and antenna cache */ static void ray_rx_update_cache(struct ray_softc *sc, u_int8_t *src, u_int8_t siglev, u_int8_t antenna) { struct timeval mint; struct ray_siglev *sl; int i, mini; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); /* Try to find host */ for (i = 0; i < RAY_NSIGLEVRECS; i++) { sl = &sc->sc_siglevs[i]; if (bcmp(sl->rsl_host, src, ETHER_ADDR_LEN) == 0) goto found; } /* Not found, find oldest slot */ mini = 0; mint.tv_sec = LONG_MAX; mint.tv_usec = 0; for (i = 0; i < RAY_NSIGLEVRECS; i++) { sl = &sc->sc_siglevs[i]; 
		if (timevalcmp(&sl->rsl_time, &mint, <)) {
			mini = i;
			mint = sl->rsl_time;
		}
	}
	sl = &sc->sc_siglevs[mini];
	bzero(sl->rsl_siglevs, RAY_NSIGLEV);
	bzero(sl->rsl_antennas, RAY_NANTENNA);
	bcopy(src, sl->rsl_host, ETHER_ADDR_LEN);

found:
	microtime(&sl->rsl_time);
	/* Shift the history down one slot and insert the new sample */
	bcopy(sl->rsl_siglevs, &sl->rsl_siglevs[1], RAY_NSIGLEV-1);
	sl->rsl_siglevs[0] = siglev;
	if (sc->sc_version != RAY_ECFS_BUILD_4) {
		bcopy(sl->rsl_antennas, &sl->rsl_antennas[1], RAY_NANTENNA-1);
		sl->rsl_antennas[0] = antenna;
	}
}

/*
 * Interrupt handling
 */

/*
 * Process an interrupt
 */
static void
ray_intr(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	size_t ccs;
	u_int8_t cmd, status;
	int ccsi;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * NOTE(review): sc is already dereferenced above (ifp
	 * initializer and RAY_MAP_CM) before this NULL check, so the
	 * NULL half of the test can never help; only the sc_gone
	 * check is effective here - verify intent.
	 */
	if ((sc == NULL) || (sc->sc_gone))
		return;

	/*
	 * Check that the interrupt was for us, if so get the rcs/ccs
	 * and vector on the command contained within it.
	 */
	if (RAY_HCS_INTR(sc)) {
		ccsi = SRAM_READ_1(sc, RAY_SCB_RCSI);
		ccs = RAY_CCS_ADDRESS(ccsi);
		cmd = SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_cmd);
		status = SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_status);
		if (ccsi <= RAY_CCS_LAST)
			ray_intr_ccs(sc, cmd, status, ccs);
		else if (ccsi <= RAY_RCS_LAST)
			ray_intr_rcs(sc, cmd, ccs);
		else
			RAY_RECERR(sc, "bad ccs index 0x%x", ccsi);
		RAY_HCS_CLEAR_INTR(sc);
	}

	/* Send any packets lying around and update error counters */
	if (!(ifp->if_flags & IFF_OACTIVE) && (ifp->if_snd.ifq_head != NULL))
		ray_tx(ifp);
	if ((++sc->sc_checkcounters % 32) == 0)
		ray_intr_updt_errcntrs(sc);
}

/*
 * Read the error counters.
 */
static void
ray_intr_updt_errcntrs(struct ray_softc *sc)
{
	size_t csc;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * The card implements the following protocol to keep the
	 * values from being changed while read: It checks the `own'
	 * bit and if zero writes the current internal counter value,
	 * it then sets the `own' bit to 1. If the `own' bit was 1 it
	 * increments its internal counter. The user thus reads the
	 * counter if the `own' bit is one and then sets the own bit
	 * to 0.
	 */
	csc = RAY_STATUS_BASE;
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_mrxo_own)) {
		sc->sc_rxoverflow +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_mrx_overflow);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_mrxo_own, 0);
	}
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_mrxc_own)) {
		/*
		 * NOTE(review): this accumulates csc_mrx_overflow into
		 * the checksum-error counter - it looks like a
		 * copy/paste of the block above and should probably
		 * read the checksum failure field instead.  Verify
		 * against struct ray_csc in the register header.
		 */
		sc->sc_rxcksum +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_mrx_overflow);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_mrxc_own, 0);
	}
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_rxhc_own)) {
		sc->sc_rxhcksum +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_rx_hcksum);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_rxhc_own, 0);
	}
	sc->sc_rxnoise = SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_rx_noise);
}

/*
 * Process CCS command completion
 *
 * Vectors the completed command to its _done handler; commands that
 * the driver never issues are logged as unexpected.
 */
static void
ray_intr_ccs(struct ray_softc *sc, u_int8_t cmd, u_int8_t status, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	switch (cmd) {

	case RAY_CMD_DOWNLOAD_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START_PARAMS");
		ray_init_download_done(sc, status, ccs);
		break;

	case RAY_CMD_UPDATE_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "UPDATE_PARAMS");
		ray_upparams_done(sc, status, ccs);
		break;

	case RAY_CMD_REPORT_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "REPORT_PARAMS");
		ray_repparams_done(sc, status, ccs);
		break;

	case RAY_CMD_UPDATE_MCAST:
		RAY_DPRINTF(sc, RAY_DBG_COM, "UPDATE_MCAST");
		ray_mcast_done(sc, status, ccs);
		break;

	case RAY_CMD_START_NET:
	case RAY_CMD_JOIN_NET:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START|JOIN_NET");
		ray_init_sj_done(sc, status, ccs);
		break;

	case RAY_CMD_TX_REQ:
		RAY_DPRINTF(sc, RAY_DBG_COM, "TX_REQ");
		ray_tx_done(sc, status, ccs);
		break;

	case RAY_CMD_START_ASSOC:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START_ASSOC");
		ray_init_assoc_done(sc, status, ccs);
		break;

	case RAY_CMD_UPDATE_APM:
		RAY_RECERR(sc, "unexpected UPDATE_APM");
		break;

	case RAY_CMD_TEST_MEM:
		RAY_RECERR(sc, "unexpected TEST_MEM");
		break;

	case RAY_CMD_SHUTDOWN:
		RAY_RECERR(sc, "unexpected SHUTDOWN");
		break;

	case RAY_CMD_DUMP_MEM:
		RAY_RECERR(sc, "unexpected DUMP_MEM");
		break;

	case RAY_CMD_START_TIMER:
		RAY_RECERR(sc, "unexpected START_TIMER");
		break;

	default:
		RAY_RECERR(sc, "unknown command 0x%x", cmd);
		break;
	}
}

/*
 * Process ECF command request
 *
 * These are firmware-initiated requests (rx done, roaming events);
 * the rcs is always handed back to the card when done.
 */
static void
ray_intr_rcs(struct ray_softc *sc, u_int8_t cmd, size_t rcs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	switch (cmd) {

	case RAY_ECMD_RX_DONE:
		RAY_DPRINTF(sc, RAY_DBG_RX, "RX_DONE");
		ray_rx(sc, rcs);
		break;

	case RAY_ECMD_REJOIN_DONE:
		RAY_DPRINTF(sc, RAY_DBG_RX, "REJOIN_DONE");
		sc->sc_c.np_havenet = 1;
		break;

	case RAY_ECMD_ROAM_START:
		RAY_DPRINTF(sc, RAY_DBG_RX, "ROAM_START");
		sc->sc_c.np_havenet = 0;
		break;

	case RAY_ECMD_JAPAN_CALL_SIGNAL:
		RAY_RECERR(sc, "unexpected JAPAN_CALL_SIGNAL");
		break;

	default:
		RAY_RECERR(sc, "unknown command 0x%x", cmd);
		break;
	}
	RAY_CCS_FREE(sc, rcs);
}

/*
 * User land entry to multicast list changes
 */
static int
ray_mcast_user(struct ray_softc *sc)
{
	struct ray_comq_entry *com[2];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Do all checking in the runq to preserve ordering.
	 *
	 * We run promisc to pick up changes to the ALL_MULTI
	 * interface flag.
	 */
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_mcast, 0);
	com[ncom++] = RAY_COM_MALLOC(ray_promisc, 0);

	/* error is assigned by the RAY_COM_RUNQ macro */
	RAY_COM_RUNQ(sc, com, ncom, "raymcast", error);

	/* XXX no real error processing from anything yet! */

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry to setting the multicast filter list
 *
 * MUST always be followed by a call to ray_promisc to pick up changes
 * to promisc flag
 */
static void
ray_mcast(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	size_t bufp;
	int count = 0;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * If card is not running we don't need to update this.
	 */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "not running");
		ray_com_runq_done(sc);
		return;
	}

	/*
	 * The multicast list is only 16 items long so use promiscuous
	 * mode and don't bother updating the multicast list.
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
		count++;
	if (count == 0) {
		ray_com_runq_done(sc);
		return;
	} else if (count > 16) {
		ifp->if_flags |= IFF_ALLMULTI;
		ray_com_runq_done(sc);
		return;
	} else if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_MCAST);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs,
	    ray_cmd_update_mcast, c_nmcast, count);
	bufp = RAY_HOST_TO_ECF_BASE;
	/* Copy each link-level multicast address into card memory */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		SRAM_WRITE_REGION(
		    sc,
		    bufp,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    ETHER_ADDR_LEN
		);
		bufp += ETHER_ADDR_LEN;
	}

	ray_com_ecf(sc, com);
}

/*
 * Complete the multicast filter list update
 */
static void
ray_mcast_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_COM_CHECK(sc, ccs);

	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	ray_com_ecf_done(sc);
}

/*
 * Runq entry to set/reset promiscuous mode
 */
static void
ray_promisc(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * If card not running or we already have the right flags
	 * we don't need to update this
	 */
	sc->sc_d.np_promisc = !!(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI));
	if (!(ifp->if_flags & IFF_RUNNING) ||
	    (sc->sc_c.np_promisc == sc->sc_d.np_promisc)) {
		ray_com_runq_done(sc);
		return;
	}

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_PARAMS);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs,
	    ray_cmd_update, c_paramid, RAY_MIB_PROMISC);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_nparam, 1);
	SRAM_WRITE_1(sc, RAY_HOST_TO_ECF_BASE, sc->sc_d.np_promisc);

	ray_com_ecf(sc, com);
}

/*
 * User
land entry to parameter reporting
 *
 * As we by pass the runq to report current parameters this function
 * only provides a snap shot of the driver's state.
 */
static int
ray_repparams_user(struct ray_softc *sc, struct ray_param_req *pr)
{
	struct ray_comq_entry *com[1];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Test for illegal values or immediate responses
	 */
	if (pr->r_paramid > RAY_MIB_MAX)
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_4) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V4))
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_5) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V5))
		return (EINVAL);
	if (pr->r_paramid > RAY_MIB_LASTUSER) {
		/*
		 * Pseudo-MIB parameters answered straight from the
		 * driver's cached current (sc_c) and desired (sc_d)
		 * network parameter copies - no card access needed.
		 */
		switch (pr->r_paramid) {

		case RAY_MIB_VERSION:
			if (sc->sc_version == RAY_ECFS_BUILD_4)
				*pr->r_data = RAY_V4;
			else
				*pr->r_data = RAY_V5;
			break;

		case RAY_MIB_CUR_BSSID:
			bcopy(sc->sc_c.np_bss_id, pr->r_data, ETHER_ADDR_LEN);
			break;
		case RAY_MIB_CUR_INITED:
			*pr->r_data = sc->sc_c.np_inited;
			break;
		case RAY_MIB_CUR_DEF_TXRATE:
			*pr->r_data = sc->sc_c.np_def_txrate;
			break;
		case RAY_MIB_CUR_ENCRYPT:
			*pr->r_data = sc->sc_c.np_encrypt;
			break;
		case RAY_MIB_CUR_NET_TYPE:
			*pr->r_data = sc->sc_c.np_net_type;
			break;
		case RAY_MIB_CUR_SSID:
			bcopy(sc->sc_c.np_ssid, pr->r_data,
			    IEEE80211_NWID_LEN);
			break;
		case RAY_MIB_CUR_PRIV_START:
			*pr->r_data = sc->sc_c.np_priv_start;
			break;
		case RAY_MIB_CUR_PRIV_JOIN:
			*pr->r_data = sc->sc_c.np_priv_join;
			break;

		case RAY_MIB_DES_BSSID:
			bcopy(sc->sc_d.np_bss_id, pr->r_data, ETHER_ADDR_LEN);
			break;
		case RAY_MIB_DES_INITED:
			*pr->r_data = sc->sc_d.np_inited;
			break;
		case RAY_MIB_DES_DEF_TXRATE:
			*pr->r_data = sc->sc_d.np_def_txrate;
			break;
		case RAY_MIB_DES_ENCRYPT:
			*pr->r_data = sc->sc_d.np_encrypt;
			break;
		case RAY_MIB_DES_NET_TYPE:
			*pr->r_data = sc->sc_d.np_net_type;
			break;
		case RAY_MIB_DES_SSID:
			bcopy(sc->sc_d.np_ssid, pr->r_data,
			    IEEE80211_NWID_LEN);
			break;
		case RAY_MIB_DES_PRIV_START:
			*pr->r_data = sc->sc_d.np_priv_start;
			break;
		case RAY_MIB_DES_PRIV_JOIN:
			*pr->r_data = sc->sc_d.np_priv_join;
			break;

		case RAY_MIB_CUR_AP_STATUS:
			*pr->r_data = sc->sc_c.np_ap_status;
			break;
		case RAY_MIB_CUR_PROMISC:
			*pr->r_data = sc->sc_c.np_promisc;
			break;
		case RAY_MIB_DES_AP_STATUS:
			*pr->r_data = sc->sc_d.np_ap_status;
			break;
		case RAY_MIB_DES_PROMISC:
			*pr->r_data = sc->sc_d.np_promisc;
			break;
		case RAY_MIB_CUR_FRAMING:
			*pr->r_data = sc->sc_c.np_framing;
			break;
		case RAY_MIB_DES_FRAMING:
			*pr->r_data = sc->sc_d.np_framing;
			break;

		default:
			return (EINVAL);
			break;

		}
		pr->r_failcause = 0;
		if (sc->sc_version == RAY_ECFS_BUILD_4)
			pr->r_len = mib_info[pr->r_paramid][RAY_MIB_INFO_SIZ4];
		else if (sc->sc_version == RAY_ECFS_BUILD_5)
			pr->r_len = mib_info[pr->r_paramid][RAY_MIB_INFO_SIZ5];
		return (0);
	}
	pr->r_failcause = 0;

	/* Real MIB parameters must be read from the card via the runq */
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_repparams, RAY_COM_FWOK);
	com[ncom-1]->c_pr = pr;

	RAY_COM_RUNQ(sc, com, ncom, "rayrparm", error);

	/* XXX no real error processing from anything yet! */
	if (!com[0]->c_retval && pr->r_failcause)
		error = EINVAL;

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry to read the required parameter
 *
 * The card and driver are happy for parameters to be read
 * whenever the card is plugged in
 */
static void
ray_repparams(struct ray_softc *sc, struct ray_comq_entry *com)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_REPORT_PARAMS);

	SRAM_WRITE_FIELD_1(sc, com->c_ccs,
	    ray_cmd_report, c_paramid, com->c_pr->r_paramid);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_report, c_nparam, 1);

	ray_com_ecf(sc, com);
}

/*
 * Complete the parameter reporting
 *
 * Copies the reported value out of card shared memory into the user
 * request hanging off the head of the command queue.
 */
static void
ray_repparams_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);
	RAY_COM_CHECK(sc, ccs);

	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	com = TAILQ_FIRST(&sc->sc_comq);
	com->c_pr->r_failcause =
	    SRAM_READ_FIELD_1(sc, ccs, ray_cmd_report, c_failcause);
	com->c_pr->r_len = SRAM_READ_FIELD_1(sc, ccs, ray_cmd_report,
	    c_len);
	SRAM_READ_REGION(sc, RAY_ECF_TO_HOST_BASE, com->c_pr->r_data,
	    com->c_pr->r_len);

	ray_com_ecf_done(sc);
}

/*
 * User land entry (and exit) to the error counters
 *
 * Simply snapshots the counters accumulated by
 * ray_intr_updt_errcntrs; cannot fail.
 */
static int
ray_repstats_user(struct ray_softc *sc, struct ray_stats_req *sr)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	sr->rxoverflow = sc->sc_rxoverflow;
	sr->rxcksum = sc->sc_rxcksum;
	sr->rxhcksum = sc->sc_rxhcksum;
	sr->rxnoise = sc->sc_rxnoise;

	return (0);
}

/*
 * User land entry to parameter update changes
 *
 * As a parameter change can cause the network parameters to be
 * invalid we have to re-start/join.
 */
static int
ray_upparams_user(struct ray_softc *sc, struct ray_param_req *pr)
{
	struct ray_comq_entry *com[4];
	int error, ncom, todo;
#define RAY_UPP_SJ	0x1	/* need a start/join sequence */
#define RAY_UPP_PARAMS	0x2	/* need an UPDATE_PARAMS command */

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Check that the parameter is available based on firmware version
	 */
	pr->r_failcause = 0;
	if (pr->r_paramid > RAY_MIB_LASTUSER)
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_4) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V4))
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_5) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V5))
		return (EINVAL);

	/*
	 * Handle certain parameters specially
	 */
	todo = 0;
	switch (pr->r_paramid) {
	case RAY_MIB_NET_TYPE:		/* Updated via START_NET JOIN_NET */
		sc->sc_d.np_net_type = *pr->r_data;
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_SSID:		/* Updated via START_NET JOIN_NET */
		bcopy(pr->r_data, sc->sc_d.np_ssid, IEEE80211_NWID_LEN);
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_PRIVACY_MUST_START:/* Updated via START_NET */
		if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_ADHOC)
			return (EINVAL);
		sc->sc_d.np_priv_start = *pr->r_data;
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_PRIVACY_CAN_JOIN:	/* Updated via START_NET JOIN_NET */
		sc->sc_d.np_priv_join = *pr->r_data;
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_BASIC_RATE_SET:
		sc->sc_d.np_def_txrate = *pr->r_data;
		todo |= RAY_UPP_PARAMS;
		break;

	case RAY_MIB_AP_STATUS:	/* Unsupported */
	case RAY_MIB_MAC_ADDR:	/* XXX Need interface up but could be done */
	case RAY_MIB_PROMISC:	/* BPF */
		return (EINVAL);
		break;

	default:
		todo |= RAY_UPP_PARAMS;
		todo |= RAY_UPP_SJ;
		break;
	}

	/*
	 * Generate the runq entries as needed
	 */
	ncom = 0;
	if (todo & RAY_UPP_PARAMS) {
		com[ncom++] = RAY_COM_MALLOC(ray_upparams, 0);
		com[ncom-1]->c_pr = pr;
	}
	if (todo & RAY_UPP_SJ) {
		com[ncom++] = RAY_COM_MALLOC(ray_init_sj, 0);
		com[ncom++] = RAY_COM_MALLOC(ray_init_auth, 0);
		com[ncom++] = RAY_COM_MALLOC(ray_init_assoc, 0);
	}

	RAY_COM_RUNQ(sc, com, ncom, "rayuparam", error);

	/* XXX no real error processing from anything yet! */
	if (!com[0]->c_retval && pr->r_failcause)
		error = EINVAL;

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry to update a parameter
 *
 * The card and driver are basically happy for parameters to be updated
 * whenever the card is plugged in. However, there may be a couple of
 * network hangs whilst the update is performed. Reading parameters back
 * straight away may give the wrong answer and some parameters cannot be
 * read at all. Local copies should be kept.
 */
static void
ray_upparams(struct ray_softc *sc, struct ray_comq_entry *com)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_PARAMS);

	SRAM_WRITE_FIELD_1(sc, com->c_ccs,
	    ray_cmd_update, c_paramid, com->c_pr->r_paramid);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_nparam, 1);
	SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE,
	    com->c_pr->r_data, com->c_pr->r_len);

	ray_com_ecf(sc, com);
}

/*
 * Complete the parameter update, note that promisc finishes up here too
 */
static void
ray_upparams_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);
	RAY_COM_CHECK(sc, ccs);

	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	com = TAILQ_FIRST(&sc->sc_comq);

	switch (SRAM_READ_FIELD_1(sc, ccs, ray_cmd_update, c_paramid)) {

	case RAY_MIB_PROMISC:
		/* Promisc updates come from ray_promisc and carry no c_pr */
		sc->sc_c.np_promisc = SRAM_READ_1(sc, RAY_HOST_TO_ECF_BASE);
		RAY_DPRINTF(sc, RAY_DBG_IOCTL,
		    "promisc value %d", sc->sc_c.np_promisc);
		break;

	default:
		com->c_pr->r_failcause =
		    SRAM_READ_FIELD_1(sc, ccs, ray_cmd_update, c_failcause);
		break;

	}

	ray_com_ecf_done(sc);
}

/*
 * Command queuing and execution
 */

/*
 * Set up a comq entry struct
 */
static struct ray_comq_entry *
ray_com_init(struct ray_comq_entry *com, ray_comqfn_t function, int flags,
    char *mesg)
{
	com->c_function = function;
	com->c_flags = flags;
	com->c_retval = 0;
	com->c_ccs = 0;
	com->c_wakeup = NULL;
	com->c_pr = NULL;
	com->c_mesg = mesg;

	return (com);
}

/*
 * Malloc and set up a comq entry struct
 */
static struct ray_comq_entry *
ray_com_malloc(ray_comqfn_t function, int flags, char *mesg)
{
	struct ray_comq_entry *com;

	MALLOC(com, struct ray_comq_entry *,
	    sizeof(struct ray_comq_entry), M_RAYCOM, M_WAITOK);

	return (ray_com_init(com, function, flags, mesg));
}

/*
 * Add an array of commands to the runq, get some ccs's for them and
 * then run, waiting on the last command.
 *
 * We add the commands to the queue first to preserve ioctl ordering.
 *
 * On recoverable errors, this routine removes the entries from the
 * runq. A caller can requeue the commands (and still preserve its own
 * processes ioctl ordering) but doesn't have to. When the card is
 * detached we get out quickly to prevent panics and don't bother
 * about the runq.
 */
static int
ray_com_runq_add(struct ray_softc *sc, struct ray_comq_entry *com[],
    int ncom, char *wmesg)
{
	int i, error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	error = 0;
	/*
	 * Add the commands to the runq but don't let it run until
	 * the ccs's are allocated successfully
	 */
	com[0]->c_flags |= RAY_COM_FWAIT;
	for (i = 0; i < ncom; i++) {
		com[i]->c_wakeup = com[ncom-1];
		RAY_DPRINTF(sc, RAY_DBG_COM, "adding %p", com[i]);
		RAY_DCOM(sc, RAY_DBG_DCOM, com[i], "adding");
		TAILQ_INSERT_TAIL(&sc->sc_comq, com[i], c_chain);
	}
	com[ncom-1]->c_flags |= RAY_COM_FWOK;

	/*
	 * Allocate ccs's for each command.
	 */
	for (i = 0; i < ncom; i++) {
		error = ray_ccs_alloc(sc, &com[i]->c_ccs, wmesg);
		if (error == ENXIO)
			return (ENXIO);	/* card gone - leave runq alone */
		else if (error)
			goto cleanup;
	}

	/*
	 * Allow the queue to run and sleep if needed.
	 *
	 * Iff the FDETACHED flag is set in the com entry we waited on
	 * the driver is in a zombie state! The softc structure has been
	 * freed by the generic bus detach methods - eek. We tread very
	 * carefully!
	 */
	com[0]->c_flags &= ~RAY_COM_FWAIT;
	ray_com_runq(sc);
	if (TAILQ_FIRST(&sc->sc_comq) != NULL) {
		RAY_DPRINTF(sc, RAY_DBG_COM, "sleeping");
		error = tsleep(com[ncom-1], PCATCH | PRIBIO, wmesg, 0);
		if (com[ncom-1]->c_flags & RAY_COM_FDETACHED)
			return (ENXIO);
		RAY_DPRINTF(sc, RAY_DBG_COM,
		    "awakened, tsleep returned 0x%x", error);
	} else
		error = 0;

cleanup:
	/*
	 * Only clean the queue on real errors - we don't care about it
	 * when we detach as the queue entries are freed by the callers.
	 */
	if (error && (error != ENXIO))
		for (i = 0; i < ncom; i++)
			if (!(com[i]->c_flags & RAY_COM_FCOMPLETED)) {
				RAY_DPRINTF(sc, RAY_DBG_COM,
				    "removing %p", com[i]);
				RAY_DCOM(sc, RAY_DBG_DCOM, com[i], "removing");
				TAILQ_REMOVE(&sc->sc_comq, com[i], c_chain);
				ray_ccs_free(sc, com[i]->c_ccs);
				com[i]->c_ccs = 0;
			}

	return (error);
}

/*
 * Run the command at the head of the queue (if not already running)
 */
static void
ray_com_runq(struct ray_softc *sc)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	com = TAILQ_FIRST(&sc->sc_comq);
	if ((com == NULL) ||
	    (com->c_flags & RAY_COM_FRUNNING) ||
	    (com->c_flags & RAY_COM_FWAIT) ||
	    (com->c_flags & RAY_COM_FDETACHED))
		return;

	com->c_flags |= RAY_COM_FRUNNING;
	RAY_DPRINTF(sc, RAY_DBG_COM, "running %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "running");
	com->c_function(sc, com);
}

/*
 * Remove run command, free ccs and wakeup caller.
 *
 * Minimal checks are done here as we ensure that the com and command
 * handler were matched up earlier. Must be called at splnet or higher
 * so that entries on the command queue are correctly removed.
 *
 * Remove the com from the comq, and wakeup the caller if it requested
 * to be woken. This is used for ensuring a sequence of commands
 * completes. Finally, re-run the queue.
 */
static void
ray_com_runq_done(struct ray_softc *sc)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	com = TAILQ_FIRST(&sc->sc_comq); /* XXX shall we check this as below */
	RAY_DPRINTF(sc, RAY_DBG_COM, "removing %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "removing");
	TAILQ_REMOVE(&sc->sc_comq, com, c_chain);
	com->c_flags &= ~RAY_COM_FRUNNING;
	com->c_flags |= RAY_COM_FCOMPLETED;
	com->c_retval = 0;
	ray_ccs_free(sc, com->c_ccs);
	com->c_ccs = 0;

	if (com->c_flags & RAY_COM_FWOK)
		wakeup(com->c_wakeup);

	ray_com_runq(sc);
	/* XXX what about error on completion then?
	deal with when i fix
	 * XXX the status checking
	 *
	 * XXX all the runq_done calls from IFF_RUNNING checks in runq
	 * XXX routines should return EIO but shouldn't abort the runq
	 */
}

/*
 * Send a command to the ECF.
 *
 * Spins until the firmware is ready to accept a command, writes the
 * ccs index, starts the command and arms a completion timeout for
 * commands that need one.
 */
static void
ray_com_ecf(struct ray_softc *sc, struct ray_comq_entry *com)
{
	int i = 0;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");
	RAY_MAP_CM(sc);

	while (!RAY_ECF_READY(sc)) {
		DELAY(RAY_ECF_SPIN_DELAY);
		if (++i > RAY_ECF_SPIN_TRIES)
			RAY_PANIC(sc, "spun too long");
	}
	if (i != 0)
		RAY_RECERR(sc, "spun %d times", i);

	RAY_DPRINTF(sc, RAY_DBG_COM, "sending %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "sending");
	SRAM_WRITE_1(sc, RAY_SCB_CCSI, RAY_CCS_INDEX(com->c_ccs));
	RAY_ECF_START_CMD(sc);

	if (RAY_COM_NEEDS_TIMO(
	    SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_cmd))) {
		RAY_DPRINTF(sc, RAY_DBG_COM, "adding timeout");
		sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT);
	}
}

/*
 * Deal with commands that require a timeout to test completion.
 *
 * This routine is coded to only expect one outstanding request for the
 * timed out requests at a time, but thats all that can be outstanding
 * per hardware limitations and all that we issue anyway.
 *
 * We don't do any fancy testing of the command currently issued as we
 * know it must be a timeout based one...unless I've got this wrong!
 */
static void
ray_com_ecf_timo(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	struct ray_comq_entry *com;
	u_int8_t cmd, status;
	int s;

	s = splnet();

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");
	RAY_MAP_CM(sc);

	com = TAILQ_FIRST(&sc->sc_comq);

	cmd = SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_cmd);
	status = SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_status);

	switch (status) {

	case RAY_CCS_STATUS_COMPLETE:
	case RAY_CCS_STATUS_FREE:		/* Buggy firmware */
		ray_intr_ccs(sc, cmd, status, com->c_ccs);
		break;

	case RAY_CCS_STATUS_BUSY:
		/* Still running - re-arm the timeout */
		sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT);
		break;

	default:				/* Replicates NetBSD */
		if (sc->sc_ccsinuse[RAY_CCS_INDEX(com->c_ccs)] == 1) {
			/* give a chance for the interrupt to occur */
			sc->sc_ccsinuse[RAY_CCS_INDEX(com->c_ccs)] = 2;
			sc->com_timerh = timeout(ray_com_ecf_timo, sc,
			    RAY_COM_TIMEOUT);
		} else
			ray_intr_ccs(sc, cmd, status, com->c_ccs);
		break;

	}

	splx(s);
}

/*
 * Called when interrupt handler for the command has done all it
 * needs to. Will be called at splnet.
 */
static void
ray_com_ecf_done(struct ray_softc *sc)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	untimeout(ray_com_ecf_timo, sc, sc->com_timerh);

	ray_com_runq_done(sc);
}

#if RAY_DEBUG & RAY_DBG_COM
/*
 * Process completed ECF commands that probably came from the command queue
 *
 * This routine is called after vectoring the completed ECF command
 * to the appropriate _done routine. It helps check everything is okay.
 */
static void
ray_com_ecf_check(struct ray_softc *sc, size_t ccs, char *mesg)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "%s", mesg);

	com = TAILQ_FIRST(&sc->sc_comq);

	if (com == NULL)
		RAY_PANIC(sc, "no command queue");
	if (com->c_ccs != ccs)
		RAY_PANIC(sc, "ccs's don't match");
}
#endif /* RAY_DEBUG & RAY_DBG_COM */

/*
 * CCS allocators
 */

/*
 * Obtain a ccs for a command
 *
 * Returns 0 and in `ccsp' the bus offset of the free ccs.
Will block
 * awaiting free ccs if needed - if the sleep is interrupted
 * EINTR/ERESTART is returned, if the card is ejected we return ENXIO.
 */
static int
ray_ccs_alloc(struct ray_softc *sc, size_t *ccsp, char *wmesg)
{
	size_t ccs;
	u_int i;
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	for (;;) {
		for (i = RAY_CCS_CMD_FIRST; i <= RAY_CCS_CMD_LAST; i++) {
			/* we probe here to make the card go */
			(void)SRAM_READ_FIELD_1(sc, RAY_CCS_ADDRESS(i), ray_cmd,
			    c_status);
			if (!sc->sc_ccsinuse[i])
				break;
		}
		if (i > RAY_CCS_CMD_LAST) {
			/* All in use - sleep until ray_ccs_free wakes us */
			RAY_DPRINTF(sc, RAY_DBG_CCS, "sleeping");
			error = tsleep(ray_ccs_alloc, PCATCH | PRIBIO,
			    wmesg, 0);
			if ((sc == NULL) || (sc->sc_gone))
				return (ENXIO);
			RAY_DPRINTF(sc, RAY_DBG_CCS,
			    "awakened, tsleep returned 0x%x", error);
			if (error)
				return (error);
		} else
			break;
	}
	RAY_DPRINTF(sc, RAY_DBG_CCS, "allocated 0x%02x", i);
	sc->sc_ccsinuse[i] = 1;
	ccs = RAY_CCS_ADDRESS(i);
	*ccsp = ccs;

	return (0);
}

/*
 * Fill the easy bits in of a pre-allocated CCS
 */
static void
ray_ccs_fill(struct ray_softc *sc, size_t ccs, u_int cmd)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	if (ccs == 0)
		RAY_PANIC(sc, "ccs not allocated");

	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_status, RAY_CCS_STATUS_BUSY);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_cmd, cmd);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_link, RAY_CCS_LINK_NULL);
}

/*
 * Free up a ccs allocated via ray_ccs_alloc
 *
 * Return the old status. This routine is only used for ccs allocated via
 * ray_ccs_alloc (not tx, rx or ECF command requests).
 */
static void
ray_ccs_free(struct ray_softc *sc, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

#if 1 | (RAY_DEBUG & RAY_DBG_CCS)
	if (!sc->sc_ccsinuse[RAY_CCS_INDEX(ccs)])
		RAY_RECERR(sc, "freeing free ccs 0x%02x", RAY_CCS_INDEX(ccs));
#endif /* RAY_DEBUG & RAY_DBG_CCS */
	if (!sc->sc_gone)
		RAY_CCS_FREE(sc, ccs);
	sc->sc_ccsinuse[RAY_CCS_INDEX(ccs)] = 0;
	RAY_DPRINTF(sc, RAY_DBG_CCS, "freed 0x%02x", RAY_CCS_INDEX(ccs));
	/* Wake any thread sleeping in ray_ccs_alloc */
	wakeup(ray_ccs_alloc);
}

/*
 * Obtain a ccs and tx buffer to transmit with and fill them in.
 *
 * Returns 0 and in `ccsp' the bus offset of the free ccs. Will not block
 * and if none available and will returns EAGAIN.
 *
 * The caller must fill in the length later.
 * The caller must clear the ccs on errors.
 */
static int
ray_ccs_tx(struct ray_softc *sc, size_t *ccsp, size_t *bufpp)
{
	size_t ccs, bufp;
	int i;
	u_int8_t status;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	/* Scan the tx ccs range for one the firmware has marked free */
	i = RAY_CCS_TX_FIRST;
	do {
		status = SRAM_READ_FIELD_1(sc, RAY_CCS_ADDRESS(i),
		    ray_cmd, c_status);
		if (status == RAY_CCS_STATUS_FREE)
			break;
		i++;
	} while (i <= RAY_CCS_TX_LAST);
	if (i > RAY_CCS_TX_LAST) {
		return (EAGAIN);
	}
	RAY_DPRINTF(sc, RAY_DBG_CCS, "allocated 0x%02x", i);

	/*
	 * Reserve and fill the ccs - must do the length later.
	 *
	 * Even though build 4 and build 5 have different fields all these
	 * are common apart from tx_rate. Neither the NetBSD driver or Linux
	 * driver bother to overwrite this for build 4 cards.
	 *
	 * The start of the buffer must be aligned to a 256 byte boundary
	 * (least significant byte of address = 0x00).
	 */
	ccs = RAY_CCS_ADDRESS(i);
	bufp = RAY_TX_BASE + i * RAY_TX_BUF_SIZE;
	bufp += sc->sc_tibsize;
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_status, RAY_CCS_STATUS_BUSY);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_cmd, RAY_CMD_TX_REQ);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_link, RAY_CCS_LINK_NULL);
	SRAM_WRITE_FIELD_2(sc, ccs, ray_cmd_tx, c_bufp, bufp);
	SRAM_WRITE_FIELD_1(sc, ccs,
	    ray_cmd_tx, c_tx_rate, sc->sc_c.np_def_txrate);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_apm_mode, 0);
	/* Caller writes the frame after the phy header */
	bufp += sizeof(struct ray_tx_phy_header);

	*ccsp = ccs;
	*bufpp = bufp;
	return (0);
}

/*
 * Routines to obtain resources for the card
 */

/*
 * Allocate the attribute memory on the card
 *
 * The attribute memory space is abused by these devices as IO space. As such
 * the OS card services don't have a chance of knowing that they need to keep
 * the attribute space mapped. We have to do it manually.
 */
static int
ray_res_alloc_am(struct ray_softc *sc)
{
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CM, "");

	sc->am_rid = RAY_AM_RID;
	sc->am_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY,
	    &sc->am_rid, 0UL, ~0UL, 0x1000, RF_ACTIVE);
	if (!sc->am_res) {
		RAY_PRINTF(sc, "Cannot allocate attribute memory");
		return (ENOMEM);
	}
	error = CARD_SET_MEMORY_OFFSET(device_get_parent(sc->dev),
	    sc->dev, sc->am_rid, 0, NULL);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_MEMORY_OFFSET returned 0x%0x", error);
		return (error);
	}
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->am_rid, PCCARD_A_MEM_ATTR);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->am_rid, PCCARD_A_MEM_8BIT);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	sc->am_bsh = rman_get_bushandle(sc->am_res);
	sc->am_bst = rman_get_bustag(sc->am_res);

#if RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM)
{
	u_long flags;
	u_int32_t offset;
	CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->am_rid, &flags);
	CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev,
	    sc->am_rid, &offset);
	RAY_PRINTF(sc, "allocated attribute memory:\n"
	    ". start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x",
	    bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->am_rid),
	    bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->am_rid),
	    flags, offset);
}
#endif /* RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) */

	return (0);
}

/*
 * Allocate the common memory on the card
 *
 * As this memory is described in the CIS, the OS card services should
 * have set the map up okay, but the card uses 8 bit RAM. This is not
 * described in the CIS.
 */
static int
ray_res_alloc_cm(struct ray_softc *sc)
{
	u_long start, count, end;
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CM, "");

	RAY_DPRINTF(sc,RAY_DBG_CM | RAY_DBG_BOOTPARAM,
	    "cm start 0x%0lx count 0x%0lx",
	    bus_get_resource_start(sc->dev, SYS_RES_MEMORY, RAY_CM_RID),
	    bus_get_resource_count(sc->dev, SYS_RES_MEMORY, RAY_CM_RID));

	sc->cm_rid = RAY_CM_RID;
	start = bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->cm_rid);
	count = bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->cm_rid);
	end = start + count - 1;
	sc->cm_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY,
	    &sc->cm_rid, start, end, count, RF_ACTIVE);
	if (!sc->cm_res) {
		RAY_PRINTF(sc, "Cannot allocate common memory");
		return (ENOMEM);
	}
	error = CARD_SET_MEMORY_OFFSET(device_get_parent(sc->dev),
	    sc->dev, sc->cm_rid, 0, NULL);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_MEMORY_OFFSET returned 0x%0x", error);
		return (error);
	}
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->cm_rid, PCCARD_A_MEM_COM);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->cm_rid, PCCARD_A_MEM_8BIT);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return
(error); } sc->cm_bsh = rman_get_bushandle(sc->cm_res); sc->cm_bst = rman_get_bustag(sc->cm_res); #if RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) { u_long flags; u_int32_t offset; CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->cm_rid, &flags); CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->cm_rid, &offset); RAY_PRINTF(sc, "allocated common memory:\n" ". start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x", bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->cm_rid), bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->cm_rid), flags, offset); } #endif /* RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) */ return (0); } /* * Get an irq and attach it to the bus */ static int ray_res_alloc_irq(struct ray_softc *sc) { int error; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_DPRINTF(sc,RAY_DBG_CM | RAY_DBG_BOOTPARAM, "irq start 0x%0lx count 0x%0lx", bus_get_resource_start(sc->dev, SYS_RES_IRQ, 0), bus_get_resource_count(sc->dev, SYS_RES_IRQ, 0)); sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { RAY_PRINTF(sc, "Cannot allocate irq"); return (ENOMEM); } if ((error = bus_setup_intr(sc->dev, sc->irq_res, INTR_TYPE_NET, ray_intr, sc, &sc->irq_handle)) != 0) { RAY_PRINTF(sc, "Failed to setup irq"); return (error); } RAY_DPRINTF(sc, RAY_DBG_CM | RAY_DBG_BOOTPARAM, "allocated irq:\n" ". 
start 0x%0lx count 0x%0lx", bus_get_resource_start(sc->dev, SYS_RES_IRQ, sc->irq_rid), bus_get_resource_count(sc->dev, SYS_RES_IRQ, sc->irq_rid)); return (0); } /* * Release all of the card's resources */ static void ray_res_release(struct ray_softc *sc) { if (sc->irq_res != 0) { bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; } if (sc->am_res != 0) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->am_rid, sc->am_res); sc->am_res = 0; } if (sc->cm_res != 0) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->cm_rid, sc->cm_res); sc->cm_res = 0; } } /* * mbuf dump */ #if RAY_DEBUG & RAY_DBG_MBUF static void ray_dump_mbuf(struct ray_softc *sc, struct mbuf *m, char *s) { u_int8_t *d, *ed; u_int i; char p[17]; RAY_PRINTF(sc, "%s", s); RAY_PRINTF(sc, "\nm0->data\t0x%p\nm_pkthdr.len\t%d\nm_len\t%d", mtod(m, u_int8_t *), m->m_pkthdr.len, m->m_len); i = 0; bzero(p, 17); for (; m; m = m->m_next) { d = mtod(m, u_int8_t *); ed = d + m->m_len; for (; d < ed; i++, d++) { if ((i % 16) == 0) { printf(" %s\n\t", p); } else if ((i % 8) == 0) printf(" "); printf(" %02x", *d); p[i % 16] = ((*d >= 0x20) && (*d < 0x80)) ? *d : '.'; } } if ((i - 1) % 16) printf(" %s\n", p); } #endif /* RAY_DEBUG & RAY_DBG_MBUF */ Index: head/sys/dev/rc/rc.c =================================================================== --- head/sys/dev/rc/rc.c (revision 129878) +++ head/sys/dev/rc/rc.c (revision 129879) @@ -1,1590 +1,1591 @@ /* * Copyright (C) 1995 by Pavel Antonov, Moscow, Russia. * Copyright (C) 1995 by Andrey A. Chernov, Moscow, Russia. * Copyright (C) 2002 by John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * SDL Communications Riscom/8 (based on Cirrus Logic CL-CD180) driver * */ /*#define RCDEBUG*/ #include "opt_tty.h" #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #define IOBASE_ADDRS 14 #define DEV_TO_RC(dev) (struct rc_chans *)((dev)->si_drv1) #define TTY_TO_RC(tty) DEV_TO_RC((tty)->t_dev) #define rcin(sc, port) RC_IN(sc, port) #define rcout(sc, port, v) RC_OUT(sc, port, v) #define WAITFORCCR(sc, chan) rc_wait0((sc), (chan), __LINE__) #define CCRCMD(sc, chan, cmd) do { \ WAITFORCCR((sc), (chan)); \ rcout((sc), CD180_CCR, (cmd)); \ } while (0) #define RC_IBUFSIZE 256 #define RB_I_HIGH_WATER (TTYHOG - 2 * RC_IBUFSIZE) #define RC_OBUFSIZE 512 #define RC_IHIGHWATER (3 * RC_IBUFSIZE / 4) #define INPUT_FLAGS_SHIFT (2 * RC_IBUFSIZE) #define LOTS_OF_EVENTS 64 #define RC_FAKEID 0x10 #define CALLOUT(dev) (((intptr_t)(dev)->si_drv2) != 0) /* Per-channel structure */ struct rc_chans { struct rc_softc *rc_rcb; /* back ptr */ 
dev_t rc_dev; /* non-callout device */ dev_t rc_cdev; /* callout device */ u_short rc_flags; /* Misc. flags */ int rc_chan; /* Channel # */ u_char rc_ier; /* intr. enable reg */ u_char rc_msvr; /* modem sig. status */ u_char rc_cor2; /* options reg */ u_char rc_pendcmd; /* special cmd pending */ u_int rc_dtrwait; /* dtr timeout */ u_int rc_dcdwaits; /* how many waits DCD in open */ u_char rc_hotchar; /* end packed optimize */ struct tty rc_tp; /* tty struct */ u_char *rc_iptr; /* Chars input buffer */ u_char *rc_hiwat; /* hi-water mark */ u_char *rc_bufend; /* end of buffer */ u_char *rc_optr; /* ptr in output buf */ u_char *rc_obufend; /* end of output buf */ u_char rc_ibuf[4 * RC_IBUFSIZE]; /* input buffer */ u_char rc_obuf[RC_OBUFSIZE]; /* output buffer */ struct callout rc_dtrcallout; }; /* Per-board structure */ struct rc_softc { device_t sc_dev; struct resource *sc_irq; struct resource *sc_port[IOBASE_ADDRS]; int sc_irqrid; void *sc_hwicookie; bus_space_tag_t sc_bt; bus_space_handle_t sc_bh; u_int sc_unit; /* unit # */ u_char sc_dtr; /* DTR status */ int sc_opencount; int sc_scheduled_event; void *sc_swicookie; struct rc_chans sc_channels[CD180_NCHAN]; /* channels */ }; /* Static prototypes */ static void rc_release_resources(device_t dev); static void rc_intr(void *); static void rc_hwreset(struct rc_softc *, unsigned int); static int rc_test(struct rc_softc *); static void rc_discard_output(struct rc_chans *); static void rc_hardclose(struct rc_chans *); static int rc_modctl(struct rc_chans *, int, int); static void rc_start(struct tty *); static void rc_stop(struct tty *, int rw); static int rc_param(struct tty *, struct termios *); static void rc_pollcard(void *); static void rc_reinit(struct rc_softc *); #ifdef RCDEBUG static void printrcflags(); #endif static void rc_dtrwakeup(void *); static void disc_optim(struct tty *tp, struct termios *t, struct rc_chans *); static void rc_wait0(struct rc_softc *sc, int chan, int line); static d_open_t rcopen; 
static d_close_t rcclose; static d_ioctl_t rcioctl; static struct cdevsw rc_cdevsw = { .d_version = D_VERSION, .d_open = rcopen, .d_close = rcclose, .d_ioctl = rcioctl, .d_name = "rc", .d_flags = D_TTY | D_NEEDGIANT, }; static devclass_t rc_devclass; /* Flags */ #define RC_DTR_OFF 0x0001 /* DTR wait, for close/open */ #define RC_ACTOUT 0x0002 /* Dial-out port active */ #define RC_RTSFLOW 0x0004 /* RTS flow ctl enabled */ #define RC_CTSFLOW 0x0008 /* CTS flow ctl enabled */ #define RC_DORXFER 0x0010 /* RXFER event planned */ #define RC_DOXXFER 0x0020 /* XXFER event planned */ #define RC_MODCHG 0x0040 /* Modem status changed */ #define RC_OSUSP 0x0080 /* Output suspended */ #define RC_OSBUSY 0x0100 /* start() routine in progress */ #define RC_WAS_BUFOVFL 0x0200 /* low-level buffer ovferflow */ #define RC_WAS_SILOVFL 0x0400 /* silo buffer overflow */ #define RC_SEND_RDY 0x0800 /* ready to send */ /* Table for translation of RCSR status bits to internal form */ static int rc_rcsrt[16] = { 0, TTY_OE, TTY_FE, TTY_FE|TTY_OE, TTY_PE, TTY_PE|TTY_OE, TTY_PE|TTY_FE, TTY_PE|TTY_FE|TTY_OE, TTY_BI, TTY_BI|TTY_OE, TTY_BI|TTY_FE, TTY_BI|TTY_FE|TTY_OE, TTY_BI|TTY_PE, TTY_BI|TTY_PE|TTY_OE, TTY_BI|TTY_PE|TTY_FE, TTY_BI|TTY_PE|TTY_FE|TTY_OE }; static int rc_ports[] = { 0x220, 0x240, 0x250, 0x260, 0x2a0, 0x2b0, 0x300, 0x320 }; static int iobase_addrs[IOBASE_ADDRS] = { 0, 0x400, 0x800, 0xc00, 0x1400, 0x1800, 0x1c00, 0x2000, 0x3000, 0x3400, 0x3800, 0x3c00, 0x4000, 0x8000 }; /**********************************************/ static int rc_probe(device_t dev) { u_int port; int i, found; /* * We don't know of any PnP ID's for these cards. */ if (isa_get_logicalid(dev) != 0) return (ENXIO); /* * We have to have an IO port hint that is valid. */ port = isa_get_port(dev); if (port == -1) return (ENXIO); found = 0; for (i = 0; i < sizeof(rc_ports) / sizeof(int); i++) if (rc_ports[i] == port) { found = 1; break; } if (!found) return (ENXIO); /* * We have to have an IRQ hint. 
*/ if (isa_get_irq(dev) == -1) return (ENXIO); device_set_desc(dev, "SDL Riscom/8"); return (0); } static int rc_attach(device_t dev) { struct rc_chans *rc; struct tty *tp; struct rc_softc *sc; u_int port; int base, chan, error, i, x; dev_t cdev; sc = device_get_softc(dev); sc->sc_dev = dev; /* * We need to have IO ports. Lots of them. We need * the following ranges relative to the base port: * 0x0 - 0x10 * 0x400 - 0x410 * 0x800 - 0x810 * 0xc00 - 0xc10 * 0x1400 - 0x1410 * 0x1800 - 0x1810 * 0x1c00 - 0x1c10 * 0x2000 - 0x2010 * 0x3000 - 0x3010 * 0x3400 - 0x3410 * 0x3800 - 0x3810 * 0x3c00 - 0x3c10 * 0x4000 - 0x4010 * 0x8000 - 0x8010 */ port = isa_get_port(dev); for (i = 0; i < IOBASE_ADDRS; i++) if (bus_set_resource(dev, SYS_RES_IOPORT, i, port + iobase_addrs[i], 0x10) != 0) return (ENXIO); error = ENOMEM; for (i = 0; i < IOBASE_ADDRS; i++) { x = i; sc->sc_port[i] = bus_alloc_resource(dev, SYS_RES_IOPORT, &x, 0ul, ~0ul, 0x10, RF_ACTIVE); if (x != i) { device_printf(dev, "ioport %d was rid %d\n", i, x); goto fail; } if (sc->sc_port[i] == NULL) { device_printf(dev, "failed to alloc ioports %x-%x\n", port + iobase_addrs[i], port + iobase_addrs[i] + 0x10); goto fail; } } sc->sc_bt = rman_get_bustag(sc->sc_port[0]); sc->sc_bh = rman_get_bushandle(sc->sc_port[0]); sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid, RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "failed to alloc IRQ\n"); goto fail; } /* * Now do some actual tests to make sure it works. */ error = ENXIO; rcout(sc, CD180_PPRL, 0x22); /* Random values to Prescale reg. */ rcout(sc, CD180_PPRH, 0x11); if (rcin(sc, CD180_PPRL) != 0x22 || rcin(sc, CD180_PPRH) != 0x11) goto fail; if (rc_test(sc)) goto fail; /* * Ok, start actually hooking things up. */ sc->sc_unit = device_get_unit(dev); /*sc->sc_chipid = 0x10 + device_get_unit(dev);*/ device_printf(dev, "%d chans, firmware rev. 
%c\n", CD180_NCHAN, (rcin(sc, CD180_GFRCR) & 0xF) + 'A'); rc = sc->sc_channels; base = CD180_NCHAN * sc->sc_unit; for (chan = 0; chan < CD180_NCHAN; chan++, rc++) { rc->rc_rcb = sc; rc->rc_chan = chan; rc->rc_iptr = rc->rc_ibuf; rc->rc_bufend = &rc->rc_ibuf[RC_IBUFSIZE]; rc->rc_hiwat = &rc->rc_ibuf[RC_IHIGHWATER]; rc->rc_optr = rc->rc_obufend = rc->rc_obuf; rc->rc_dtrwait = 3 * hz; callout_init(&rc->rc_dtrcallout, 0); tp = &rc->rc_tp; ttychars(tp); tp->t_lflag = tp->t_iflag = tp->t_oflag = 0; tp->t_cflag = TTYDEF_CFLAG; tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; cdev = make_dev(&rc_cdevsw, chan + base, UID_ROOT, GID_WHEEL, 0600, "ttym%d", chan + base); cdev->si_drv1 = rc; cdev->si_drv2 = 0; cdev->si_tty = tp; rc->rc_dev = cdev; cdev = make_dev(&rc_cdevsw, chan + base + 128, UID_UUCP, GID_DIALER, 0660, "cuam%d", chan + base); cdev->si_drv1 = rc; cdev->si_drv2 = (void *)1; cdev->si_tty = tp; rc->rc_cdev = cdev; } error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_TTY, rc_intr, sc, &sc->sc_hwicookie); if (error) { device_printf(dev, "failed to register interrupt handler\n"); goto fail; } swi_add(&tty_ithd, "tty:rc", rc_pollcard, sc, SWI_TTY, 0, &sc->sc_swicookie); return (0); fail: rc_release_resources(dev); return (error); } static int rc_detach(device_t dev) { struct rc_softc *sc; struct rc_chans *rc; int error, i, s; sc = device_get_softc(dev); if (sc->sc_opencount > 0) return (EBUSY); sc->sc_opencount = -1; rc = sc->sc_channels; for (i = 0; i < CD180_NCHAN; i++, rc++) { destroy_dev(rc->rc_dev); destroy_dev(rc->rc_cdev); } rc = sc->sc_channels; s = splsoftclock(); for (i = 0; i < CD180_NCHAN; i++) { if ((rc->rc_flags & RC_DTR_OFF) && !callout_stop(&rc->rc_dtrcallout)) tsleep(&rc->rc_dtrwait, TTIPRI, "rcdtrdet", 0); } error = bus_teardown_intr(dev, sc->sc_irq, sc->sc_hwicookie); if (error) device_printf(dev, "failed to deregister interrupt handler\n"); ithread_remove_handler(sc->sc_swicookie); rc_release_resources(dev); return (0); } static void 
rc_release_resources(device_t dev) { struct rc_softc *sc; int i; sc = device_get_softc(dev); if (sc->sc_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid, sc->sc_irq); sc->sc_irq = NULL; } for (i = 0; i < IOBASE_ADDRS; i++) { if (sc->sc_port[i] == NULL) break; bus_release_resource(dev, SYS_RES_IOPORT, i, sc->sc_port[i]); sc->sc_port[i] = NULL; } } /* RC interrupt handling */ static void rc_intr(void *arg) { struct rc_softc *sc; struct rc_chans *rc; int resid, chan; u_char val, iack, bsr, ucnt, *optr; int good_data, t_state; sc = (struct rc_softc *)arg; bsr = ~(rcin(sc, RC_BSR)); if (!(bsr & (RC_BSR_TOUT|RC_BSR_RXINT|RC_BSR_TXINT|RC_BSR_MOINT))) { device_printf(sc->sc_dev, "extra interrupt\n"); rcout(sc, CD180_EOIR, 0); return; } while (bsr & (RC_BSR_TOUT|RC_BSR_RXINT|RC_BSR_TXINT|RC_BSR_MOINT)) { #ifdef RCDEBUG_DETAILED device_printf(sc->sc_dev, "intr (%p) %s%s%s%s\n", arg, bsr, (bsr & RC_BSR_TOUT)?"TOUT ":"", (bsr & RC_BSR_RXINT)?"RXINT ":"", (bsr & RC_BSR_TXINT)?"TXINT ":"", (bsr & RC_BSR_MOINT)?"MOINT":""); #endif if (bsr & RC_BSR_TOUT) { device_printf(sc->sc_dev, "hardware failure, reset board\n"); rcout(sc, RC_CTOUT, 0); rc_reinit(sc); return; } if (bsr & RC_BSR_RXINT) { iack = rcin(sc, RC_PILR_RX); good_data = (iack == (GIVR_IT_RGDI | RC_FAKEID)); if (!good_data && iack != (GIVR_IT_REI | RC_FAKEID)) { device_printf(sc->sc_dev, "fake rxint: %02x\n", iack); goto more_intrs; } chan = ((rcin(sc, CD180_GICR) & GICR_CHAN) >> GICR_LSH); rc = &sc->sc_channels[chan]; t_state = rc->rc_tp.t_state; /* Do RTS flow control stuff */ if ( (rc->rc_flags & RC_RTSFLOW) || !(t_state & TS_ISOPEN) ) { if ( ( !(t_state & TS_ISOPEN) || (t_state & TS_TBLOCK) ) && (rc->rc_msvr & MSVR_RTS) ) rcout(sc, CD180_MSVR, rc->rc_msvr &= ~MSVR_RTS); else if (!(rc->rc_msvr & MSVR_RTS)) rcout(sc, CD180_MSVR, rc->rc_msvr |= MSVR_RTS); } ucnt = rcin(sc, CD180_RDCR) & 0xF; resid = 0; if (t_state & TS_ISOPEN) { /* check for input buffer overflow */ if ((rc->rc_iptr + ucnt) >= 
rc->rc_bufend) { resid = ucnt; ucnt = rc->rc_bufend - rc->rc_iptr; resid -= ucnt; if (!(rc->rc_flags & RC_WAS_BUFOVFL)) { rc->rc_flags |= RC_WAS_BUFOVFL; sc->sc_scheduled_event++; } } optr = rc->rc_iptr; /* check foor good data */ if (good_data) { while (ucnt-- > 0) { val = rcin(sc, CD180_RDR); optr[0] = val; optr[INPUT_FLAGS_SHIFT] = 0; optr++; sc->sc_scheduled_event++; if (val != 0 && val == rc->rc_hotchar) swi_sched(sc->sc_swicookie, 0); } } else { /* Store also status data */ while (ucnt-- > 0) { iack = rcin(sc, CD180_RCSR); if (iack & RCSR_Timeout) break; if ( (iack & RCSR_OE) && !(rc->rc_flags & RC_WAS_SILOVFL)) { rc->rc_flags |= RC_WAS_SILOVFL; sc->sc_scheduled_event++; } val = rcin(sc, CD180_RDR); /* Don't store PE if IGNPAR and BREAK if IGNBRK, this hack allows "raw" tty optimization works even if IGN* is set. */ if ( !(iack & (RCSR_PE|RCSR_FE|RCSR_Break)) || ((!(iack & (RCSR_PE|RCSR_FE)) || !(rc->rc_tp.t_iflag & IGNPAR)) && (!(iack & RCSR_Break) || !(rc->rc_tp.t_iflag & IGNBRK)))) { if ( (iack & (RCSR_PE|RCSR_FE)) && (t_state & TS_CAN_BYPASS_L_RINT) && ((iack & RCSR_FE) || ((iack & RCSR_PE) && (rc->rc_tp.t_iflag & INPCK)))) val = 0; else if (val != 0 && val == rc->rc_hotchar) swi_sched(sc->sc_swicookie, 0); optr[0] = val; optr[INPUT_FLAGS_SHIFT] = iack; optr++; sc->sc_scheduled_event++; } } } rc->rc_iptr = optr; rc->rc_flags |= RC_DORXFER; } else resid = ucnt; /* Clear FIFO if necessary */ while (resid-- > 0) { if (!good_data) iack = rcin(sc, CD180_RCSR); else iack = 0; if (iack & RCSR_Timeout) break; (void) rcin(sc, CD180_RDR); } goto more_intrs; } if (bsr & RC_BSR_MOINT) { iack = rcin(sc, RC_PILR_MODEM); if (iack != (GIVR_IT_MSCI | RC_FAKEID)) { device_printf(sc->sc_dev, "fake moint: %02x\n", iack); goto more_intrs; } chan = ((rcin(sc, CD180_GICR) & GICR_CHAN) >> GICR_LSH); rc = &sc->sc_channels[chan]; iack = rcin(sc, CD180_MCR); rc->rc_msvr = rcin(sc, CD180_MSVR); rcout(sc, CD180_MCR, 0); #ifdef RCDEBUG printrcflags(rc, "moint"); #endif if 
(rc->rc_flags & RC_CTSFLOW) { if (rc->rc_msvr & MSVR_CTS) rc->rc_flags |= RC_SEND_RDY; else rc->rc_flags &= ~RC_SEND_RDY; } else rc->rc_flags |= RC_SEND_RDY; if ((iack & MCR_CDchg) && !(rc->rc_flags & RC_MODCHG)) { sc->sc_scheduled_event += LOTS_OF_EVENTS; rc->rc_flags |= RC_MODCHG; swi_sched(sc->sc_swicookie, 0); } goto more_intrs; } if (bsr & RC_BSR_TXINT) { iack = rcin(sc, RC_PILR_TX); if (iack != (GIVR_IT_TDI | RC_FAKEID)) { device_printf(sc->sc_dev, "fake txint: %02x\n", iack); goto more_intrs; } chan = ((rcin(sc, CD180_GICR) & GICR_CHAN) >> GICR_LSH); rc = &sc->sc_channels[chan]; if ( (rc->rc_flags & RC_OSUSP) || !(rc->rc_flags & RC_SEND_RDY) ) goto more_intrs; /* Handle breaks and other stuff */ if (rc->rc_pendcmd) { rcout(sc, CD180_COR2, rc->rc_cor2 |= COR2_ETC); rcout(sc, CD180_TDR, CD180_C_ESC); rcout(sc, CD180_TDR, rc->rc_pendcmd); rcout(sc, CD180_COR2, rc->rc_cor2 &= ~COR2_ETC); rc->rc_pendcmd = 0; goto more_intrs; } optr = rc->rc_optr; resid = rc->rc_obufend - optr; if (resid > CD180_NFIFO) resid = CD180_NFIFO; while (resid-- > 0) rcout(sc, CD180_TDR, *optr++); rc->rc_optr = optr; /* output completed? 
*/ if (optr >= rc->rc_obufend) { rcout(sc, CD180_IER, rc->rc_ier &= ~IER_TxRdy); #ifdef RCDEBUG device_printf(sc->sc_dev, "channel %d: output completed\n", rc->rc_chan); #endif if (!(rc->rc_flags & RC_DOXXFER)) { sc->sc_scheduled_event += LOTS_OF_EVENTS; rc->rc_flags |= RC_DOXXFER; swi_sched(sc->sc_swicookie, 0); } } } more_intrs: rcout(sc, CD180_EOIR, 0); /* end of interrupt */ rcout(sc, RC_CTOUT, 0); bsr = ~(rcin(sc, RC_BSR)); } } /* Feed characters to output buffer */ static void rc_start(struct tty *tp) { struct rc_softc *sc; struct rc_chans *rc; int s; rc = TTY_TO_RC(tp); if (rc->rc_flags & RC_OSBUSY) return; sc = rc->rc_rcb; s = spltty(); rc->rc_flags |= RC_OSBUSY; critical_enter(); if (tp->t_state & TS_TTSTOP) rc->rc_flags |= RC_OSUSP; else rc->rc_flags &= ~RC_OSUSP; /* Do RTS flow control stuff */ if ( (rc->rc_flags & RC_RTSFLOW) && (tp->t_state & TS_TBLOCK) && (rc->rc_msvr & MSVR_RTS) ) { rcout(sc, CD180_CAR, rc->rc_chan); rcout(sc, CD180_MSVR, rc->rc_msvr &= ~MSVR_RTS); } else if (!(rc->rc_msvr & MSVR_RTS)) { rcout(sc, CD180_CAR, rc->rc_chan); rcout(sc, CD180_MSVR, rc->rc_msvr |= MSVR_RTS); } critical_exit(); if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP)) goto out; #ifdef RCDEBUG printrcflags(rc, "rcstart"); #endif ttwwakeup(tp); #ifdef RCDEBUG printf("rcstart: outq = %d obuf = %d\n", tp->t_outq.c_cc, rc->rc_obufend - rc->rc_optr); #endif if (tp->t_state & TS_BUSY) goto out; /* output still in progress ... */ if (tp->t_outq.c_cc > 0) { u_int ocnt; tp->t_state |= TS_BUSY; ocnt = q_to_b(&tp->t_outq, rc->rc_obuf, sizeof rc->rc_obuf); critical_enter(); rc->rc_optr = rc->rc_obuf; rc->rc_obufend = rc->rc_optr + ocnt; critical_exit(); if (!(rc->rc_ier & IER_TxRdy)) { #ifdef RCDEBUG device_printf(sc->sc_dev, "channel %d: rcstart enable txint\n", rc->rc_chan); #endif rcout(sc, CD180_CAR, rc->rc_chan); rcout(sc, CD180_IER, rc->rc_ier |= IER_TxRdy); } } out: rc->rc_flags &= ~RC_OSBUSY; (void) splx(s); } /* Handle delayed events. 
*/ void rc_pollcard(void *arg) { struct rc_softc *sc; struct rc_chans *rc; struct tty *tp; u_char *tptr, *eptr; int chan, icnt; sc = (struct rc_softc *)arg; if (sc->sc_scheduled_event == 0) return; do { rc = sc->sc_channels; for (chan = 0; chan < CD180_NCHAN; rc++, chan++) { tp = &rc->rc_tp; #ifdef RCDEBUG if (rc->rc_flags & (RC_DORXFER|RC_DOXXFER|RC_MODCHG| RC_WAS_BUFOVFL|RC_WAS_SILOVFL)) printrcflags(rc, "rcevent"); #endif if (rc->rc_flags & RC_WAS_BUFOVFL) { critical_enter(); rc->rc_flags &= ~RC_WAS_BUFOVFL; sc->sc_scheduled_event--; critical_exit(); device_printf(sc->sc_dev, "channel %d: interrupt-level buffer overflow\n", chan); } if (rc->rc_flags & RC_WAS_SILOVFL) { critical_enter(); rc->rc_flags &= ~RC_WAS_SILOVFL; sc->sc_scheduled_event--; critical_exit(); device_printf(sc->sc_dev, "channel %d: silo overflow\n", chan); } if (rc->rc_flags & RC_MODCHG) { critical_enter(); rc->rc_flags &= ~RC_MODCHG; sc->sc_scheduled_event -= LOTS_OF_EVENTS; critical_exit(); (*linesw[tp->t_line].l_modem)(tp, !!(rc->rc_msvr & MSVR_CD)); } if (rc->rc_flags & RC_DORXFER) { critical_enter(); rc->rc_flags &= ~RC_DORXFER; eptr = rc->rc_iptr; if (rc->rc_bufend == &rc->rc_ibuf[2 * RC_IBUFSIZE]) tptr = &rc->rc_ibuf[RC_IBUFSIZE]; else tptr = rc->rc_ibuf; icnt = eptr - tptr; if (icnt > 0) { if (rc->rc_bufend == &rc->rc_ibuf[2 * RC_IBUFSIZE]) { rc->rc_iptr = rc->rc_ibuf; rc->rc_bufend = &rc->rc_ibuf[RC_IBUFSIZE]; rc->rc_hiwat = &rc->rc_ibuf[RC_IHIGHWATER]; } else { rc->rc_iptr = &rc->rc_ibuf[RC_IBUFSIZE]; rc->rc_bufend = &rc->rc_ibuf[2 * RC_IBUFSIZE]; rc->rc_hiwat = &rc->rc_ibuf[RC_IBUFSIZE + RC_IHIGHWATER]; } if ( (rc->rc_flags & RC_RTSFLOW) && (tp->t_state & TS_ISOPEN) && !(tp->t_state & TS_TBLOCK) && !(rc->rc_msvr & MSVR_RTS) ) { rcout(sc, CD180_CAR, chan); rcout(sc, CD180_MSVR, rc->rc_msvr |= MSVR_RTS); } sc->sc_scheduled_event -= icnt; } critical_exit(); if (icnt <= 0 || !(tp->t_state & TS_ISOPEN)) goto done1; if ( (tp->t_state & TS_CAN_BYPASS_L_RINT) && !(tp->t_state & TS_LOCAL)) { 
if ((tp->t_rawq.c_cc + icnt) >= RB_I_HIGH_WATER && ((rc->rc_flags & RC_RTSFLOW) || (tp->t_iflag & IXOFF)) && !(tp->t_state & TS_TBLOCK)) ttyblock(tp); tk_nin += icnt; tk_rawcc += icnt; tp->t_rawcc += icnt; if (b_to_q(tptr, icnt, &tp->t_rawq)) device_printf(sc->sc_dev, "channel %d: tty-level buffer overflow\n", chan); ttwakeup(tp); if ((tp->t_state & TS_TTSTOP) && ((tp->t_iflag & IXANY) || (tp->t_cc[VSTART] == tp->t_cc[VSTOP]))) { tp->t_state &= ~TS_TTSTOP; tp->t_lflag &= ~FLUSHO; rc_start(tp); } } else { for (; tptr < eptr; tptr++) (*linesw[tp->t_line].l_rint) (tptr[0] | rc_rcsrt[tptr[INPUT_FLAGS_SHIFT] & 0xF], tp); } done1: ; } if (rc->rc_flags & RC_DOXXFER) { critical_enter(); sc->sc_scheduled_event -= LOTS_OF_EVENTS; rc->rc_flags &= ~RC_DOXXFER; rc->rc_tp.t_state &= ~TS_BUSY; critical_exit(); (*linesw[tp->t_line].l_start)(tp); } if (sc->sc_scheduled_event == 0) break; } } while (sc->sc_scheduled_event >= LOTS_OF_EVENTS); } static void rc_stop(struct tty *tp, int rw) { struct rc_softc *sc; struct rc_chans *rc; u_char *tptr, *eptr; rc = TTY_TO_RC(tp); sc = rc->rc_rcb; #ifdef RCDEBUG device_printf(sc->sc_dev, "channel %d: rc_stop %s%s\n", rc->rc_chan, (rw & FWRITE)?"FWRITE ":"", (rw & FREAD)?"FREAD":""); #endif if (rw & FWRITE) rc_discard_output(rc); critical_enter(); if (rw & FREAD) { rc->rc_flags &= ~RC_DORXFER; eptr = rc->rc_iptr; if (rc->rc_bufend == &rc->rc_ibuf[2 * RC_IBUFSIZE]) { tptr = &rc->rc_ibuf[RC_IBUFSIZE]; rc->rc_iptr = &rc->rc_ibuf[RC_IBUFSIZE]; } else { tptr = rc->rc_ibuf; rc->rc_iptr = rc->rc_ibuf; } sc->sc_scheduled_event -= eptr - tptr; } if (tp->t_state & TS_TTSTOP) rc->rc_flags |= RC_OSUSP; else rc->rc_flags &= ~RC_OSUSP; critical_exit(); } static int rcopen(dev_t dev, int flag, int mode, d_thread_t *td) { struct rc_softc *sc; struct rc_chans *rc; struct tty *tp; int s, error = 0; rc = DEV_TO_RC(dev); sc = rc->rc_rcb; tp = &rc->rc_tp; if (sc->sc_opencount < 0) return (ENXIO); sc->sc_opencount++; #ifdef RCDEBUG device_printf(sc->sc_dev, "channel 
%d: rcopen: dev %p\n", rc->rc_chan, dev); #endif s = spltty(); again: while (rc->rc_flags & RC_DTR_OFF) { error = tsleep(&(rc->rc_dtrwait), TTIPRI | PCATCH, "rcdtr", 0); if (error != 0) goto out; } if (tp->t_state & TS_ISOPEN) { if (CALLOUT(dev)) { if (!(rc->rc_flags & RC_ACTOUT)) { error = EBUSY; goto out; } } else { if (rc->rc_flags & RC_ACTOUT) { if (flag & O_NONBLOCK) { error = EBUSY; goto out; } error = tsleep(&rc->rc_rcb, TTIPRI|PCATCH, "rcbi", 0); if (error) goto out; goto again; } } if (tp->t_state & TS_XCLUDE && suser(td)) { error = EBUSY; goto out; } } else { tp->t_oproc = rc_start; tp->t_param = rc_param; tp->t_stop = rc_stop; tp->t_dev = dev; if (CALLOUT(dev)) tp->t_cflag |= CLOCAL; else tp->t_cflag &= ~CLOCAL; error = rc_param(tp, &tp->t_termios); if (error) goto out; (void) rc_modctl(rc, TIOCM_RTS|TIOCM_DTR, DMSET); if ((rc->rc_msvr & MSVR_CD) || CALLOUT(dev)) (*linesw[tp->t_line].l_modem)(tp, 1); } if (!(tp->t_state & TS_CARR_ON) && !CALLOUT(dev) && !(tp->t_cflag & CLOCAL) && !(flag & O_NONBLOCK)) { rc->rc_dcdwaits++; error = tsleep(TSA_CARR_ON(tp), TTIPRI | PCATCH, "rcdcd", 0); rc->rc_dcdwaits--; if (error != 0) goto out; goto again; } error = (*linesw[tp->t_line].l_open)(dev, tp); disc_optim(tp, &tp->t_termios, rc); if ((tp->t_state & TS_ISOPEN) && CALLOUT(dev)) rc->rc_flags |= RC_ACTOUT; out: (void) splx(s); if(rc->rc_dcdwaits == 0 && !(tp->t_state & TS_ISOPEN)) rc_hardclose(rc); return error; } static int rcclose(dev_t dev, int flag, int mode, d_thread_t *td) { struct rc_softc *sc; struct rc_chans *rc; struct tty *tp; int s; rc = DEV_TO_RC(dev); sc = rc->rc_rcb; tp = &rc->rc_tp; #ifdef RCDEBUG device_printf(sc->sc_dev, "channel %d: rcclose dev %p\n", rc->rc_chan, dev); #endif s = spltty(); (*linesw[tp->t_line].l_close)(tp, flag); disc_optim(tp, &tp->t_termios, rc); rc_stop(tp, FREAD | FWRITE); rc_hardclose(rc); ttyclose(tp); splx(s); KASSERT(sc->sc_opencount > 0, ("rcclose: non-positive open count")); sc->sc_opencount--; return 0; } static void 
rc_hardclose(struct rc_chans *rc) { struct rc_softc *sc; struct tty *tp; int s; tp = &rc->rc_tp; sc = rc->rc_rcb; s = spltty(); rcout(sc, CD180_CAR, rc->rc_chan); /* Disable rx/tx intrs */ rcout(sc, CD180_IER, rc->rc_ier = 0); if ( (tp->t_cflag & HUPCL) || (!(rc->rc_flags & RC_ACTOUT) && !(rc->rc_msvr & MSVR_CD) && !(tp->t_cflag & CLOCAL)) || !(tp->t_state & TS_ISOPEN) ) { CCRCMD(sc, rc->rc_chan, CCR_ResetChan); WAITFORCCR(sc, rc->rc_chan); (void) rc_modctl(rc, TIOCM_RTS, DMSET); if (rc->rc_dtrwait) { callout_reset(&rc->rc_dtrcallout, rc->rc_dtrwait, rc_dtrwakeup, rc); rc->rc_flags |= RC_DTR_OFF; } } rc->rc_flags &= ~RC_ACTOUT; wakeup( &rc->rc_rcb); /* wake bi */ wakeup(TSA_CARR_ON(tp)); (void) splx(s); } /* Reset the bastard */ static void rc_hwreset(struct rc_softc *sc, u_int chipid) { CCRCMD(sc, -1, CCR_HWRESET); /* Hardware reset */ DELAY(20000); WAITFORCCR(sc, -1); rcout(sc, RC_CTOUT, 0); /* Clear timeout */ rcout(sc, CD180_GIVR, chipid); rcout(sc, CD180_GICR, 0); /* Set Prescaler Registers (1 msec) */ rcout(sc, CD180_PPRL, ((RC_OSCFREQ + 999) / 1000) & 0xFF); rcout(sc, CD180_PPRH, ((RC_OSCFREQ + 999) / 1000) >> 8); /* Initialize Priority Interrupt Level Registers */ rcout(sc, CD180_PILR1, RC_PILR_MODEM); rcout(sc, CD180_PILR2, RC_PILR_TX); rcout(sc, CD180_PILR3, RC_PILR_RX); /* Reset DTR */ rcout(sc, RC_DTREG, ~0); } /* Set channel parameters */ static int rc_param(struct tty *tp, struct termios *ts) { struct rc_softc *sc; struct rc_chans *rc; int idivs, odivs, s, val, cflag, iflag, lflag, inpflow; if ( ts->c_ospeed < 0 || ts->c_ospeed > 76800 || ts->c_ispeed < 0 || ts->c_ispeed > 76800 ) return (EINVAL); if (ts->c_ispeed == 0) ts->c_ispeed = ts->c_ospeed; odivs = RC_BRD(ts->c_ospeed); idivs = RC_BRD(ts->c_ispeed); rc = TTY_TO_RC(tp); sc = rc->rc_rcb; s = spltty(); /* Select channel */ rcout(sc, CD180_CAR, rc->rc_chan); /* If speed == 0, hangup line */ if (ts->c_ospeed == 0) { CCRCMD(sc, rc->rc_chan, CCR_ResetChan); WAITFORCCR(sc, rc->rc_chan); (void) 
rc_modctl(rc, TIOCM_DTR, DMBIC); } tp->t_state &= ~TS_CAN_BYPASS_L_RINT; cflag = ts->c_cflag; iflag = ts->c_iflag; lflag = ts->c_lflag; if (idivs > 0) { rcout(sc, CD180_RBPRL, idivs & 0xFF); rcout(sc, CD180_RBPRH, idivs >> 8); } if (odivs > 0) { rcout(sc, CD180_TBPRL, odivs & 0xFF); rcout(sc, CD180_TBPRH, odivs >> 8); } /* set timeout value */ if (ts->c_ispeed > 0) { int itm = ts->c_ispeed > 2400 ? 5 : 10000 / ts->c_ispeed + 1; if ( !(lflag & ICANON) && ts->c_cc[VMIN] != 0 && ts->c_cc[VTIME] != 0 && ts->c_cc[VTIME] * 10 > itm) itm = ts->c_cc[VTIME] * 10; rcout(sc, CD180_RTPR, itm <= 255 ? itm : 255); } switch (cflag & CSIZE) { case CS5: val = COR1_5BITS; break; case CS6: val = COR1_6BITS; break; case CS7: val = COR1_7BITS; break; default: case CS8: val = COR1_8BITS; break; } if (cflag & PARENB) { val |= COR1_NORMPAR; if (cflag & PARODD) val |= COR1_ODDP; if (!(cflag & INPCK)) val |= COR1_Ignore; } else val |= COR1_Ignore; if (cflag & CSTOPB) val |= COR1_2SB; rcout(sc, CD180_COR1, val); /* Set FIFO threshold */ val = ts->c_ospeed <= 4800 ? 
1 : CD180_NFIFO / 2; inpflow = 0; if ( (iflag & IXOFF) && ( ts->c_cc[VSTOP] != _POSIX_VDISABLE && ( ts->c_cc[VSTART] != _POSIX_VDISABLE || (iflag & IXANY) ) ) ) { inpflow = 1; val |= COR3_SCDE|COR3_FCT; } rcout(sc, CD180_COR3, val); /* Initialize on-chip automatic flow control */ val = 0; rc->rc_flags &= ~(RC_CTSFLOW|RC_SEND_RDY); if (cflag & CCTS_OFLOW) { rc->rc_flags |= RC_CTSFLOW; val |= COR2_CtsAE; } else rc->rc_flags |= RC_SEND_RDY; if (tp->t_state & TS_TTSTOP) rc->rc_flags |= RC_OSUSP; else rc->rc_flags &= ~RC_OSUSP; if (cflag & CRTS_IFLOW) rc->rc_flags |= RC_RTSFLOW; else rc->rc_flags &= ~RC_RTSFLOW; if (inpflow) { if (ts->c_cc[VSTART] != _POSIX_VDISABLE) rcout(sc, CD180_SCHR1, ts->c_cc[VSTART]); rcout(sc, CD180_SCHR2, ts->c_cc[VSTOP]); val |= COR2_TxIBE; if (iflag & IXANY) val |= COR2_IXM; } rcout(sc, CD180_COR2, rc->rc_cor2 = val); CCRCMD(sc, rc->rc_chan, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3); disc_optim(tp, ts, rc); /* modem ctl */ val = cflag & CLOCAL ? 0 : MCOR1_CDzd; if (cflag & CCTS_OFLOW) val |= MCOR1_CTSzd; rcout(sc, CD180_MCOR1, val); val = cflag & CLOCAL ? 0 : MCOR2_CDod; if (cflag & CCTS_OFLOW) val |= MCOR2_CTSod; rcout(sc, CD180_MCOR2, val); /* enable i/o and interrupts */ CCRCMD(sc, rc->rc_chan, CCR_XMTREN | ((cflag & CREAD) ? CCR_RCVREN : CCR_RCVRDIS)); WAITFORCCR(sc, rc->rc_chan); rc->rc_ier = cflag & CLOCAL ? 
0 : IER_CD; if (cflag & CCTS_OFLOW) rc->rc_ier |= IER_CTS; if (cflag & CREAD) rc->rc_ier |= IER_RxData; if (tp->t_state & TS_BUSY) rc->rc_ier |= IER_TxRdy; if (ts->c_ospeed != 0) rc_modctl(rc, TIOCM_DTR, DMBIS); if ((cflag & CCTS_OFLOW) && (rc->rc_msvr & MSVR_CTS)) rc->rc_flags |= RC_SEND_RDY; rcout(sc, CD180_IER, rc->rc_ier); (void) splx(s); return 0; } /* Re-initialize board after bogus interrupts */ static void rc_reinit(struct rc_softc *sc) { struct rc_chans *rc; int i; rc_hwreset(sc, RC_FAKEID); rc = sc->sc_channels; for (i = 0; i < CD180_NCHAN; i++, rc++) (void) rc_param(&rc->rc_tp, &rc->rc_tp.t_termios); } static int rcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, d_thread_t *td) { struct rc_chans *rc; struct tty *tp; int s, error; rc = DEV_TO_RC(dev); tp = &rc->rc_tp; error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, td); if (error != ENOIOCTL) return (error); error = ttioctl(tp, cmd, data, flag); disc_optim(tp, &tp->t_termios, rc); if (error != ENOIOCTL) return (error); s = spltty(); switch (cmd) { case TIOCSBRK: rc->rc_pendcmd = CD180_C_SBRK; break; case TIOCCBRK: rc->rc_pendcmd = CD180_C_EBRK; break; case TIOCSDTR: (void) rc_modctl(rc, TIOCM_DTR, DMBIS); break; case TIOCCDTR: (void) rc_modctl(rc, TIOCM_DTR, DMBIC); break; case TIOCMGET: *(int *) data = rc_modctl(rc, 0, DMGET); break; case TIOCMSET: (void) rc_modctl(rc, *(int *) data, DMSET); break; case TIOCMBIC: (void) rc_modctl(rc, *(int *) data, DMBIC); break; case TIOCMBIS: (void) rc_modctl(rc, *(int *) data, DMBIS); break; case TIOCMSDTRWAIT: error = suser(td); if (error != 0) { splx(s); return (error); } rc->rc_dtrwait = *(int *)data * hz / 100; break; case TIOCMGDTRWAIT: *(int *)data = rc->rc_dtrwait * 100 / hz; break; default: (void) splx(s); return ENOTTY; } (void) splx(s); return 0; } /* Modem control routines */ static int rc_modctl(struct rc_chans *rc, int bits, int cmd) { struct rc_softc *sc; u_char *dtr; u_char msvr; sc = rc->rc_rcb; dtr = &sc->sc_dtr; rcout(sc, CD180_CAR, 
rc->rc_chan); switch (cmd) { case DMSET: rcout(sc, RC_DTREG, (bits & TIOCM_DTR) ? ~(*dtr |= 1 << rc->rc_chan) : ~(*dtr &= ~(1 << rc->rc_chan))); msvr = rcin(sc, CD180_MSVR); if (bits & TIOCM_RTS) msvr |= MSVR_RTS; else msvr &= ~MSVR_RTS; if (bits & TIOCM_DTR) msvr |= MSVR_DTR; else msvr &= ~MSVR_DTR; rcout(sc, CD180_MSVR, msvr); break; case DMBIS: if (bits & TIOCM_DTR) rcout(sc, RC_DTREG, ~(*dtr |= 1 << rc->rc_chan)); msvr = rcin(sc, CD180_MSVR); if (bits & TIOCM_RTS) msvr |= MSVR_RTS; if (bits & TIOCM_DTR) msvr |= MSVR_DTR; rcout(sc, CD180_MSVR, msvr); break; case DMGET: bits = TIOCM_LE; msvr = rc->rc_msvr = rcin(sc, CD180_MSVR); if (msvr & MSVR_RTS) bits |= TIOCM_RTS; if (msvr & MSVR_CTS) bits |= TIOCM_CTS; if (msvr & MSVR_DSR) bits |= TIOCM_DSR; if (msvr & MSVR_DTR) bits |= TIOCM_DTR; if (msvr & MSVR_CD) bits |= TIOCM_CD; if (~rcin(sc, RC_RIREG) & (1 << rc->rc_chan)) bits |= TIOCM_RI; return bits; case DMBIC: if (bits & TIOCM_DTR) rcout(sc, RC_DTREG, ~(*dtr &= ~(1 << rc->rc_chan))); msvr = rcin(sc, CD180_MSVR); if (bits & TIOCM_RTS) msvr &= ~MSVR_RTS; if (bits & TIOCM_DTR) msvr &= ~MSVR_DTR; rcout(sc, CD180_MSVR, msvr); break; } rc->rc_msvr = rcin(sc, CD180_MSVR); return 0; } #define ERR(s) do { \ device_printf(sc->sc_dev, "%s", ""); \ printf s ; \ printf("\n"); \ (void) splx(old_level); \ return 1; \ } while (0) /* Test the board. 
*/ int rc_test(struct rc_softc *sc) { int chan = 0; int i = 0, rcnt, old_level; unsigned int iack, chipid; unsigned short divs; static u_char ctest[] = "\377\125\252\045\244\0\377"; #define CTLEN 8 struct rtest { u_char txbuf[CD180_NFIFO]; /* TX buffer */ u_char rxbuf[CD180_NFIFO]; /* RX buffer */ int rxptr; /* RX pointer */ int txptr; /* TX pointer */ } tchans[CD180_NCHAN]; old_level = spltty(); chipid = RC_FAKEID; /* First, reset board to inital state */ rc_hwreset(sc, chipid); divs = RC_BRD(19200); /* Initialize channels */ for (chan = 0; chan < CD180_NCHAN; chan++) { /* Select and reset channel */ rcout(sc, CD180_CAR, chan); CCRCMD(sc, chan, CCR_ResetChan); WAITFORCCR(sc, chan); /* Set speed */ rcout(sc, CD180_RBPRL, divs & 0xFF); rcout(sc, CD180_RBPRH, divs >> 8); rcout(sc, CD180_TBPRL, divs & 0xFF); rcout(sc, CD180_TBPRH, divs >> 8); /* set timeout value */ rcout(sc, CD180_RTPR, 0); /* Establish local loopback */ rcout(sc, CD180_COR1, COR1_NOPAR | COR1_8BITS | COR1_1SB); rcout(sc, CD180_COR2, COR2_LLM); rcout(sc, CD180_COR3, CD180_NFIFO); CCRCMD(sc, chan, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3); CCRCMD(sc, chan, CCR_RCVREN | CCR_XMTREN); WAITFORCCR(sc, chan); rcout(sc, CD180_MSVR, MSVR_RTS); /* Fill TXBUF with test data */ for (i = 0; i < CD180_NFIFO; i++) { tchans[chan].txbuf[i] = ctest[i]; tchans[chan].rxbuf[i] = 0; } tchans[chan].txptr = tchans[chan].rxptr = 0; /* Now, start transmit */ rcout(sc, CD180_IER, IER_TxMpty|IER_RxData); } /* Pseudo-interrupt poll stuff */ for (rcnt = 10000; rcnt-- > 0; rcnt--) { i = ~(rcin(sc, RC_BSR)); if (i & RC_BSR_TOUT) ERR(("BSR timeout bit set\n")); else if (i & RC_BSR_TXINT) { iack = rcin(sc, RC_PILR_TX); if (iack != (GIVR_IT_TDI | chipid)) ERR(("Bad TX intr ack (%02x != %02x)\n", iack, GIVR_IT_TDI | chipid)); chan = (rcin(sc, CD180_GICR) & GICR_CHAN) >> GICR_LSH; /* If no more data to transmit, disable TX intr */ if (tchans[chan].txptr >= CD180_NFIFO) { iack = rcin(sc, CD180_IER); rcout(sc, CD180_IER, iack & 
~IER_TxMpty); } else { for (iack = tchans[chan].txptr; iack < CD180_NFIFO; iack++) rcout(sc, CD180_TDR, tchans[chan].txbuf[iack]); tchans[chan].txptr = iack; } rcout(sc, CD180_EOIR, 0); } else if (i & RC_BSR_RXINT) { u_char ucnt; iack = rcin(sc, RC_PILR_RX); if (iack != (GIVR_IT_RGDI | chipid) && iack != (GIVR_IT_REI | chipid)) ERR(("Bad RX intr ack (%02x != %02x)\n", iack, GIVR_IT_RGDI | chipid)); chan = (rcin(sc, CD180_GICR) & GICR_CHAN) >> GICR_LSH; ucnt = rcin(sc, CD180_RDCR) & 0xF; while (ucnt-- > 0) { iack = rcin(sc, CD180_RCSR); if (iack & RCSR_Timeout) break; if (iack & 0xF) ERR(("Bad char chan %d (RCSR = %02X)\n", chan, iack)); if (tchans[chan].rxptr > CD180_NFIFO) ERR(("Got extra chars chan %d\n", chan)); tchans[chan].rxbuf[tchans[chan].rxptr++] = rcin(sc, CD180_RDR); } rcout(sc, CD180_EOIR, 0); } rcout(sc, RC_CTOUT, 0); for (iack = chan = 0; chan < CD180_NCHAN; chan++) if (tchans[chan].rxptr >= CD180_NFIFO) iack++; if (iack == CD180_NCHAN) break; } for (chan = 0; chan < CD180_NCHAN; chan++) { /* Select and reset channel */ rcout(sc, CD180_CAR, chan); CCRCMD(sc, chan, CCR_ResetChan); } if (!rcnt) ERR(("looses characters during local loopback\n")); /* Now, check data */ for (chan = 0; chan < CD180_NCHAN; chan++) for (i = 0; i < CD180_NFIFO; i++) if (ctest[i] != tchans[chan].rxbuf[i]) ERR(("data mismatch chan %d ptr %d (%d != %d)\n", chan, i, ctest[i], tchans[chan].rxbuf[i])); (void) splx(old_level); return 0; } #ifdef RCDEBUG static void printrcflags(struct rc_chans *rc, char *comment) { struct rc_softc *sc; u_short f = rc->rc_flags; sc = rc->rc_rcb; printf("rc%d/%d: %s flags: %s%s%s%s%s%s%s%s%s%s%s%s\n", rc->rc_rcb->rcb_unit, rc->rc_chan, comment, (f & RC_DTR_OFF)?"DTR_OFF " :"", (f & RC_ACTOUT) ?"ACTOUT " :"", (f & RC_RTSFLOW)?"RTSFLOW " :"", (f & RC_CTSFLOW)?"CTSFLOW " :"", (f & RC_DORXFER)?"DORXFER " :"", (f & RC_DOXXFER)?"DOXXFER " :"", (f & RC_MODCHG) ?"MODCHG " :"", (f & RC_OSUSP) ?"OSUSP " :"", (f & RC_OSBUSY) ?"OSBUSY " :"", (f & RC_WAS_BUFOVFL) 
?"BUFOVFL " :"", (f & RC_WAS_SILOVFL) ?"SILOVFL " :"", (f & RC_SEND_RDY) ?"SEND_RDY":""); rcout(sc, CD180_CAR, rc->rc_chan); printf("rc%d/%d: msvr %02x ier %02x ccsr %02x\n", rc->rc_rcb->rcb_unit, rc->rc_chan, rcin(sc, CD180_MSVR), rcin(sc, CD180_IER), rcin(sc, CD180_CCSR)); } #endif /* RCDEBUG */ static void rc_dtrwakeup(void *arg) { struct rc_chans *rc; rc = (struct rc_chans *)arg; rc->rc_flags &= ~RC_DTR_OFF; wakeup(&rc->rc_dtrwait); } static void rc_discard_output(struct rc_chans *rc) { critical_enter(); if (rc->rc_flags & RC_DOXXFER) { rc->rc_rcb->sc_scheduled_event -= LOTS_OF_EVENTS; rc->rc_flags &= ~RC_DOXXFER; } rc->rc_optr = rc->rc_obufend; rc->rc_tp.t_state &= ~TS_BUSY; critical_exit(); ttwwakeup(&rc->rc_tp); } static void disc_optim(struct tty *tp, struct termios *t, struct rc_chans *rc) { if (!(t->c_iflag & (ICRNL | IGNCR | IMAXBEL | INLCR | ISTRIP | IXON)) && (!(t->c_iflag & BRKINT) || (t->c_iflag & IGNBRK)) && (!(t->c_iflag & PARMRK) || (t->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK)) && !(t->c_lflag & (ECHO | ICANON | IEXTEN | ISIG | PENDIN)) && linesw[tp->t_line].l_rint == ttyinput) tp->t_state |= TS_CAN_BYPASS_L_RINT; else tp->t_state &= ~TS_CAN_BYPASS_L_RINT; rc->rc_hotchar = linesw[tp->t_line].l_hotchar; } static void rc_wait0(struct rc_softc *sc, int chan, int line) { int rcnt; for (rcnt = 50; rcnt && rcin(sc, CD180_CCR); rcnt--) DELAY(30); if (rcnt == 0) device_printf(sc->sc_dev, "channel %d command timeout, rc.c line: %d\n", chan, line); } static device_method_t rc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rc_probe), DEVMETHOD(device_attach, rc_attach), DEVMETHOD(device_detach, rc_detach), { 0, 0 } }; static driver_t rc_driver = { "rc", rc_methods, sizeof(struct rc_softc), }; DRIVER_MODULE(rc, isa, rc_driver, rc_devclass, 0, 0); Index: head/sys/dev/re/if_re.c =================================================================== --- head/sys/dev/re/if_re.c (revision 129878) +++ head/sys/dev/re/if_re.c (revision 129879) @@ 
-1,2502 +1,2503 @@ /* * Copyright (c) 1997, 1998-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S * and the RTL8110S. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. 
* * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7.5K, so the max MTU possible with this * driver is 7500 bytes. */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Default to using PIO access for this driver. */ #define RE_USEIOSPACE #include #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. 
*/ static struct rl_type re_devs[] = { { RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169, "RealTek 8169 Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S, "RealTek 8169S Single-chip Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S, "RealTek 8110S Single-chip Gigabit Ethernet" }, { 0, 0, 0, NULL } }; static struct rl_hwrev re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "" }, { RL_HWREV_8139A, RL_8139, "A" }, { RL_HWREV_8139AG, RL_8139, "A-G" }, { RL_HWREV_8139B, RL_8139, "B" }, { RL_HWREV_8130, RL_8139, "8130" }, { RL_HWREV_8139C, RL_8139, "C" }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"}, { RL_HWREV_8169, RL_8169, "8169"}, { RL_HWREV_8169S, RL_8169, "8169S"}, { RL_HWREV_8110S, RL_8169, "8110S"}, { RL_HWREV_8100, RL_8139, "8100"}, { RL_HWREV_8101, RL_8139, "8101"}, { 0, 0, NULL } }; static int re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf *, int *); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static void re_dma_map_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static int re_allocmem (device_t, struct rl_softc *); static int re_newbuf (struct rl_softc *, int, struct mbuf *); static int re_rx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); static void re_rxeof (struct rl_softc *); static void re_txeof (struct rl_softc *); static void re_intr (void *); static void re_tick (void *); static void re_start (struct ifnet *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_stop (struct rl_softc *); static void re_watchdog (struct ifnet *); static int re_suspend (device_t); static int re_resume (device_t); static void re_shutdown (device_t); static int re_ifmedia_upd (struct ifnet *); static void 
re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static void re_miibus_statchg (device_t); static uint32_t re_mchash (const uint8_t *); static void re_setmulti (struct rl_softc *); static void re_reset (struct rl_softc *); static int re_diag (struct rl_softc *); #ifdef RE_USEIOSPACE #define RL_RES SYS_RES_IOPORT #define RL_RID RL_PCI_LOIO #else #define RL_RES SYS_RES_MEMORY #define RL_RID RL_PCI_LOMEM #endif static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), { 0, 0 } }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. 
*/ static void re_eeprom_putbyte(sc, addr) struct rl_softc *sc; int addr; { register int d, i; d = addr | sc->rl_eecmd_read; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(sc, addr, dest) struct rl_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Enter EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); *dest = word; return; } /* * Read a sequence of words from the EEPROM. 
*/ static void re_read_eeprom(sc, dest, off, cnt, swap) struct rl_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { re_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } static int re_gmii_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct rl_softc *sc; u_int32_t rval; int i; if (phy != 1) return(0); sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return(rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); DELAY(1000); for (i = 0; i < RL_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(100); } if (i == RL_TIMEOUT) { printf ("re%d: PHY read failed\n", sc->rl_unit); return (0); } return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); DELAY(1000); for (i = 0; i < RL_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(100); } if (i == RL_TIMEOUT) { printf ("re%d: PHY write failed\n", sc->rl_unit); return (0); } return (0); } static int re_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct rl_softc *sc; u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); RL_LOCK(sc); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); RL_UNLOCK(sc); return (rval); } /* Pretend the internal PHY is only at address 0 */ if (phy) { RL_UNLOCK(sc); return(0); } switch(reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; 
case MII_PHYIDR1: case MII_PHYIDR2: RL_UNLOCK(sc); return(0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); RL_UNLOCK(sc); return(rval); default: printf("re%d: bad phy register\n", sc->rl_unit); RL_UNLOCK(sc); return(0); } rval = CSR_READ_2(sc, re8139_reg); RL_UNLOCK(sc); return(rval); } static int re_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); RL_LOCK(sc); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); RL_UNLOCK(sc); return (rval); } /* Pretend the internal PHY is only at address 0 */ if (phy) { RL_UNLOCK(sc); return(0); } switch(reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: RL_UNLOCK(sc); return(0); break; default: printf("re%d: bad phy register\n", sc->rl_unit); RL_UNLOCK(sc); return(0); } CSR_WRITE_2(sc, re8139_reg, data); RL_UNLOCK(sc); return(0); } static void re_miibus_statchg(dev) device_t dev; { return; } /* * Calculate CRC of a multicast group address, return the upper 6 bits. */ static uint32_t re_mchash(addr) const uint8_t *addr; { uint32_t crc, carry; int idx, bit; uint8_t data; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); crc <<= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return(crc >> 26); } /* * Program the 64-bit multicast hash filter. 
*/ static void re_setmulti(sc) struct rl_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; ifp = &sc->arpcom.ac_if; rxfilt = CSR_READ_4(sc, RL_RXCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= RL_RXCFG_RX_MULTI; CSR_WRITE_4(sc, RL_RXCFG, rxfilt); CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, RL_MAR0, 0); CSR_WRITE_4(sc, RL_MAR4, 0); /* now program new ones */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = re_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if (mcnt) rxfilt |= RL_RXCFG_RX_MULTI; else rxfilt &= ~RL_RXCFG_RX_MULTI; CSR_WRITE_4(sc, RL_RXCFG, rxfilt); CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); return; } static void re_reset(sc) struct rl_softc *sc; { register int i; CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) printf("re%d: reset never completed!\n", sc->rl_unit); CSR_WRITE_1(sc, 0x82, 1); return; } /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. * * There's no way to work around this (short of talking a soldering * iron to the board), however we can detect it. 
The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. */ static int re_diag(sc) struct rl_softc *sc; { struct ifnet *ifp = &sc->arpcom.ac_if; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 == NULL) return(ENOBUFS); /* * Initialize the NIC in test mode. This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; re_init(sc); re_stop(sc); DELAY(100000); re_init(sc); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us. 
*/ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); IF_HANDOFF(&ifp->if_snd, m0, ifp); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { printf("re%d: diagnostic failed, failed to receive packet " "in loopback mode\n", sc->rl_unit); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[0], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[0]); m0 = sc->rl_ldata.rl_rx_mbuf[0]; sc->rl_ldata.rl_rx_mbuf[0] = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { printf("re%d: diagnostic failed, received short packet\n", sc->rl_unit); error = EIO; goto done; } /* Test that the received packet data matches what we sent. 
*/ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { printf("re%d: WARNING, DMA FAILURE!\n", sc->rl_unit); printf("re%d: expected TX data: %6D/%6D/0x%x\n", sc->rl_unit, dst, ":", src, ":", ETHERTYPE_IP); printf("re%d: received RX data: %6D/%6D/0x%x\n", sc->rl_unit, eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); printf("re%d: You may have a defective 32-bit NIC plugged " "into a 64-bit PCI slot.\n", sc->rl_unit); printf("re%d: Please re-install the NIC in a 32-bit slot " "for proper operation.\n", sc->rl_unit); printf("re%d: Read the re(4) man page for more details.\n", sc->rl_unit); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); return (error); } /* * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(dev) device_t dev; { struct rl_type *t; struct rl_softc *sc; int rid; u_int32_t hwrev; t = re_devs; sc = device_get_softc(dev); while(t->rl_name != NULL) { if ((pci_get_vendor(dev) == t->rl_vid) && (pci_get_device(dev) == t->rl_did)) { /* * Temporarily map the I/O space * so we can read the chip ID register. 
*/ rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return(ENXIO); } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); RL_LOCK(sc); hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); RL_UNLOCK(sc); mtx_destroy(&sc->rl_mtx); if (t->rl_basetype == hwrev) { device_set_desc(dev, t->rl_name); return(0); } } t++; } return(ENXIO); } /* * This routine takes the segment list provided as the result of * a bus_dma_map_load() operation and assigns the addresses/lengths * to RealTek DMA descriptors. This can be called either by the RX * code or the TX code. In the RX case, we'll probably wind up mapping * at most one segment. For the TX case, there could be any number of * segments since TX packets may span multiple mbufs. In either case, * if the number of segments is larger than the rl_maxsegs limit * specified by the caller, we abort the mapping operation. Sadly, * whoever designed the buffer mapping API did not provide a way to * return an error from here, so we have to fake it a bit. */ static void re_dma_map_desc(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct rl_dmaload_arg *ctx; struct rl_desc *d = NULL; int i = 0, idx; if (error) return; ctx = arg; /* Signal error to caller if there's too many segments */ if (nseg > ctx->rl_maxsegs) { ctx->rl_maxsegs = 0; return; } /* * Map the segment array into descriptors. Note that we set the * start-of-frame and end-of-frame markers for either TX or RX, but * they really only have meaning in the TX case. (In the RX case, * it's the chip that tells us where packets begin and end.) 
* We also keep track of the end of the ring and set the * end-of-ring bits as needed, and we set the ownership bits * in all except the very first descriptor. (The caller will * set this descriptor later when it start transmission or * reception.) */ idx = ctx->rl_idx; while(1) { u_int32_t cmdstat; d = &ctx->rl_ring[idx]; if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) { ctx->rl_maxsegs = 0; return; } cmdstat = segs[i].ds_len; d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); if (i == 0) cmdstat |= RL_TDESC_CMD_SOF; else cmdstat |= RL_TDESC_CMD_OWN; if (idx == (RL_RX_DESC_CNT - 1)) cmdstat |= RL_TDESC_CMD_EOR; d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags); i++; if (i == nseg) break; RL_DESC_INC(idx); } d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); ctx->rl_maxsegs = nseg; ctx->rl_idx = idx; return; } /* * Map a single buffer address. */ static void re_dma_map_addr(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { u_int32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; return; } static int re_allocmem(dev, sc) device_t dev; struct rl_softc *sc; { int error; int nseg; int i; /* * Allocate map for RX mbufs. */ nseg = 32; error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rl_ldata.rl_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* * Allocate map for TX descriptor list. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rl_ldata.rl_tx_list_map); if (error) return (ENOMEM); /* Load the map for the TX ring. */ error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ, re_dma_map_addr, &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); /* Create DMA maps for TX buffers */ for (i = 0; i < RL_TX_DESC_CNT; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, &sc->rl_ldata.rl_tx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for TX\n"); return(ENOMEM); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rl_ldata.rl_rx_list_map); if (error) return (ENOMEM); /* Load the map for the RX ring. 
*/ error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, RL_TX_LIST_SZ, re_dma_map_addr, &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); /* Create DMA maps for RX buffers */ for (i = 0; i < RL_RX_DESC_CNT; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, &sc->rl_ldata.rl_rx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for RX\n"); return(ENOMEM); } } return(0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int re_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t as[3]; struct rl_softc *sc; struct ifnet *ifp; struct rl_hwrev *hw_rev; int hwrev; u_int16_t re_did = 0; int unit, error = 0, rid, i; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #ifndef BURN_BRIDGES /* * Handle power management nonsense. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, RL_PCI_LOIO, 4); membase = pci_read_config(dev, RL_PCI_LOMEM, 4); irq = pci_read_config(dev, RL_PCI_INTLINE, 4); /* Reset the power state. */ printf("re%d: chip is is in D%d power mode " "-- setting to D0\n", unit, pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, RL_PCI_LOIO, iobase, 4); pci_write_config(dev, RL_PCI_LOMEM, membase, 4); pci_write_config(dev, RL_PCI_INTLINE, irq, 4); } #endif /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { printf ("re%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); /* Allocate interrupt */ rid = 0; sc->rl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq == NULL) { printf("re%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Reset the adapter. */ re_reset(sc); hw_rev = re_hwrevs; hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; while (hw_rev->rl_desc != NULL) { if (hw_rev->rl_rev == hwrev) { sc->rl_type = hw_rev->rl_type; break; } hw_rev++; } if (sc->rl_type == RL_8169) { /* Set RX length mask */ sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; /* Force station address autoload from the EEPROM */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_AUTOLOAD); for (i = 0; i < RL_TIMEOUT; i++) { if (!(CSR_READ_1(sc, RL_EECMD) & RL_EEMODE_AUTOLOAD)) break; DELAY(100); } if (i == RL_TIMEOUT) printf ("re%d: eeprom autoload timed out\n", unit); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } else { /* Set RX length mask */ sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; sc->rl_eecmd_read = RL_EECMD_READ_6BIT; re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0); if (re_did != 0x8129) sc->rl_eecmd_read = RL_EECMD_READ_8BIT; /* * Get station address from the EEPROM. */ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0); for (i = 0; i < 3; i++) { eaddr[(i * 2) + 0] = as[i] & 0xff; eaddr[(i * 2) + 1] = as[i] >> 8; } } sc->rl_unit = unit; bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); /* * Allocate the parent bus DMA tag appropriate for PCI. 
*/ #define RL_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_parent_tag); if (error) goto fail; error = re_allocmem(dev, sc); if (error) goto fail; /* Do MII setup */ if (mii_phy_probe(dev, &sc->rl_miibus, re_ifmedia_upd, re_ifmedia_sts)) { printf("re%d: MII without any phy!\n", sc->rl_unit); error = ENXIO; goto fail; } ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = re_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; ifp->if_start = re_start; ifp->if_hwassist = RE_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_watchdog = re_watchdog; ifp->if_init = re_init; if (sc->rl_type == RL_8169) ifp->if_baudrate = 1000000000; else ifp->if_baudrate = 100000000; ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN; ifp->if_capenable = ifp->if_capabilities; callout_handle_init(&sc->rl_stat_ch); /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Perform hardware diagnostic. */ error = re_diag(sc); if (error) { printf("re%d: attach aborted due to hardware diag failure\n", unit); ether_ifdetach(ifp); goto fail; } /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET, re_intr, sc, &sc->rl_intrhand); if (error) { printf("re%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); goto fail; } fail: if (error) re_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. 
It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int re_detach(dev) device_t dev; { struct rl_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); RL_LOCK(sc); ifp = &sc->arpcom.ac_if; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { re_stop(sc); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifattach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to re_ioctl(), * which will try to call re_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the re_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); } if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); if (sc->rl_intrhand) bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); if (sc->rl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); if (sc->rl_res) bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); /* Unload and free the RX DMA ring memory and map */ if (sc->rl_ldata.rl_rx_list_tag) { bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map); bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->rl_ldata.rl_tx_list_tag) { bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map); bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if 
(sc->rl_ldata.rl_mtag) { for (i = 0; i < RL_TX_DESC_CNT; i++) bus_dmamap_destroy(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_tx_dmamap[i]); for (i = 0; i < RL_RX_DESC_CNT; i++) bus_dmamap_destroy(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i]); bus_dma_tag_destroy(sc->rl_ldata.rl_mtag); } /* Unload and free the stats buffer and map */ if (sc->rl_ldata.rl_stag) { bus_dmamap_unload(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_rx_list_map); bus_dmamem_free(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); bus_dma_tag_destroy(sc->rl_ldata.rl_stag); } if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); RL_UNLOCK(sc); mtx_destroy(&sc->rl_mtx); return(0); } static int re_newbuf(sc, idx, m) struct rl_softc *sc; int idx; struct mbuf *m; { struct rl_dmaload_arg arg; struct mbuf *n = NULL; int error; if (m == NULL) { n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (n == NULL) return(ENOBUFS); m = n; } else m->m_data = m->m_ext.ext_buf; /* * Initialize mbuf length fields and fixup * alignment so that the frame payload is * longword aligned. 
*/ m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, ETHER_ALIGN); arg.sc = sc; arg.rl_idx = idx; arg.rl_maxsegs = 1; arg.rl_flags = 0; arg.rl_ring = sc->rl_ldata.rl_rx_list; error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); if (error || arg.rl_maxsegs != 1) { if (n != NULL) m_freem(n); return (ENOMEM); } sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN); sc->rl_ldata.rl_rx_mbuf[idx] = m; bus_dmamap_sync(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[idx], BUS_DMASYNC_PREREAD); return(0); } static int re_tx_list_init(sc) struct rl_softc *sc; { bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); bzero ((char *)&sc->rl_ldata.rl_tx_mbuf, (RL_TX_DESC_CNT * sizeof(struct mbuf *))); bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE); sc->rl_ldata.rl_tx_prodidx = 0; sc->rl_ldata.rl_tx_considx = 0; sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT; return(0); } static int re_rx_list_init(sc) struct rl_softc *sc; { int i; bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); bzero ((char *)&sc->rl_ldata.rl_rx_mbuf, (RL_RX_DESC_CNT * sizeof(struct mbuf *))); for (i = 0; i < RL_RX_DESC_CNT; i++) { if (re_newbuf(sc, i, NULL) == ENOBUFS) return(ENOBUFS); } /* Flush the RX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; return(0); } /* * RX handler for C+ and 8169. For the gigE chips, we support * the reception of jumbo frames that have been fragmented * across multiple 2K mbuf cluster buffers. 
*/ static void re_rxeof(sc) struct rl_softc *sc; { struct mbuf *m; struct ifnet *ifp; int i, total_len; struct rl_desc *cur_rx; u_int32_t rxstat, rxvlan; RL_LOCK_ASSERT(sc); ifp = &sc->arpcom.ac_if; i = sc->rl_ldata.rl_rx_prodidx; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) { cur_rx = &sc->rl_ldata.rl_rx_list[i]; m = sc->rl_ldata.rl_rx_mbuf[i]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); rxvlan = le32toh(cur_rx->rl_vlanctl); /* Invalidate the RX mbuf and unload its map */ bus_dmamap_sync(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i]); if (!(rxstat & RL_RDESC_STAT_EOF)) { m->m_len = MCLBYTES - ETHER_ALIGN; if (sc->rl_head == NULL) sc->rl_head = sc->rl_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; sc->rl_tail = m; } re_newbuf(sc, i, NULL); RL_DESC_INC(i); continue; } /* * NOTE: for the 8139C+, the frame length field * is always 12 bits in size, but for the gigE chips, * it is 13 bits (since the max RX frame length is 16K). * Unfortunately, all 32 bits in the status word * were already used, so to make room for the extra * length bit, RealTek took out the 'frame alignment * error' bit and shifted the other status bits * over one slot. The OWN, EOR, FS and LS bits are * still in the same places. We have already extracted * the frame length and checked the OWN bit, so rather * than using an alternate bit mapping, we shift the * status bits one space to the right so we can evaluate * them using the 8169 status as though it was in the * same format as that of the 8139C+. */ if (sc->rl_type == RL_8169) rxstat >>= 1; if (rxstat & RL_RDESC_STAT_RXERRSUM) { ifp->if_ierrors++; /* * If this is part of a multi-fragment packet, * discard all the pieces. 
*/ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_newbuf(sc, i, m); RL_DESC_INC(i); continue; } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (re_newbuf(sc, i, NULL)) { ifp->if_ierrors++; if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_newbuf(sc, i, m); RL_DESC_INC(i); continue; } RL_DESC_INC(i); if (sc->rl_head != NULL) { m->m_len = total_len % (MCLBYTES - ETHER_ALIGN); /* * Special case: if there's 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes is the CRC, which we don't * care about anyway. */ if (m->m_len <= ETHER_CRC_LEN) { sc->rl_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; } m = sc->rl_head; sc->rl_head = sc->rl_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { /* Check IP header checksum */ if (rxstat & RL_RDESC_STAT_PROTOID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if ((RL_TCPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || (RL_UDPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } if (rxvlan & RL_RDESC_VLANCTL_TAG) VLAN_INPUT_TAG(ifp, m, ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue); RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = i; return; } static void re_txeof(sc) struct rl_softc *sc; { struct ifnet *ifp; u_int32_t txstat; int 
idx; ifp = &sc->arpcom.ac_if; idx = sc->rl_ldata.rl_tx_considx; /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD); while (idx != sc->rl_ldata.rl_tx_prodidx) { txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat); if (txstat & RL_TDESC_CMD_OWN) break; /* * We only stash mbufs in the last descriptor * in a fragment chain, which also happens to * be the only place where the TX status bits * are valid. */ if (txstat & RL_TDESC_CMD_EOF) { m_freem(sc->rl_ldata.rl_tx_mbuf[idx]); sc->rl_ldata.rl_tx_mbuf[idx] = NULL; bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_tx_dmamap[idx]); if (txstat & (RL_TDESC_STAT_EXCESSCOL| RL_TDESC_STAT_COLCNT)) ifp->if_collisions++; if (txstat & RL_TDESC_STAT_TXERRSUM) ifp->if_oerrors++; else ifp->if_opackets++; } sc->rl_ldata.rl_tx_free++; RL_DESC_INC(idx); } /* No changes made to the TX ring, so no flush needed */ if (idx != sc->rl_ldata.rl_tx_considx) { sc->rl_ldata.rl_tx_considx = idx; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_timer = 0; } /* * If not all descriptors have been released reaped yet, * reload the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. 
*/ if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT) CSR_WRITE_4(sc, RL_TIMERCNT, 1); return; } static void re_tick(xsc) void *xsc; { struct rl_softc *sc; struct mii_data *mii; sc = xsc; RL_LOCK(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); sc->rl_stat_ch = timeout(re_tick, sc, hz); RL_UNLOCK(sc); return; } #ifdef DEVICE_POLLING static void re_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; RL_LOCK(sc); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); goto done; } sc->rxcycles = count; re_rxeof(sc); re_txeof(sc); if (ifp->if_snd.ifq_head != NULL) (*ifp->if_start)(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int16_t status; status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) goto done; if (status) CSR_WRITE_2(sc, RL_ISR, status); /* * XXX check behaviour on receiver stalls. */ if (status & RL_ISR_SYSTEM_ERR) { re_reset(sc); re_init(sc); } } done: RL_UNLOCK(sc); } #endif /* DEVICE_POLLING */ static void re_intr(arg) void *arg; { struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; sc = arg; if (sc->suspended) { return; } RL_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_UP)) { RL_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); re_poll(ifp, 0, 1); goto done; } #endif /* DEVICE_POLLING */ for (;;) { status = CSR_READ_2(sc, RL_ISR); /* If the card has gone away the read returns 0xffff. 
*/ if (status == 0xffff) break; if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & RL_INTRS_CPLUS) == 0) break; if (status & RL_ISR_RX_OK) re_rxeof(sc); if (status & RL_ISR_RX_ERR) re_rxeof(sc); if ((status & RL_ISR_TIMEOUT_EXPIRED) || (status & RL_ISR_TX_ERR) || (status & RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { re_reset(sc); re_init(sc); } if (status & RL_ISR_LINKCHG) { untimeout(re_tick, sc, sc->rl_stat_ch); re_tick(sc); } } if (ifp->if_snd.ifq_head != NULL) (*ifp->if_start)(ifp); #ifdef DEVICE_POLLING done: #endif RL_UNLOCK(sc); return; } static int re_encap(sc, m_head, idx) struct rl_softc *sc; struct mbuf *m_head; int *idx; { struct mbuf *m_new = NULL; struct rl_dmaload_arg arg; bus_dmamap_t map; int error; struct m_tag *mtag; if (sc->rl_ldata.rl_tx_free <= 4) return(EFBIG); /* * Set up checksum offload. Note: checksum offload bits must * appear in all descriptors of a multi-descriptor transmit * attempt. (This is according to testing done with an 8169 * chip. I'm not sure if this is a requirement or a bug.) 
*/ arg.rl_flags = 0; if (m_head->m_pkthdr.csum_flags & CSUM_IP) arg.rl_flags |= RL_TDESC_CMD_IPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) arg.rl_flags |= RL_TDESC_CMD_TCPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_UDP) arg.rl_flags |= RL_TDESC_CMD_UDPCSUM; arg.sc = sc; arg.rl_idx = *idx; arg.rl_maxsegs = sc->rl_ldata.rl_tx_free; if (arg.rl_maxsegs > 4) arg.rl_maxsegs -= 4; arg.rl_ring = sc->rl_ldata.rl_tx_list; map = sc->rl_ldata.rl_tx_dmamap[*idx]; error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map, m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error); return(ENOBUFS); } /* Too many segments to map, coalesce into a single mbuf */ if (error || arg.rl_maxsegs == 0) { m_new = m_defrag(m_head, M_DONTWAIT); if (m_new == NULL) return(1); else m_head = m_new; arg.sc = sc; arg.rl_idx = *idx; arg.rl_maxsegs = sc->rl_ldata.rl_tx_free; arg.rl_ring = sc->rl_ldata.rl_tx_list; error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map, m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); if (error) { printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error); return(EFBIG); } } /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->rl_ldata.rl_tx_dmamap[*idx] = sc->rl_ldata.rl_tx_dmamap[arg.rl_idx]; sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map; sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head; sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs; /* * Set up hardware VLAN tagging. Note: vlan tag info must * appear in the first descriptor of a multi-descriptor * transmission attempt. */ mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head); if (mtag != NULL) sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl = htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG); /* Transfer ownership of packet to the chip. 
*/ sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |= htole32(RL_TDESC_CMD_OWN); if (*idx != arg.rl_idx) sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |= htole32(RL_TDESC_CMD_OWN); RL_DESC_INC(arg.rl_idx); *idx = arg.rl_idx; return(0); } /* * Main transmit routine for C+ and gigE NICs. */ static void re_start(ifp) struct ifnet *ifp; { struct rl_softc *sc; struct mbuf *m_head = NULL; int idx; sc = ifp->if_softc; RL_LOCK(sc); idx = sc->rl_ldata.rl_tx_prodidx; while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (re_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } /* Flush the TX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_tx_prodidx = idx; /* * RealTek put the TX poll request register in a different * location on the 8169 gigE chip. I don't know why. */ if (sc->rl_type == RL_8169) CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START); else CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START); /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the TIMERINT register, and then trigger an * interrupt. Each time we write to the TIMERCNT register, * the timer count is reset to 0. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); RL_UNLOCK(sc); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; return; } static void re_init(xsc) void *xsc; { struct rl_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; u_int32_t rxcfg = 0; RL_LOCK(sc); mii = device_get_softc(sc->rl_miibus); /* * Cancel pending I/O and free all RX/TX buffers. 
*/ re_stop(sc); /* * Enable C+ RX and TX mode, as well as VLAN stripping and * RX checksum offload. We must configure the C+ register * before all others. */ CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB| RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW| RL_CPLUSCMD_VLANSTRIP| (ifp->if_capenable & IFCAP_RXCSUM ? RL_CPLUSCMD_RXCSUM_ENB : 0)); /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_STREAM_4(sc, RL_IDR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); CSR_WRITE_STREAM_4(sc, RL_IDR4, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* * For C+ mode, initialize the RX descriptors and mbufs. */ re_rx_list_init(sc); re_tx_list_init(sc); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX and RX configuration. */ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); /* Set the individual bit to receive frames for this host only. */ rxcfg = CSR_READ_4(sc, RL_RXCFG); rxcfg |= RL_RXCFG_RX_INDIV; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { rxcfg |= RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } else { rxcfg &= ~RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { rxcfg |= RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } else { rxcfg &= ~RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } /* * Program the multicast filter, if necessary. 
*/ re_setmulti(sc); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* DEVICE_POLLING */ /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); #ifdef notdef /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); #endif /* * Load the addresses of the RX and TX lists into the chip. */ CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. This gives us TX interrupt * moderation, which dramatically improves TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. */ if (sc->rl_type == RL_8169) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); if (sc->rl_testmode) { RL_UNLOCK(sc); return; } mii_mediachg(mii); CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->rl_stat_ch = timeout(re_tick, sc, hz); RL_UNLOCK(sc); return; } /* * Set media options. 
*/ static int re_ifmedia_upd(ifp) struct ifnet *ifp; { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); mii_mediachg(mii); return(0); } /* * Report current media status. */ static void re_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int re_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; RL_LOCK(sc); switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu > RL_JUMBO_MTU) error = EINVAL; ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { re_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) re_stop(sc); } error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: re_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING); ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING); if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = RE_CSUM_FEATURES; else ifp->if_hwassist = 0; if (ifp->if_flags & IFF_RUNNING) re_init(sc); break; default: error = ether_ioctl(ifp, command, data); break; } RL_UNLOCK(sc); return(error); } static void re_watchdog(ifp) struct ifnet *ifp; { struct rl_softc *sc; sc = ifp->if_softc; RL_LOCK(sc); printf("re%d: watchdog timeout\n", sc->rl_unit); ifp->if_oerrors++; re_txeof(sc); re_rxeof(sc); re_init(sc); RL_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
 */
static void
re_stop(sc)
	struct rl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	/* Cancel the watchdog and the periodic MII tick callout. */
	ifp->if_timer = 0;

	untimeout(re_tick, sc, sc->rl_stat_ch);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	/* Halt the chip and mask all interrupt sources. */
	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);

	/* Drop any partially reassembled multi-fragment RX chain. */
	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RL_TX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[i]);
			m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
			sc->rl_ldata.rl_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_rx_dmamap[i]);
			m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
			sc->rl_ldata.rl_rx_mbuf[i] = NULL;
		}
	}

	RL_UNLOCK(sc);

	return;
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
re_suspend(dev)
	device_t		dev;
{
#ifndef BURN_BRIDGES
	register int		i;
#endif
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	re_stop(sc);

#ifndef BURN_BRIDGES
	/*
	 * Save the BARs, expansion ROM address, interrupt line and
	 * latency/cache-line registers so re_resume() can restore
	 * them if the BIOS does not.
	 */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif

	/* re_intr() checks this flag and ignores interrupts while set. */
	sc->suspended = 1;

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
*/ static int re_resume(dev) device_t dev; { #ifndef BURN_BRIDGES register int i; #endif struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = &sc->arpcom.ac_if; #ifndef BURN_BRIDGES /* better way to do this? */ for (i = 0; i < 5; i++) pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); /* reenable busmastering */ pci_enable_busmaster(dev); pci_enable_io(dev, RL_RES); #endif /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init(sc); sc->suspended = 0; return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void re_shutdown(dev) device_t dev; { struct rl_softc *sc; sc = device_get_softc(dev); re_stop(sc); return; } Index: head/sys/dev/rp/rp_isa.c =================================================================== --- head/sys/dev/rp/rp_isa.c (revision 129878) +++ head/sys/dev/rp/rp_isa.c (revision 129879) @@ -1,504 +1,505 @@ /* * Copyright (c) Comtrol Corporation * All rights reserved. * * ISA-specific part separated from: * sys/i386/isa/rp.c,v 1.33 1999/09/28 11:45:27 phk Exp * * Redistribution and use in source and binary forms, with or without * modification, are permitted prodived that the follwoing conditions * are met. * 1. Redistributions of source code must retain the above copyright * notive, this list of conditions and the following disclainer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials prodided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Comtrol Corporation. * 4. The name of Comtrol Corporation may not be used to endorse or * promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include #include #include #include #include #define ROCKET_C #include #include #include /* ISA-specific part of CONTROLLER_t */ struct ISACONTROLLER_T { int MBaseIO; /* rid of the Mudbac controller for this controller */ int MReg0IO; /* offset0 of the Mudbac controller for this controller */ int MReg1IO; /* offset1 of the Mudbac controller for this controller */ int MReg2IO; /* offset2 of the Mudbac controller for this controller */ int MReg3IO; /* offset3 of the Mudbac controller for this controller */ Byte_t MReg2; Byte_t MReg3; }; typedef struct ISACONTROLLER_T ISACONTROLLER_t; #define ISACTL(ctlp) ((ISACONTROLLER_t *)((ctlp)->bus_ctlp)) /*************************************************************************** Function: sControllerEOI Purpose: Strobe the MUDBAC's End Of Interrupt bit. 
Call: sControllerEOI(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure */ #define sControllerEOI(MudbacCtlP,CtlP) \ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg2IO,ISACTL(CtlP)->MReg2 | INT_STROB) /*************************************************************************** Function: sDisAiop Purpose: Disable I/O access to an AIOP Call: sDisAiop(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure int AiopNum; Number of AIOP on controller */ #define sDisAiop(MudbacCtlP,CtlP,AIOPNUM) \ { \ ISACTL(CtlP)->MReg3 &= rp_sBitMapClrTbl[AIOPNUM]; \ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); \ } /*************************************************************************** Function: sEnAiop Purpose: Enable I/O access to an AIOP Call: sEnAiop(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure int AiopNum; Number of AIOP on controller */ #define sEnAiop(MudbacCtlP,CtlP,AIOPNUM) \ { \ ISACTL(CtlP)->MReg3 |= rp_sBitMapSetTbl[AIOPNUM]; \ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); \ } /*************************************************************************** Function: sGetControllerIntStatus Purpose: Get the controller interrupt status Call: sGetControllerIntStatus(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure Return: Byte_t: The controller interrupt status in the lower 4 bits. Bits 0 through 3 represent AIOP's 0 through 3 respectively. If a bit is set that AIOP is interrupting. Bits 4 through 7 will always be cleared. 
*/ #define sGetControllerIntStatus(MudbacCtlP,CtlP) \ (rp_readio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg1IO) & 0x0f) static devclass_t rp_devclass; static CONTROLLER_t *rp_controller; static int rp_nisadevs; static int rp_probe(device_t dev); static int rp_attach(device_t dev); static void rp_isareleaseresource(CONTROLLER_t *ctlp); static int sInitController(CONTROLLER_T *CtlP, CONTROLLER_T *MudbacCtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly); static rp_aiop2rid_t rp_isa_aiop2rid; static rp_aiop2off_t rp_isa_aiop2off; static rp_ctlmask_t rp_isa_ctlmask; static int rp_probe(device_t dev) { int unit; CONTROLLER_t *controller; int num_aiops; CONTROLLER_t *ctlp; int retval; /* * We have no PnP RocketPort cards. * (At least according to LINT) */ if (isa_get_logicalid(dev) != 0) return (ENXIO); /* We need IO port resource to configure an ISA device. */ if (bus_get_resource_count(dev, SYS_RES_IOPORT, 0) == 0) return (ENXIO); unit = device_get_unit(dev); if (unit >= 4) { device_printf(dev, "rpprobe: unit number %d invalid.\n", unit); return (ENXIO); } device_printf(dev, "probing for RocketPort(ISA) unit %d.\n", unit); ctlp = device_get_softc(dev); bzero(ctlp, sizeof(*ctlp)); ctlp->dev = dev; ctlp->aiop2rid = rp_isa_aiop2rid; ctlp->aiop2off = rp_isa_aiop2off; ctlp->ctlmask = rp_isa_ctlmask; /* The IO ports of AIOPs for an ISA controller are discrete. 
*/ ctlp->io_num = 1; ctlp->io_rid = malloc(sizeof(*(ctlp->io_rid)) * MAX_AIOPS_PER_BOARD, M_DEVBUF, M_NOWAIT | M_ZERO); ctlp->io = malloc(sizeof(*(ctlp->io)) * MAX_AIOPS_PER_BOARD, M_DEVBUF, M_NOWAIT | M_ZERO); if (ctlp->io_rid == NULL || ctlp->io == NULL) { device_printf(dev, "rp_attach: Out of memory.\n"); retval = ENOMEM; goto nogo; } ctlp->bus_ctlp = malloc(sizeof(ISACONTROLLER_t) * 1, M_DEVBUF, M_NOWAIT | M_ZERO); if (ctlp->bus_ctlp == NULL) { device_printf(dev, "rp_attach: Out of memory.\n"); retval = ENOMEM; goto nogo; } ctlp->io_rid[0] = 0; if (rp_controller != NULL) { controller = rp_controller; ctlp->io[0] = bus_alloc_resource(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], 0, ~0, 0x40, RF_ACTIVE); } else { controller = rp_controller = ctlp; ctlp->io[0] = bus_alloc_resource(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], 0, ~0, 0x44, RF_ACTIVE); } if (ctlp->io[0] == NULL) { device_printf(dev, "rp_attach: Resource not available.\n"); retval = ENXIO; goto nogo; } num_aiops = sInitController(ctlp, controller, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0); if (num_aiops <= 0) { device_printf(dev, "board%d init failed.\n", unit); retval = ENXIO; goto nogo; } if (rp_controller == NULL) rp_controller = controller; rp_nisadevs++; device_set_desc(dev, "RocketPort ISA"); return (0); nogo: rp_isareleaseresource(ctlp); return (retval); } static int rp_attach(device_t dev) { int unit; int num_ports, num_aiops; int aiop; CONTROLLER_t *ctlp; int retval; unit = device_get_unit(dev); ctlp = device_get_softc(dev); #if notdef num_aiops = sInitController(ctlp, rp_controller, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0); #else num_aiops = ctlp->NumAiop; #endif /* notdef */ num_ports = 0; for(aiop=0; aiop < num_aiops; aiop++) { sResetAiopByNum(ctlp, aiop); sEnAiop(rp_controller, ctlp, aiop); num_ports += sGetAiopNumChan(ctlp, aiop); } retval = rp_attachcommon(ctlp, num_aiops, num_ports); if (retval != 0) goto nogo; return (0); nogo: rp_isareleaseresource(ctlp); return (retval); } static void 
rp_isareleaseresource(CONTROLLER_t *ctlp) { int i; rp_releaseresource(ctlp); if (ctlp == rp_controller) rp_controller = NULL; if (ctlp->io != NULL) { for (i = 0 ; i < MAX_AIOPS_PER_BOARD ; i++) if (ctlp->io[i] != NULL) bus_release_resource(ctlp->dev, SYS_RES_IOPORT, ctlp->io_rid[i], ctlp->io[i]); free(ctlp->io, M_DEVBUF); } if (ctlp->io_rid != NULL) free(ctlp->io_rid, M_DEVBUF); if (rp_controller != NULL && rp_controller->io[ISACTL(ctlp)->MBaseIO] != NULL) { bus_release_resource(rp_controller->dev, SYS_RES_IOPORT, rp_controller->io_rid[ISACTL(ctlp)->MBaseIO], rp_controller->io[ISACTL(ctlp)->MBaseIO]); rp_controller->io[ISACTL(ctlp)->MBaseIO] = NULL; rp_controller->io_rid[ISACTL(ctlp)->MBaseIO] = 0; } if (ctlp->bus_ctlp != NULL) free(ctlp->bus_ctlp, M_DEVBUF); } /*************************************************************************** Function: sInitController Purpose: Initialization of controller global registers and controller structure. Call: sInitController(CtlP,MudbacCtlP,AiopNum, IRQNum,Frequency,PeriodicOnly) CONTROLLER_T *CtlP; Ptr to controller structure CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure int AiopNum; Number of Aiops int IRQNum; Interrupt Request number. Can be any of the following: 0: Disable global interrupts 3: IRQ 3 4: IRQ 4 5: IRQ 5 9: IRQ 9 10: IRQ 10 11: IRQ 11 12: IRQ 12 15: IRQ 15 Byte_t Frequency: A flag identifying the frequency of the periodic interrupt, can be any one of the following: FREQ_DIS - periodic interrupt disabled FREQ_137HZ - 137 Hertz FREQ_69HZ - 69 Hertz FREQ_34HZ - 34 Hertz FREQ_17HZ - 17 Hertz FREQ_9HZ - 9 Hertz FREQ_4HZ - 4 Hertz If IRQNum is set to 0 the Frequency parameter is overidden, it is forced to a value of FREQ_DIS. int PeriodicOnly: TRUE if all interrupts except the periodic interrupt are to be blocked. FALSE is both the periodic interrupt and other channel interrupts are allowed. If IRQNum is set to 0 the PeriodicOnly parameter is overidden, it is forced to a value of FALSE. 
Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller initialization failed. Comments: If periodic interrupts are to be disabled but AIOP interrupts are allowed, set Frequency to FREQ_DIS and PeriodicOnly to FALSE. If interrupts are to be completely disabled set IRQNum to 0. Setting Frequency to FREQ_DIS and PeriodicOnly to TRUE is an invalid combination. This function performs initialization of global interrupt modes, but it does not actually enable global interrupts. To enable and disable global interrupts use functions sEnGlobalInt() and sDisGlobalInt(). Enabling of global interrupts is normally not done until all other initializations are complete. Even if interrupts are globally enabled, they must also be individually enabled for each channel that is to generate interrupts. Warnings: No range checking on any of the parameters is done. No context switches are allowed while executing this function. After this function all AIOPs on the controller are disabled, they can be enabled with sEnAiop(). 
*/ static int sInitController( CONTROLLER_T *CtlP, CONTROLLER_T *MudbacCtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly) { int i; int ctl_base, aiop_base, aiop_size; CtlP->CtlID = CTLID_0001; /* controller release 1 */ ISACTL(CtlP)->MBaseIO = rp_nisadevs; if (MudbacCtlP->io[ISACTL(CtlP)->MBaseIO] != NULL) { ISACTL(CtlP)->MReg0IO = 0x40 + 0; ISACTL(CtlP)->MReg1IO = 0x40 + 1; ISACTL(CtlP)->MReg2IO = 0x40 + 2; ISACTL(CtlP)->MReg3IO = 0x40 + 3; } else { MudbacCtlP->io_rid[ISACTL(CtlP)->MBaseIO] = ISACTL(CtlP)->MBaseIO; ctl_base = rman_get_start(MudbacCtlP->io[0]) + 0x40 + 0x400 * rp_nisadevs; MudbacCtlP->io[ISACTL(CtlP)->MBaseIO] = bus_alloc_resource(MudbacCtlP->dev, SYS_RES_IOPORT, &CtlP->io_rid[ISACTL(CtlP)->MBaseIO], ctl_base, ctl_base + 3, 4, RF_ACTIVE); ISACTL(CtlP)->MReg0IO = 0; ISACTL(CtlP)->MReg1IO = 1; ISACTL(CtlP)->MReg2IO = 2; ISACTL(CtlP)->MReg3IO = 3; } #if 1 ISACTL(CtlP)->MReg2 = 0; /* interrupt disable */ ISACTL(CtlP)->MReg3 = 0; /* no periodic interrupts */ #else if(sIRQMap[IRQNum] == 0) /* interrupts globally disabled */ { ISACTL(CtlP)->MReg2 = 0; /* interrupt disable */ ISACTL(CtlP)->MReg3 = 0; /* no periodic interrupts */ } else { ISACTL(CtlP)->MReg2 = sIRQMap[IRQNum]; /* set IRQ number */ ISACTL(CtlP)->MReg3 = Frequency; /* set frequency */ if(PeriodicOnly) /* periodic interrupt only */ { ISACTL(CtlP)->MReg3 |= PERIODIC_ONLY; } } #endif rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg2IO,ISACTL(CtlP)->MReg2); rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); sControllerEOI(MudbacCtlP,CtlP); /* clear EOI if warm init */ /* Init AIOPs */ CtlP->NumAiop = 0; for(i=0; i < AiopNum; i++) { if (CtlP->io[i] == NULL) { CtlP->io_rid[i] = i; aiop_base = rman_get_start(CtlP->io[0]) + 0x400 * i; if (rp_nisadevs == 0) aiop_size = 0x44; else aiop_size = 0x40; CtlP->io[i] = bus_alloc_resource(CtlP->dev, SYS_RES_IOPORT, &CtlP->io_rid[i], aiop_base, aiop_base + aiop_size - 1, aiop_size, RF_ACTIVE); 
} else aiop_base = rman_get_start(CtlP->io[i]); rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO, ISACTL(CtlP)->MReg2IO, ISACTL(CtlP)->MReg2 | (i & 0x03)); /* AIOP index */ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO, ISACTL(CtlP)->MReg0IO, (Byte_t)(aiop_base >> 6)); /* set up AIOP I/O in MUDBAC */ sEnAiop(MudbacCtlP,CtlP,i); /* enable the AIOP */ CtlP->AiopID[i] = sReadAiopID(CtlP, i); /* read AIOP ID */ if(CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ { sDisAiop(MudbacCtlP,CtlP,i); /* disable AIOP */ bus_release_resource(CtlP->dev, SYS_RES_IOPORT, CtlP->io_rid[i], CtlP->io[i]); CtlP->io[i] = NULL; break; /* done looking for AIOPs */ } CtlP->AiopNumChan[i] = sReadAiopNumChan(CtlP, i); /* num channels in AIOP */ rp_writeaiop2(CtlP,i,_INDX_ADDR,_CLK_PRE); /* clock prescaler */ rp_writeaiop1(CtlP,i,_INDX_DATA,CLOCK_PRESC); CtlP->NumAiop++; /* bump count of AIOPs */ sDisAiop(MudbacCtlP,CtlP,i); /* disable AIOP */ } if(CtlP->NumAiop == 0) return(-1); else return(CtlP->NumAiop); } /* * ARGSUSED * Maps (aiop, offset) to rid. */ static int rp_isa_aiop2rid(int aiop, int offset) { /* rid equals to aiop for an ISA controller. */ return aiop; } /* * ARGSUSED * Maps (aiop, offset) to the offset of resource. */ static int rp_isa_aiop2off(int aiop, int offset) { /* Each aiop has its own resource. */ return offset; } /* Read the int status for an ISA controller. */ static unsigned char rp_isa_ctlmask(CONTROLLER_t *ctlp) { return sGetControllerIntStatus(rp_controller,ctlp); } static device_method_t rp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rp_probe), DEVMETHOD(device_attach, rp_attach), { 0, 0 } }; static driver_t rp_driver = { "rp", rp_methods, sizeof(CONTROLLER_t), }; /* * rp can be attached to an isa bus. 
*/ DRIVER_MODULE(rp, isa, rp_driver, rp_devclass, 0, 0); Index: head/sys/dev/rp/rp_pci.c =================================================================== --- head/sys/dev/rp/rp_pci.c (revision 129878) +++ head/sys/dev/rp/rp_pci.c (revision 129879) @@ -1,370 +1,371 @@ /*- * Copyright (c) Comtrol Corporation * All rights reserved. * * PCI-specific part separated from: * sys/i386/isa/rp.c,v 1.33 1999/09/28 11:45:27 phk Exp * * Redistribution and use in source and binary forms, with or without * modification, are permitted prodived that the follwoing conditions * are met. * 1. Redistributions of source code must retain the above copyright * notive, this list of conditions and the following disclainer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials prodided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Comtrol Corporation. * 4. The name of Comtrol Corporation may not be used to endorse or * promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include #include #include #include #include #define ROCKET_C #include #include #include #include /* PCI IDs */ #define RP_VENDOR_ID 0x11FE #define RP_DEVICE_ID_32I 0x0001 #define RP_DEVICE_ID_8I 0x0002 #define RP_DEVICE_ID_16I 0x0003 #define RP_DEVICE_ID_4Q 0x0004 #define RP_DEVICE_ID_8O 0x0005 #define RP_DEVICE_ID_8J 0x0006 #define RP_DEVICE_ID_4J 0x0007 #define RP_DEVICE_ID_6M 0x000C #define RP_DEVICE_ID_4M 0x000D /************************************************************************** MUDBAC remapped for PCI **************************************************************************/ #define _CFG_INT_PCI 0x40 #define _PCI_INT_FUNC 0x3A #define PCI_STROB 0x2000 #define INTR_EN_PCI 0x0010 /*************************************************************************** Function: sPCIControllerEOI Purpose: Strobe the MUDBAC's End Of Interrupt bit. Call: sPCIControllerEOI(CtlP) CONTROLLER_T *CtlP; Ptr to controller structure */ #define sPCIControllerEOI(CtlP) rp_writeio2(CtlP, 0, _PCI_INT_FUNC, PCI_STROB) /*************************************************************************** Function: sPCIGetControllerIntStatus Purpose: Get the controller interrupt status Call: sPCIGetControllerIntStatus(CtlP) CONTROLLER_T *CtlP; Ptr to controller structure Return: Byte_t: The controller interrupt status in the lower 4 bits. 
Bits 0 through 3 represent AIOP's 0 through 3 respectively. If a bit is set that AIOP is interrupting. Bits 4 through 7 will always be cleared. */ #define sPCIGetControllerIntStatus(CTLP) ((rp_readio2(CTLP, 0, _PCI_INT_FUNC) >> 8) & 0x1f) static devclass_t rp_devclass; static int rp_pciprobe(device_t dev); static int rp_pciattach(device_t dev); #if notdef static int rp_pcidetach(device_t dev); static int rp_pcishutdown(device_t dev); #endif /* notdef */ static void rp_pcireleaseresource(CONTROLLER_t *ctlp); static int sPCIInitController( CONTROLLER_t *CtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly, int VendorDevice); static rp_aiop2rid_t rp_pci_aiop2rid; static rp_aiop2off_t rp_pci_aiop2off; static rp_ctlmask_t rp_pci_ctlmask; /* * The following functions are the pci-specific part * of rp driver. */ static int rp_pciprobe(device_t dev) { char *s; s = NULL; if ((pci_get_devid(dev) & 0xffff) == RP_VENDOR_ID) s = "RocketPort PCI"; if (s != NULL) { device_set_desc(dev, s); return (0); } return (ENXIO); } static int rp_pciattach(device_t dev) { int num_ports, num_aiops; int aiop; CONTROLLER_t *ctlp; int unit; int retval; u_int32_t stcmd; ctlp = device_get_softc(dev); bzero(ctlp, sizeof(*ctlp)); ctlp->dev = dev; unit = device_get_unit(dev); ctlp->aiop2rid = rp_pci_aiop2rid; ctlp->aiop2off = rp_pci_aiop2off; ctlp->ctlmask = rp_pci_ctlmask; /* Wake up the device. */ stcmd = pci_read_config(dev, PCIR_COMMAND, 4); if ((stcmd & PCIM_CMD_PORTEN) == 0) { stcmd |= (PCIM_CMD_PORTEN); pci_write_config(dev, PCIR_COMMAND, 4, stcmd); } /* The IO ports of AIOPs for a PCI controller are continuous. 
*/ ctlp->io_num = 1; ctlp->io_rid = malloc(sizeof(*(ctlp->io_rid)) * ctlp->io_num, M_DEVBUF, M_NOWAIT | M_ZERO); ctlp->io = malloc(sizeof(*(ctlp->io)) * ctlp->io_num, M_DEVBUF, M_NOWAIT | M_ZERO); if (ctlp->io_rid == NULL || ctlp->io == NULL) { device_printf(dev, "rp_pciattach: Out of memory.\n"); retval = ENOMEM; goto nogo; } ctlp->bus_ctlp = NULL; ctlp->io_rid[0] = 0x10; ctlp->io[0] = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], RF_ACTIVE); if(ctlp->io[0] == NULL) { device_printf(dev, "ioaddr mapping failed for RocketPort(PCI).\n"); retval = ENXIO; goto nogo; } num_aiops = sPCIInitController(ctlp, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0, (pci_get_devid(dev) >> 16) & 0xffff); num_ports = 0; for(aiop=0; aiop < num_aiops; aiop++) { sResetAiopByNum(ctlp, aiop); num_ports += sGetAiopNumChan(ctlp, aiop); } retval = rp_attachcommon(ctlp, num_aiops, num_ports); if (retval != 0) goto nogo; return (0); nogo: rp_pcireleaseresource(ctlp); return (retval); } #if notdef static int rp_pcidetach(device_t dev) { CONTROLLER_t *ctlp; if (device_get_state(dev) == DS_BUSY) return (EBUSY); ctlp = device_get_softc(dev); rp_pcireleaseresource(ctlp); return (0); } static int rp_pcishutdown(device_t dev) { CONTROLLER_t *ctlp; if (device_get_state(dev) == DS_BUSY) return (EBUSY); ctlp = device_get_softc(dev); rp_pcireleaseresource(ctlp); return (0); } #endif /* notdef */ static void rp_pcireleaseresource(CONTROLLER_t *ctlp) { rp_releaseresource(ctlp); if (ctlp->io != NULL) { if (ctlp->io[0] != NULL) bus_release_resource(ctlp->dev, SYS_RES_IOPORT, ctlp->io_rid[0], ctlp->io[0]); free(ctlp->io, M_DEVBUF); } if (ctlp->io_rid != NULL) free(ctlp->io_rid, M_DEVBUF); } static int sPCIInitController( CONTROLLER_t *CtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly, int VendorDevice) { int i; CtlP->CtlID = CTLID_0001; /* controller release 1 */ sPCIControllerEOI(CtlP); /* Init AIOPs */ CtlP->NumAiop = 0; for(i=0; i < AiopNum; i++) { /*device_printf(CtlP->dev, "aiop 
%d.\n", i);*/ CtlP->AiopID[i] = sReadAiopID(CtlP, i); /* read AIOP ID */ /*device_printf(CtlP->dev, "ID = %d.\n", CtlP->AiopID[i]);*/ if(CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ { break; /* done looking for AIOPs */ } switch( VendorDevice ) { case RP_DEVICE_ID_4Q: case RP_DEVICE_ID_4J: case RP_DEVICE_ID_4M: CtlP->AiopNumChan[i] = 4; break; case RP_DEVICE_ID_6M: CtlP->AiopNumChan[i] = 6; break; case RP_DEVICE_ID_8O: case RP_DEVICE_ID_8J: case RP_DEVICE_ID_8I: case RP_DEVICE_ID_16I: case RP_DEVICE_ID_32I: CtlP->AiopNumChan[i] = 8; break; default: #if notdef CtlP->AiopNumChan[i] = 8; #else CtlP->AiopNumChan[i] = sReadAiopNumChan(CtlP, i); #endif /* notdef */ break; } /*device_printf(CtlP->dev, "%d channels.\n", CtlP->AiopNumChan[i]);*/ rp_writeaiop2(CtlP, i, _INDX_ADDR,_CLK_PRE); /* clock prescaler */ /*device_printf(CtlP->dev, "configuring clock prescaler.\n");*/ rp_writeaiop1(CtlP, i, _INDX_DATA,CLOCK_PRESC); /*device_printf(CtlP->dev, "configured clock prescaler.\n");*/ CtlP->NumAiop++; /* bump count of AIOPs */ } if(CtlP->NumAiop == 0) return(-1); else return(CtlP->NumAiop); } /* * ARGSUSED * Maps (aiop, offset) to rid. */ static int rp_pci_aiop2rid(int aiop, int offset) { /* Always return zero for a PCI controller. */ return 0; } /* * ARGSUSED * Maps (aiop, offset) to the offset of resource. */ static int rp_pci_aiop2off(int aiop, int offset) { /* Each AIOP reserves 0x40 bytes. */ return aiop * 0x40 + offset; } /* Read the int status for a PCI controller. */ static unsigned char rp_pci_ctlmask(CONTROLLER_t *ctlp) { return sPCIGetControllerIntStatus(ctlp); } static device_method_t rp_pcimethods[] = { /* Device interface */ DEVMETHOD(device_probe, rp_pciprobe), DEVMETHOD(device_attach, rp_pciattach), #if notdef DEVMETHOD(device_detach, rp_pcidetach), DEVMETHOD(device_shutdown, rp_pcishutdown), #endif /* notdef */ { 0, 0 } }; static driver_t rp_pcidriver = { "rp", rp_pcimethods, sizeof(CONTROLLER_t), }; /* * rp can be attached to a pci bus. 
*/ DRIVER_MODULE(rp, pci, rp_pcidriver, rp_devclass, 0, 0); Index: head/sys/dev/safe/safe.c =================================================================== --- head/sys/dev/safe/safe.c (revision 129878) +++ head/sys/dev/safe/safe.c (revision 129879) @@ -1,2247 +1,2248 @@ /*- * Copyright (c) 2003 Sam Leffler, Errno Consulting * Copyright (c) 2003 Global Technology Associates, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * SafeNet SafeXcel-1141 hardware crypto accelerator */ #include "opt_safe.h" #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SAFE_RNDTEST #include #endif #include #include #ifndef bswap32 #define bswap32 NTOHL #endif /* * Prototypes and count for the pci_device structure */ static int safe_probe(device_t); static int safe_attach(device_t); static int safe_detach(device_t); static int safe_suspend(device_t); static int safe_resume(device_t); static void safe_shutdown(device_t); static device_method_t safe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, safe_probe), DEVMETHOD(device_attach, safe_attach), DEVMETHOD(device_detach, safe_detach), DEVMETHOD(device_suspend, safe_suspend), DEVMETHOD(device_resume, safe_resume), DEVMETHOD(device_shutdown, safe_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t safe_driver = { "safe", safe_methods, sizeof (struct safe_softc) }; static devclass_t safe_devclass; DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0); MODULE_DEPEND(safe, crypto, 1, 1, 1); #ifdef SAFE_RNDTEST MODULE_DEPEND(safe, rndtest, 1, 1, 1); #endif static void safe_intr(void *); static int safe_newsession(void *, u_int32_t *, struct cryptoini *); static int safe_freesession(void *, u_int64_t); static int safe_process(void *, struct cryptop *, int); static void safe_callback(struct safe_softc *, struct safe_ringentry *); static void safe_feed(struct safe_softc *, struct safe_ringentry *); static void safe_mcopy(struct mbuf *, struct mbuf *, u_int); #ifndef SAFE_NO_RNG static void safe_rng_init(struct safe_softc *); static void safe_rng(void *); #endif /* SAFE_NO_RNG */ static int 
safe_dma_malloc(struct safe_softc *, bus_size_t, struct safe_dma_alloc *, int); #define safe_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *); static int safe_dmamap_aligned(const struct safe_operand *); static int safe_dmamap_uniform(const struct safe_operand *); static void safe_reset_board(struct safe_softc *); static void safe_init_board(struct safe_softc *); static void safe_init_pciregs(device_t dev); static void safe_cleanchip(struct safe_softc *); static void safe_totalreset(struct safe_softc *); static int safe_free_entry(struct safe_softc *, struct safe_ringentry *); SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters"); #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *, const char *); static void safe_dump_ringstate(struct safe_softc *, const char *); static void safe_dump_intrstate(struct safe_softc *, const char *); static void safe_dump_request(struct safe_softc *, const char *, struct safe_ringentry *); static struct safe_softc *safec; /* for use by hw.safe.dump */ static int safe_debug = 0; SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug, 0, "control debugging msgs"); #define DPRINTF(_x) if (safe_debug) printf _x #else #define DPRINTF(_x) #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) struct safe_stats safestats; SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats, safe_stats, "driver statistics"); #ifndef SAFE_NO_RNG static int safe_rnginterval = 1; /* poll once a second */ SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval, 0, "RNG polling interval (secs)"); static int safe_rngbufsize = 16; /* 64 bytes each poll */ SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize, 0, "RNG polling buffer size (32-bit words)"); 
static int safe_rngmaxalarm = 8; /* max alarms before reset */ SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm, 0, "RNG max alarms before reset"); #endif /* SAFE_NO_RNG */ static int safe_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET && pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL) return (0); return (ENXIO); } static const char* safe_partname(struct safe_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_SAFENET: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141"; } return "SafeNet unknown-part"; } return "Unknown-vendor unknown-part"; } #ifndef SAFE_NO_RNG static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE); } #endif /* SAFE_NO_RNG */ static int safe_attach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); u_int32_t raddr; u_int32_t cmd, i, devinfo; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; /* XXX handle power management */ cmd = pci_read_config(dev, PCIR_COMMAND, 4); cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cmd, 4); cmd = pci_read_config(dev, PCIR_COMMAND, 4); if (!(cmd & PCIM_CMD_MEMEN)) { device_printf(dev, "failed to enable memory mapping\n"); goto bad; } if (!(cmd & PCIM_CMD_BUSMASTEREN)) { device_printf(dev, "failed to enable bus mastering\n"); goto bad; } /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. 
*/ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, safe_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } sc->sc_cid = crypto_get_driverid(0); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); /* * Setup DMA descriptor area. */ if (bus_dma_tag_create(NULL, /* parent */ 1, /* alignment */ SAFE_DMA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_SSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_srcdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } if (bus_dma_tag_create(NULL, /* parent */ sizeof(u_int32_t), /* alignment */ SAFE_MAX_DSIZE, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_DSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_dstdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } /* * Allocate packet engine descriptors. */ if (safe_dma_malloc(sc, SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry), &sc->sc_ringalloc, 0)) { device_printf(dev, "cannot allocate PE descriptor ring\n"); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } /* * Hookup the static portion of all our data structures. 
*/ sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr; sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE; sc->sc_front = sc->sc_ring; sc->sc_back = sc->sc_ring; raddr = sc->sc_ringalloc.dma_paddr; bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry)); for (i = 0; i < SAFE_MAX_NQUEUE; i++) { struct safe_ringentry *re = &sc->sc_ring[i]; re->re_desc.d_sa = raddr + offsetof(struct safe_ringentry, re_sa); re->re_sa.sa_staterec = raddr + offsetof(struct safe_ringentry, re_sastate); raddr += sizeof (struct safe_ringentry); } mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev), "packet engine ring", MTX_DEF); /* * Allocate scatter and gather particle descriptors. */ if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), &sc->sc_spalloc, 0)) { device_printf(dev, "cannot allocate source particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr; sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART; sc->sc_spfree = sc->sc_spring; bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc)); if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc), &sc->sc_dpalloc, 0)) { device_printf(dev, "cannot allocate destination particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_spalloc); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_dstdmat); goto bad4; } sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr; sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART; sc->sc_dpfree = sc->sc_dpring; bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc)); device_printf(sc->sc_dev, "%s", safe_partname(sc)); devinfo = READ_REG(sc, SAFE_DEVINFO); if (devinfo & SAFE_DEVINFO_RNG) { sc->sc_flags |= SAFE_FLAGS_RNG; printf(" rng"); } if (devinfo & SAFE_DEVINFO_PKEY) { #if 0 printf(" key"); sc->sc_flags |= 
SAFE_FLAGS_KEY; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, safe_kprocess, sc); crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0, safe_kprocess, sc); #endif } if (devinfo & SAFE_DEVINFO_DES) { printf(" des/3des"); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); } if (devinfo & SAFE_DEVINFO_AES) { printf(" aes"); crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); } if (devinfo & SAFE_DEVINFO_MD5) { printf(" md5"); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); } if (devinfo & SAFE_DEVINFO_SHA1) { printf(" sha1"); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); } printf(" null"); crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0, safe_newsession, safe_freesession, safe_process, sc); /* XXX other supported algorithms */ printf("\n"); safe_reset_board(sc); /* reset h/w */ safe_init_pciregs(dev); /* init pci settings */ safe_init_board(sc); /* init h/w */ #ifndef SAFE_NO_RNG if (sc->sc_flags & SAFE_FLAGS_RNG) { #ifdef SAFE_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif safe_rng_init(sc); callout_init(&sc->sc_rngto, CALLOUT_MPSAFE); callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc); } #endif /* SAFE_NO_RNG */ #ifdef SAFE_DEBUG safec = sc; /* for use by hw.safe.dump */ #endif return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, 
	    sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 *
 * Quiesces the hardware (mask interrupts), unhooks from the crypto
 * framework, then releases DMA and bus resources in reverse order of
 * their acquisition in attach.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* reclaim ring state and the shared DMA areas */
	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine.
 */
static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This insures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				/* stop at the first entry the chip has not
				 * finished; both csr and len must be marked */
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
			READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 *
 * Syncs the per-request DMA maps and the shared rings, then pokes the
 * descriptor-ring doorbell register.  Caller holds sc_ringmtx.
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
			BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can
	   be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 *
 * Validates the cipher/MAC combination and key lengths, (re)allocates
 * the session table as needed, and precomputes the HMAC inner/outer
 * digest state so per-packet processing need not touch the raw key.
 */
static int
safe_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
#define	N(a)	(sizeof(a) / sizeof (a[0]))
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_softc *sc = arg;
	struct safe_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* accept at most one cipher and one MAC descriptor */
	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return (EINVAL);
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return (EINVAL);
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return (EINVAL);
			break;
		}
	}

	if (sc->sc_sessions == NULL) {
		/* first session: allocate a one-entry table */
		ses = sc->sc_sessions = (struct safe_session *)malloc(
		    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		/* reuse a free slot if one exists */
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			/* no free slot: grow the table by one entry */
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)malloc((sesn + 1) *
			    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct safe_session));
			/* scrub key material before releasing old table */
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct safe_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct safe_session));
	ses->ses_used = 1;

	if (encini) {
		/* get an IV */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		ses->ses_klen = encini->cri_klen;
		bcopy(encini->cri_key, ses->ses_key, ses->ses_klen / 8);

		/* PE is little-endian, insure proper byte order */
		for (i = 0; i < N(ses->ses_key); i++)
			ses->ses_key[i] = htole32(ses->ses_key[i]);
	}

	if (macini) {
		/* precompute the HMAC inner digest (key XOR ipad) */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hminner,
			    sizeof(sha1ctx.h.b32));
		}

		/* flip key from ipad to opad for the outer digest */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hmouter,
			    sizeof(sha1ctx.h.b32));
		}

		/* restore the caller's key to its original value */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;

		/* PE is little-endian, insure proper byte order */
		for (i = 0; i < N(ses->ses_hminner); i++) {
			ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
			ses->ses_hmouter[i] =
			    htole32(ses->ses_hmouter[i]);
		}
	}

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
#undef N
}

/*
 * Deallocate a session.
 */
static int
safe_freesession(void *arg, u_int64_t tid)
{
	struct safe_softc *sc = arg;
	int session, ret;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sc == NULL)
		return (EINVAL);

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		/* zeroing also clears ses_used, freeing the slot */
		bzero(&sc->sc_sessions[session],
			sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}

/*
 * busdma callback for operand maps: stash the segment list and total
 * mapped size in the safe_operand for later descriptor construction.
 */
static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs,
	bus_size_t mapsize, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
		(u_int) mapsize, nsegs, error));
	if (error != 0)
		return;
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/*
 * Process a crypto request: claim a ring entry, build the SA and PE
 * descriptor from the request, map the operands for DMA, and feed the
 * entry to the chip.  Returns ERESTART when the ring is full.
 */
static int
safe_process(void *arg, struct cryptop *crp, int hint)
{
	int err = 0, i, nicealign, uniform;
	struct safe_softc *sc = arg;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		re->re_src_m = (struct mbuf *)crp->crp_buf;
		re->re_dst_m =
(struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { re->re_src_io = (struct uio *)crp->crp_buf; re->re_dst_io = (struct uio *)crp->crp_buf; } else { safestats.st_badflags++; err = EINVAL; goto errout; /* XXX we don't handle contiguous blocks! */ } sa = &re->re_sa; ses = &sc->sc_sessions[re->re_sesn]; crd1 = crp->crp_desc; if (crd1 == NULL) { safestats.st_nodesc++; err = EINVAL; goto errout; } crd2 = crd1->crd_next; cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */ cmd1 = 0; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_NULL_HMAC) { maccrd = crd1; enccrd = NULL; cmd0 |= SAFE_SA_CMD0_OP_HASH; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_NULL_CBC) { maccrd = NULL; enccrd = crd1; cmd0 |= SAFE_SA_CMD0_OP_CRYPT; } else { safestats.st_badalg++; err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_NULL_HMAC) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_NULL_CBC) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_NULL_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_NULL_HMAC) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { safestats.st_badalg++; err = EINVAL; goto errout; } cmd0 |= SAFE_SA_CMD0_OP_BOTH; } if (enccrd) { if (enccrd->crd_alg == CRYPTO_DES_CBC) { cmd0 |= SAFE_SA_CMD0_DES; cmd1 |= SAFE_SA_CMD1_CBC; ivsize = 2*sizeof(u_int32_t); } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) { cmd0 |= SAFE_SA_CMD0_3DES; cmd1 |= SAFE_SA_CMD1_CBC; ivsize = 
2*sizeof(u_int32_t); } else if (enccrd->crd_alg == CRYPTO_AES_CBC) { cmd0 |= SAFE_SA_CMD0_AES; cmd1 |= SAFE_SA_CMD1_CBC; if (ses->ses_klen == 128) cmd1 |= SAFE_SA_CMD1_AES128; else if (ses->ses_klen == 192) cmd1 |= SAFE_SA_CMD1_AES192; else cmd1 |= SAFE_SA_CMD1_AES256; ivsize = 4*sizeof(u_int32_t); } else { cmd0 |= SAFE_SA_CMD0_CRYPT_NULL; ivsize = 0; } /* * Setup encrypt/decrypt state. When using basic ops * we can't use an inline IV because hash/crypt offset * must be from the end of the IV to the start of the * crypt data and this leaves out the preceding header * from the hash calculation. Instead we place the IV * in the state record and set the hash/crypt offset to * copy both the header+IV. */ if (enccrd->crd_flags & CRD_F_ENCRYPT) { cmd0 |= SAFE_SA_CMD0_OUTBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) iv = enccrd->crd_iv; else iv = (caddr_t) ses->ses_iv; if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback(re->re_src_m, enccrd->crd_inject, ivsize, iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copyback(re->re_src_io, enccrd->crd_inject, ivsize, iv); } bcopy(iv, re->re_sastate.sa_saved_iv, ivsize); cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV; re->re_flags |= SAFE_QFLAGS_COPYOUTIV; } else { cmd0 |= SAFE_SA_CMD0_INBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, re->re_sastate.sa_saved_iv, ivsize); else if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata(re->re_src_m, enccrd->crd_inject, ivsize, (caddr_t)re->re_sastate.sa_saved_iv); else if (crp->crp_flags & CRYPTO_F_IOV) cuio_copydata(re->re_src_io, enccrd->crd_inject, ivsize, (caddr_t)re->re_sastate.sa_saved_iv); cmd0 |= SAFE_SA_CMD0_IVLD_STATE; } /* * For basic encryption use the zero pad algorithm. * This pads results to an 8-byte boundary and * suppresses padding verification for inbound (i.e. * decrypt) operations. * * NB: Not sure if the 8-byte pad boundary is a problem. 
*/ cmd0 |= SAFE_SA_CMD0_PAD_ZERO; /* XXX assert key bufs have the same size */ bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key)); } if (maccrd) { if (maccrd->crd_alg == CRYPTO_MD5_HMAC) { cmd0 |= SAFE_SA_CMD0_MD5; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) { cmd0 |= SAFE_SA_CMD0_SHA1; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ } else { cmd0 |= SAFE_SA_CMD0_HASH_NULL; } /* * Digest data is loaded from the SA and the hash * result is saved to the state block where we * retrieve it for return to the caller. */ /* XXX assert digest bufs have the same size */ bcopy(ses->ses_hminner, sa->sa_indigest, sizeof(sa->sa_indigest)); bcopy(ses->ses_hmouter, sa->sa_outdigest, sizeof(sa->sa_outdigest)); cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH; re->re_flags |= SAFE_QFLAGS_COPYOUTICV; } if (enccrd && maccrd) { /* * The offset from hash data to the start of * crypt data is the difference in the skips. */ bypass = maccrd->crd_skip; coffset = enccrd->crd_skip - maccrd->crd_skip; if (coffset < 0) { DPRINTF(("%s: hash does not precede crypt; " "mac skip %u enc skip %u\n", __func__, maccrd->crd_skip, enccrd->crd_skip)); safestats.st_skipmismatch++; err = EINVAL; goto errout; } oplen = enccrd->crd_skip + enccrd->crd_len; if (maccrd->crd_skip + maccrd->crd_len != oplen) { DPRINTF(("%s: hash amount %u != crypt amount %u\n", __func__, maccrd->crd_skip + maccrd->crd_len, oplen)); safestats.st_lenmismatch++; err = EINVAL; goto errout; } #ifdef SAFE_DEBUG if (safe_debug) { printf("mac: skip %d, len %d, inject %d\n", maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); printf("enc: skip %d, len %d, inject %d\n", enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); printf("bypass %d coffset %d oplen %d\n", bypass, coffset, oplen); } #endif if (coffset & 3) { /* offset must be 32-bit aligned */ DPRINTF(("%s: coffset %u misaligned\n", __func__, coffset)); safestats.st_coffmisaligned++; err = EINVAL; goto errout; } 
coffset >>= 2; if (coffset > 255) { /* offset must be <256 dwords */ DPRINTF(("%s: coffset %u too big\n", __func__, coffset)); safestats.st_cofftoobig++; err = EINVAL; goto errout; } /* * Tell the hardware to copy the header to the output. * The header is defined as the data from the end of * the bypass to the start of data to be encrypted. * Typically this is the inline IV. Note that you need * to do this even if src+dst are the same; it appears * that w/o this bit the crypted data is written * immediately after the bypass data. */ cmd1 |= SAFE_SA_CMD1_HDRCOPY; /* * Disable IP header mutable bit handling. This is * needed to get correct HMAC calculations. */ cmd1 |= SAFE_SA_CMD1_MUTABLE; } else { if (enccrd) { bypass = enccrd->crd_skip; oplen = bypass + enccrd->crd_len; } else { bypass = maccrd->crd_skip; oplen = bypass + maccrd->crd_len; } coffset = 0; } /* XXX verify multiple of 4 when using s/g */ if (bypass > 96) { /* bypass offset must be <= 96 bytes */ DPRINTF(("%s: bypass %u too big\n", __func__, bypass)); safestats.st_bypasstoobig++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map, re->re_src_m, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map, re->re_src_io, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); DPRINTF(("src nicealign %u uniform %u nsegs %u\n", nicealign, uniform, re->re_src.nsegs)); if (re->re_src.nsegs > 1) 
{ re->re_desc.d_src = sc->sc_spalloc.dma_paddr + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring); for (i = 0; i < re->re_src_nsegs; i++) { /* NB: no need to check if there's space */ pd = sc->sc_spfree; if (++(sc->sc_spfree) == sc->sc_springtop) sc->sc_spfree = sc->sc_spring; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus source particle descriptor; flags %x", pd->pd_flags)); pd->pd_addr = re->re_src_segs[i].ds_addr; pd->pd_size = re->re_src_segs[i].ds_len; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_IGATHER; } else { /* * No need for gather, reference the operand directly. */ re->re_desc.d_src = re->re_src_segs[0].ds_addr; } if (enccrd == NULL && maccrd != NULL) { /* * Hash op; no destination needed. */ } else { if (crp->crp_flags & CRYPTO_F_IOV) { if (!nicealign) { safestats.st_iovmisaligned++; err = EINVAL; goto errout; } if (uniform != 1) { /* * Source is not suitable for direct use as * the destination. Create a new scatter/gather * list based on the destination requirements * and check if that's ok. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_uio(sc->sc_dstdmat, re->re_dst_map, re->re_dst_io, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } uniform = safe_dmamap_uniform(&re->re_dst); if (!uniform) { /* * There's no way to handle the DMA * requirements with this uio. We * could create a separate DMA area for * the result and then copy it back, * but for now we just bail and return * an error. Note that uio requests * > SAFE_MAX_DSIZE are handled because * the DMA map and segment list for the * destination wil result in a * destination particle list that does * the necessary scatter DMA. 
*/ safestats.st_iovnotuniform++; err = EINVAL; goto errout; } } else re->re_dst = re->re_src; } else if (crp->crp_flags & CRYPTO_F_IMBUF) { if (nicealign && uniform == 1) { /* * Source layout is suitable for direct * sharing of the DMA map and segment list. */ re->re_dst = re->re_src; } else if (nicealign && uniform == 2) { /* * The source is properly aligned but requires a * different particle list to handle DMA of the * result. Create a new map and do the load to * create the segment list. The particle * descriptor setup code below will handle the * rest. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dstdmat, re->re_dst_map, re->re_dst_m, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else { /* !(aligned and/or uniform) */ int totlen, len; struct mbuf *m, *top, **mp; /* * DMA constraints require that we allocate a * new mbuf chain for the destination. We * allocate an entire new set of mbufs of * optimal/required size and then tell the * hardware to copy any bits that are not * created as a byproduct of the operation. */ if (!nicealign) safestats.st_unaligned++; if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; if (re->re_src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m && !m_dup_pkthdr(m, re->re_src_m, M_DONTWAIT)) { m_free(m); m = NULL; } } else { len = MLEN; MGET(m, M_DONTWAIT, MT_DATA); } if (m == NULL) { safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } if (totlen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_free(m); safestats.st_nomcl++; err = sc->sc_nqchip ? 
ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len; top = NULL; mp = ⊤ while (totlen > 0) { if (top) { MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(top); safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MLEN; } if (top && totlen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { *mp = m; m_freem(top); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } re->re_dst_m = top; if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map) != 0) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dstdmat, re->re_dst_map, re->re_dst_m, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } if (re->re_src.mapsize > oplen) { /* * There's data following what the * hardware will copy for us. If this * isn't just the ICV (that's going to * be written on completion), copy it * to the new mbufs */ if (!(maccrd && (re->re_src.mapsize-oplen) == 12 && maccrd->crd_inject == oplen)) safe_mcopy(re->re_src_m, re->re_dst_m, oplen); else safestats.st_noicvcopy++; } } } else { safestats.st_badflags++; err = EINVAL; goto errout; } if (re->re_dst.nsegs > 1) { re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring); for (i = 0; i < re->re_dst_nsegs; i++) { pd = sc->sc_dpfree; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus dest particle descriptor; flags %x", pd->pd_flags)); if (++(sc->sc_dpfree) == sc->sc_dpringtop) sc->sc_dpfree = sc->sc_dpring; pd->pd_addr = re->re_dst_segs[i].ds_addr; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_OSCATTER; } else { /* * No need for scatter, reference the operand directly. 
*/ re->re_desc.d_dst = re->re_dst_segs[0].ds_addr; } } /* * All done with setup; fillin the SA command words * and the packet engine descriptor. The operation * is now ready for submission to the hardware. */ sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI; sa->sa_cmd1 = cmd1 | (coffset << SAFE_SA_CMD1_OFFSET_S) | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */ | SAFE_SA_CMD1_SRPCI ; /* * NB: the order of writes is important here. In case the * chip is scanning the ring because of an outstanding request * it might nab this one too. In that case we need to make * sure the setup is complete before we write the length * field of the descriptor as it signals the descriptor is * ready for processing. */ re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI; if (maccrd) re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL; re->re_desc.d_len = oplen | SAFE_PE_LEN_READY | (bypass << SAFE_PE_LEN_BYPASS_S) ; safestats.st_ipackets++; safestats.st_ibytes += oplen; if (++(sc->sc_front) == sc->sc_ringtop) sc->sc_front = sc->sc_ring; /* XXX honor batching */ safe_feed(sc, re); mtx_unlock(&sc->sc_ringmtx); return (0); errout: if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m)) m_freem(re->re_dst_m); if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } if (re->re_src_map != NULL) { bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); } mtx_unlock(&sc->sc_ringmtx); if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } else { sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void safe_callback(struct safe_softc *sc, struct safe_ringentry *re) { struct cryptop *crp = (struct cryptop *)re->re_crp; struct cryptodesc *crd; safestats.st_opackets++; safestats.st_obytes += re->re_dst.mapsize; safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 
if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) { device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n", re->re_desc.d_csr, re->re_sa.sa_cmd0, re->re_sa.sa_cmd1); safestats.st_peoperr++; crp->crp_etype = EIO; /* something more meaningful? */ } if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); /* * If result was written to a differet mbuf chain, swap * it in as the return value and reclaim the original. */ if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) { m_freem(re->re_src_m); crp->crp_buf = (caddr_t)re->re_dst_m; } if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) { /* copy out IV for future use */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int ivsize; if (crd->crd_alg == CRYPTO_DES_CBC || crd->crd_alg == CRYPTO_3DES_CBC) { ivsize = 2*sizeof(u_int32_t); } else if (crd->crd_alg == CRYPTO_AES_CBC) { ivsize = 4*sizeof(u_int32_t); } else continue; if (crp->crp_flags & CRYPTO_F_IMBUF) { m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip + crd->crd_len - ivsize, ivsize, (caddr_t) sc->sc_sessions[re->re_sesn].ses_iv); } else if (crp->crp_flags & CRYPTO_F_IOV) { cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip + crd->crd_len - ivsize, ivsize, (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv); } break; } } if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) { /* copy out ICV result */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (!(crd->crd_alg == CRYPTO_MD5_HMAC || crd->crd_alg == CRYPTO_SHA1_HMAC || crd->crd_alg == CRYPTO_NULL_HMAC)) continue; if (crd->crd_alg == CRYPTO_SHA1_HMAC) { /* * SHA-1 ICV's are byte-swapped; fix 'em up * before copy them to their destination. 
*/ bswap32(re->re_sastate.sa_saved_indigest[0]); bswap32(re->re_sastate.sa_saved_indigest[1]); bswap32(re->re_sastate.sa_saved_indigest[2]); } if (crp->crp_flags & CRYPTO_F_IMBUF) { m_copyback((struct mbuf *)crp->crp_buf, crd->crd_inject, 12, (caddr_t)re->re_sastate.sa_saved_indigest); } else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) { bcopy((caddr_t)re->re_sastate.sa_saved_indigest, crp->crp_mac, 12); } break; } } crypto_done(crp); } /* * Copy all data past offset from srcm to dstm. */ static void safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset) { u_int j, dlen, slen; caddr_t dptr, sptr; /* * Advance src and dst to offset. */ j = offset; while (j >= 0) { if (srcm->m_len > j) break; j -= srcm->m_len; srcm = srcm->m_next; if (srcm == NULL) return; } sptr = mtod(srcm, caddr_t) + j; slen = srcm->m_len - j; j = offset; while (j >= 0) { if (dstm->m_len > j) break; j -= dstm->m_len; dstm = dstm->m_next; if (dstm == NULL) return; } dptr = mtod(dstm, caddr_t) + j; dlen = dstm->m_len - j; /* * Copy everything that remains. */ for (;;) { j = min(slen, dlen); bcopy(sptr, dptr, j); if (slen == j) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } else sptr += j, slen -= j; if (dlen == j) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } else dptr += j, dlen -= j; } } #ifndef SAFE_NO_RNG #define SAFE_RNG_MAXWAIT 1000 static void safe_rng_init(struct safe_softc *sc) { u_int32_t w, v; int i; WRITE_REG(sc, SAFE_RNG_CTRL, 0); /* use default value according to the manual */ WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); /* * There is a bug in rev 1.0 of the 1140 that when the RNG * is brought out of reset the ready status flag does not * work until the RNG has finished its internal initialization. 
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

/* Clear the short-cycle bit in the RNG control register. */
static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

/* Set the short-cycle bit in the RNG control register. */
static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

/*
 * Spin until the RNG status register reads zero (or the wait limit is
 * reached), then return one word from the RNG output register.
 */
static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

/*
 * Periodic callout: read a buffer of words from the RNG, recalibrate
 * the hardware if the comparator alarm count is excessive, hand the
 * data to the harvest routine, and re-arm the callout.
 */
static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;
		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		/* step the oscillator frequency field until the alarm
		 * count clears, then retry the read */
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
		hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */

/* busdma load callback: record the (single) segment's bus address. */
static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

/*
 * Allocate a DMA-able chunk of memory of the given size: creates a
 * tag, map, and allocation, and loads it to obtain the bus address.
 * On failure all partially-acquired resources are released.
 */
static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(NULL,			/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_create failed; error %u\n", r);
		goto fail_1;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmammem_alloc failed; size %zu, error %u\n",
			size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
		            size,
			    safe_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

/* Release everything acquired by safe_dma_malloc(). */
static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);;
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part.
desc's */ ; WRITE_REG(sc, SAFE_PE_DMACFG, v); #if 0 /* XXX select byte swap based on host byte order */ WRITE_REG(sc, SAFE_ENDIAN, 0x1b); #endif if (sc->sc_chiprev == SAFE_REV(1,0)) { /* * Avoid large PCI DMA transfers. Rev 1.0 has a bug where * "target mode transfers" done while the chip is DMA'ing * >1020 bytes cause the hardware to lockup. To avoid this * we reduce the max PCI transfer size and use small source * particle descriptors (<= 256 bytes). */ WRITE_REG(sc, SAFE_DMA_CFG, 256); device_printf(sc->sc_dev, "Reduce max DMA size to %u words for rev %u.%u WAR\n", (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff, SAFE_REV_MAJ(sc->sc_chiprev), SAFE_REV_MIN(sc->sc_chiprev)); } /* NB: operands+results are overlaid */ WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr); /* * Configure ring entry size and number of items in the ring. */ KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0, ("PE ring entry not 32-bit aligned!")); dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t); WRITE_REG(sc, SAFE_PE_RINGCFG, (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE); WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */ WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_PARTSIZE, (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART); /* * NB: destination particles are fixed size. We use * an mbuf cluster and require all results go to * clusters or smaller. */ WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE); /* it's now safe to enable PE mode, do it */ WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE); /* * Configure hardware to use level-triggered interrupts and * to interrupt after each descriptor is processed. 
 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	/* Unmask only the interrupts we service: descriptor-done + PE error. */
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 *
 * Intentionally empty; kept as a hook for the bus front-end.
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 *
 * Walk the active portion of the ring (back..front), fail every
 * request still owned by the hardware, then mark the ring empty.
 */
static void
safe_cleanchip(struct safe_softc *sc)
{
	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			/* d_csr == 0 means the slot holds no pending request */
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)	/* ring wrap-around */
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 *
 * Release the ring entry's destination mbuf (when distinct from the
 * source) and complete its crypto request with EFAULT so the caller
 * above us sees the failure.
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;		/* mark the slot free */

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA.  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.
This routine returns * 0 if some segment is not a multiple of of this size, 1 if all * segments are exactly this size, or 2 if segments are at worst * a multple of this size. */ static int safe_dmamap_uniform(const struct safe_operand *op) { int result = 1; if (op->nsegs > 0) { int i; for (i = 0; i < op->nsegs-1; i++) { if (op->segs[i].ds_len % SAFE_MAX_DSIZE) return (0); if (op->segs[i].ds_len != SAFE_MAX_DSIZE) result = 2; } } return (result); } #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *sc, const char *tag) { printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n" , tag , READ_REG(sc, SAFE_DMA_ENDIAN) , READ_REG(sc, SAFE_DMA_SRCADDR) , READ_REG(sc, SAFE_DMA_DSTADDR) , READ_REG(sc, SAFE_DMA_STAT) ); } static void safe_dump_intrstate(struct safe_softc *sc, const char *tag) { printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n" , tag , READ_REG(sc, SAFE_HI_CFG) , READ_REG(sc, SAFE_HI_MASK) , READ_REG(sc, SAFE_HI_DESC_CNT) , READ_REG(sc, SAFE_HU_STAT) , READ_REG(sc, SAFE_HM_STAT) ); } static void safe_dump_ringstate(struct safe_softc *sc, const char *tag) { u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT); /* NB: assume caller has lock on ring */ printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n", tag, estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S), (unsigned long)(sc->sc_back - sc->sc_ring), (unsigned long)(sc->sc_front - sc->sc_ring)); } static void safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re) { int ix, nsegs; ix = re - sc->sc_ring; printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n" , tag , re, ix , re->re_desc.d_csr , re->re_desc.d_src , re->re_desc.d_dst , re->re_desc.d_sa , re->re_desc.d_len ); if (re->re_src.nsegs > 1) { ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_src.nsegs; nsegs; nsegs--) { printf(" spd[%u] %p: %p size %u flags %x" , ix, &sc->sc_spring[ix] , (caddr_t)(uintptr_t) 
sc->sc_spring[ix].pd_addr , sc->sc_spring[ix].pd_size , sc->sc_spring[ix].pd_flags ); if (sc->sc_spring[ix].pd_size == 0) printf(" (zero!)"); printf("\n"); if (++ix == SAFE_TOTAL_SPART) ix = 0; } } if (re->re_dst.nsegs > 1) { ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) { printf(" dpd[%u] %p: %p flags %x\n" , ix, &sc->sc_dpring[ix] , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr , sc->sc_dpring[ix].pd_flags ); if (++ix == SAFE_TOTAL_DPART) ix = 0; } } printf("sa: cmd0 %08x cmd1 %08x staterec %x\n", re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec); printf("sa: key %x %x %x %x %x %x %x %x\n" , re->re_sa.sa_key[0] , re->re_sa.sa_key[1] , re->re_sa.sa_key[2] , re->re_sa.sa_key[3] , re->re_sa.sa_key[4] , re->re_sa.sa_key[5] , re->re_sa.sa_key[6] , re->re_sa.sa_key[7] ); printf("sa: indigest %x %x %x %x %x\n" , re->re_sa.sa_indigest[0] , re->re_sa.sa_indigest[1] , re->re_sa.sa_indigest[2] , re->re_sa.sa_indigest[3] , re->re_sa.sa_indigest[4] ); printf("sa: outdigest %x %x %x %x %x\n" , re->re_sa.sa_outdigest[0] , re->re_sa.sa_outdigest[1] , re->re_sa.sa_outdigest[2] , re->re_sa.sa_outdigest[3] , re->re_sa.sa_outdigest[4] ); printf("sr: iv %x %x %x %x\n" , re->re_sastate.sa_saved_iv[0] , re->re_sastate.sa_saved_iv[1] , re->re_sastate.sa_saved_iv[2] , re->re_sastate.sa_saved_iv[3] ); printf("sr: hashbc %u indigest %x %x %x %x %x\n" , re->re_sastate.sa_saved_hashbc , re->re_sastate.sa_saved_indigest[0] , re->re_sastate.sa_saved_indigest[1] , re->re_sastate.sa_saved_indigest[2] , re->re_sastate.sa_saved_indigest[3] , re->re_sastate.sa_saved_indigest[4] ); } static void safe_dump_ring(struct safe_softc *sc, const char *tag) { mtx_lock(&sc->sc_ringmtx); printf("\nSafeNet Ring State:\n"); safe_dump_intrstate(sc, tag); safe_dump_dmastatus(sc, tag); safe_dump_ringstate(sc, tag); if (sc->sc_nqchip) { struct safe_ringentry *re = sc->sc_back; do { safe_dump_request(sc, tag, re); if (++re 
== sc->sc_ringtop) re = sc->sc_ring; } while (re != sc->sc_front); } mtx_unlock(&sc->sc_ringmtx); } static int sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS) { char dmode[64]; int error; strncpy(dmode, "", sizeof(dmode) - 1); dmode[sizeof(dmode) - 1] = '\0'; error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req); if (error == 0 && req->newptr != NULL) { struct safe_softc *sc = safec; if (!sc) return EINVAL; if (strncmp(dmode, "dma", 3) == 0) safe_dump_dmastatus(sc, "safe0"); else if (strncmp(dmode, "int", 3) == 0) safe_dump_intrstate(sc, "safe0"); else if (strncmp(dmode, "ring", 4) == 0) safe_dump_ring(sc, "safe0"); else return EINVAL; } return error; } SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state"); #endif /* SAFE_DEBUG */ Index: head/sys/dev/sbsh/if_sbsh.c =================================================================== --- head/sys/dev/sbsh/if_sbsh.c (revision 129878) +++ head/sys/dev/sbsh/if_sbsh.c (revision 129879) @@ -1,1061 +1,1062 @@ /*- * Granch SBNI16 G.SHDSL Modem driver * Written by Denis I. Timofeev, 2002-2003. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* -------------------------------------------------------------------------- */ struct sbni16_hw_regs { u_int8_t CR, CRB, SR, IMR, CTDR, LTDR, CRDR, LRDR; }; struct hw_descr { u_int32_t address; u_int32_t length; }; struct cx28975_cmdarea { u_int8_t intr_host; u_int8_t intr_8051; u_int8_t map_version; u_int8_t in_dest; u_int8_t in_opcode; u_int8_t in_zero; u_int8_t in_length; u_int8_t in_csum; u_int8_t in_data[75]; u_int8_t in_datasum; u_int8_t out_dest; u_int8_t out_opcode; u_int8_t out_ack; u_int8_t out_length; u_int8_t out_csum; u_int8_t out_data[75]; u_int8_t out_datasum; }; #define XQLEN 8 #define RQLEN 8 struct sbsh_softc { struct arpcom arpcom; /* ethernet common */ struct resource *mem_res; struct resource *irq_res; void *intr_hand; void *mem_base; /* mapped memory address */ volatile struct sbni16_hw_regs *regs; volatile struct hw_descr *tbd; volatile struct hw_descr *rbd; volatile struct cx28975_cmdarea *cmdp; /* SBNI16 controller statistics */ struct sbni16_stats { u_int32_t sent_pkts, rcvd_pkts; u_int32_t crc_errs, ufl_errs, ofl_errs, attempts, last_time; } in_stats; /* transmit and reception queues */ struct mbuf *xq[XQLEN], *rq[RQLEN]; unsigned head_xq, tail_xq, head_rq, 
tail_rq; /* the descriptors mapped onto the first buffers in xq and rq */ unsigned head_tdesc, head_rdesc; u_int8_t state; }; struct cx28975_cfg { u_int8_t *firmw_image; u_int32_t firmw_len; u_int32_t lrate: 10; u_int32_t master: 1; u_int32_t mod: 2; u_int32_t crc16: 1; u_int32_t fill_7e: 1; u_int32_t inv: 1; u_int32_t rburst: 1; u_int32_t wburst: 1; u_int32_t : 14; }; /* SHDSL transceiver statistics */ struct dsl_stats { u_int8_t status_1, status_3; u_int8_t attenuat, nmr, tpbo, rpbo; u_int16_t losw, segd, crc, sega, losd; }; enum State { NOT_LOADED, DOWN, ACTIVATION, ACTIVE }; #define SIOCLOADFIRMW _IOWR('i', 67, struct ifreq) #define SIOCGETSTATS _IOWR('i', 68, struct ifreq) #define SIOCCLRSTATS _IOWR('i', 69, struct ifreq) static int sbsh_probe(device_t); static int sbsh_attach(device_t); static int sbsh_detach(device_t); static int sbsh_ioctl(struct ifnet *, u_long, caddr_t); static void sbsh_shutdown(device_t); static int sbsh_suspend(device_t); static int sbsh_resume(device_t); static void sbsh_watchdog(struct ifnet *); static void sbsh_start(struct ifnet *); static void sbsh_init(void *); static void sbsh_stop(struct sbsh_softc *); static void init_card(struct sbsh_softc *); static void sbsh_intr(void *); static void resume_tx(struct sbsh_softc *); static void start_xmit_frames(struct sbsh_softc *); static void encap_frame(struct sbsh_softc *, struct mbuf *); static struct mbuf * repack(struct sbsh_softc *, struct mbuf *); static void free_sent_buffers(struct sbsh_softc *); static void alloc_rx_buffers(struct sbsh_softc *); static void indicate_frames(struct sbsh_softc *); static void drop_queues(struct sbsh_softc *); static void activate(struct sbsh_softc *); static void deactivate(struct sbsh_softc *); static void cx28975_interrupt(struct sbsh_softc *); static int start_cx28975(struct sbsh_softc *, struct cx28975_cfg); static int download_firmware(struct sbsh_softc *, u_int8_t *, u_int32_t); static int issue_cx28975_cmd(struct sbsh_softc *, u_int8_t, 
u_int8_t *, u_int8_t); static device_method_t sbsh_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sbsh_probe), DEVMETHOD(device_attach, sbsh_attach), DEVMETHOD(device_detach, sbsh_detach), DEVMETHOD(device_shutdown, sbsh_shutdown), DEVMETHOD(device_suspend, sbsh_suspend), DEVMETHOD(device_resume, sbsh_resume), { 0, 0 } }; static driver_t sbsh_driver = { "sbsh", sbsh_methods, sizeof(struct sbsh_softc) }; static devclass_t sbsh_devclass; DRIVER_MODULE(sbsh, pci, sbsh_driver, sbsh_devclass, 0, 0); MODULE_DEPEND(sbsh, pci, 1, 1, 1); static int sbsh_probe(device_t dev) { if (pci_get_vendor(dev) != SBNI16_VENDOR || pci_get_device(dev) != SBNI16_DEVICE || pci_get_subdevice(dev) != SBNI16_SUBDEV) return (ENXIO); device_set_desc(dev, "Granch SBNI16 G.SHDSL Modem"); return (0); } static int sbsh_attach(device_t dev) { struct sbsh_softc *sc; struct ifnet *ifp; int unit, error = 0, rid, s; s = splimp(); sc = device_get_softc(dev); unit = device_get_unit(dev); rid = PCIR_BAR(1); sc->mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 4096, RF_ACTIVE); if (sc->mem_res == NULL) { printf ("sbsh%d: couldn't map memory\n", unit); error = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { printf("sbsh%d: couldn't map interrupt\n", unit); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), sc->mem_res); error = ENXIO; goto fail; } sc->mem_base = rman_get_virtual(sc->mem_res); init_card(sc); error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, sbsh_intr, sc, &sc->intr_hand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), sc->mem_res); printf("sbsh%d: couldn't set up irq\n", unit); goto fail; } /* generate ethernet MAC address */ *(u_int32_t *)sc->arpcom.ac_enaddr = htonl(0x00ff0192); read_random(sc->arpcom.ac_enaddr + 4, 2); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, 
	    device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sbsh_ioctl;
	ifp->if_start = sbsh_start;
	ifp->if_watchdog = sbsh_watchdog;
	ifp->if_init = sbsh_init;
	ifp->if_baudrate = 4600000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	ether_ifattach(ifp, sc->arpcom.ac_enaddr);
fail:
	splx(s);
	return (error);
}

/*
 * Detach: stop the hardware, detach from the network stack, and
 * release the interrupt handler and bus resources acquired in
 * sbsh_attach().
 */
static int
sbsh_detach(device_t dev)
{
	struct sbsh_softc	*sc;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	sbsh_stop(sc);
	ether_ifdetach(ifp);

	bus_teardown_intr(dev, sc->irq_res, sc->intr_hand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), sc->mem_res);

	splx(s);
	return (0);
}

/*
 * if_start handler: kick the transmitter.  Does nothing until the
 * modem link has reached the ACTIVE state.
 */
static void
sbsh_start(struct ifnet *ifp)
{
	struct sbsh_softc  *sc = ifp->if_softc;
	int  s;

	if (sc->state != ACTIVE)
		return;
	s = splimp();
	start_xmit_frames(ifp->if_softc);
	splx(s);
}

/*
 * if_init handler: reset software queue state, clear statistics and
 * start line activation.  Requires firmware to have been loaded
 * (state != NOT_LOADED) and does nothing if already running.
 */
static void
sbsh_init(void *xsc)
{
	struct sbsh_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;
	u_int8_t		t;

	if ((ifp->if_flags & IFF_RUNNING) || sc->state == NOT_LOADED)
		return;

	s = splimp();
	bzero(&sc->in_stats, sizeof(struct sbni16_stats));
	sc->head_xq = sc->tail_xq = sc->head_rq = sc->tail_rq = 0;
	sc->head_tdesc = sc->head_rdesc = 0;

	/* enable only the modem (EXT) interrupt until the line comes up */
	sc->regs->IMR = EXT;
	t = 2;
	issue_cx28975_cmd(sc, _DSL_CLEAR_ERROR_CTRS, &t, 1);
	if (issue_cx28975_cmd(sc, _DSL_ACTIVATION, &t, 1) == 0) {
		sc->state = ACTIVATION;

		ifp->if_flags |= IFF_RUNNING;
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	splx(s);
}

/*
 * Stop activity: tell the transceiver to deactivate the line, mask
 * interrupts and mark the interface DOWN.
 */
static void
sbsh_stop(struct sbsh_softc *sc)
{
	int  s;
	u_int8_t  t;

	s = splimp();
	sc->regs->IMR = EXT;

	t = 0;
	issue_cx28975_cmd(sc, _DSL_ACTIVATION, &t, 1);
	if (sc->state == ACTIVE) {
		t = 1;
		issue_cx28975_cmd(sc, _DSL_FORCE_DEACTIVATE, &t, 1);
		/* FIX! activation manager state */
		/* Is it really must be done here?
It calls from intr handler */ deactivate(sc); } sc->regs->IMR = 0; sc->state = DOWN; splx(s); } static void init_card(struct sbsh_softc *sc) { sc->state = NOT_LOADED; sc->tbd = (struct hw_descr *) sc->mem_base; sc->rbd = (struct hw_descr *) ((u_int8_t *)sc->mem_base + 0x400); sc->regs = (struct sbni16_hw_regs *) ((u_int8_t *)sc->mem_base + 0x800); sc->cmdp = (struct cx28975_cmdarea *) ((u_int8_t *)sc->mem_base + 0xc00); sc->regs->CR = 0; sc->regs->SR = 0xff; sc->regs->IMR = 0; } static int sbsh_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct sbsh_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct cx28975_cfg cfg; struct dsl_stats ds; int s, error = 0; u_int8_t t; s = splimp(); switch(cmd) { case SIOCLOADFIRMW: if ((error = suser(curthread)) != 0) break; if (ifp->if_flags & IFF_UP) error = EBUSY; bcopy((caddr_t)ifr->ifr_data, (caddr_t)&cfg, sizeof cfg); if (start_cx28975(sc, cfg) == 0) { static char *modstr[] = { "TCPAM32", "TCPAM16", "TCPAM8", "TCPAM4" }; if_printf(&sc->arpcom.ac_if, "%s, rate %d, %s\n", cfg.master ? 
"master" : "slave", cfg.lrate << 3, modstr[cfg.mod]); } else { if_printf(&sc->arpcom.ac_if, "unable to load firmware\n"); error = EIO; } break; case SIOCGETSTATS : if ((error = suser(curthread)) != 0) break; t = 0; if (issue_cx28975_cmd(sc, _DSL_FAR_END_ATTEN, &t, 1)) error = EIO; ds.attenuat = sc->cmdp->out_data[0]; if (issue_cx28975_cmd(sc, _DSL_NOISE_MARGIN, &t, 1)) error = EIO; ds.nmr = sc->cmdp->out_data[0]; if (issue_cx28975_cmd(sc, _DSL_POWER_BACK_OFF_RESULT, &t, 1)) error = EIO; ds.tpbo = sc->cmdp->out_data[0]; ds.rpbo = sc->cmdp->out_data[1]; if (!issue_cx28975_cmd(sc, _DSL_HDSL_PERF_ERR_CTRS, &t, 1)) { int i; for (i = 0; i < 10; ++i) ((u_int8_t *) &ds.losw)[i] = sc->cmdp->out_data[i]; } else error = EIO; ds.status_1 = ((volatile u_int8_t *)sc->cmdp)[0x3c0]; ds.status_3 = ((volatile u_int8_t *)sc->cmdp)[0x3c2]; bcopy(&sc->in_stats, ifr->ifr_data, sizeof(struct sbni16_stats)); bcopy(&ds, ifr->ifr_data + sizeof(struct sbni16_stats), sizeof(struct dsl_stats)); break; case SIOCCLRSTATS : if (!(error = suser(curthread))) { bzero(&sc->in_stats, sizeof(struct sbni16_stats)); t = 2; if (issue_cx28975_cmd(sc, _DSL_CLEAR_ERROR_CTRS, &t, 1)) error = EIO; } break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (!(ifp->if_flags & IFF_RUNNING)) { if (sc->state == NOT_LOADED) { if_printf(ifp, "firmware wasn't loaded\n"); error = EBUSY; } else sbsh_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) { sbsh_stop(sc); ifp->if_flags &= ~IFF_RUNNING; } } break; case SIOCADDMULTI: case SIOCDELMULTI: error = 0; break; default: error = ether_ioctl(ifp, cmd, data); break; } splx(s); return (error); } static void sbsh_shutdown(device_t dev) { struct sbsh_softc *sc = device_get_softc(dev); sbsh_stop(sc); } static int sbsh_suspend(device_t dev) { struct sbsh_softc *sc = device_get_softc(dev); int s; s = splimp(); sbsh_stop(sc); splx(s); return (0); } static int sbsh_resume(device_t dev) { struct sbsh_softc *sc = device_get_softc(dev); struct ifnet *ifp; int s; s = splimp(); 
	ifp = &sc->arpcom.ac_if;
	if (ifp->if_flags & IFF_UP)
		sbsh_init(sc);		/* restart an interface marked UP */

	splx(s);
	return (0);
}

/*
 * Watchdog: a transmit did not complete within the timeout.  If the
 * chip posted a TX-done status that never arrived as an interrupt,
 * note it, then reclaim finished buffers (which restarts transmit).
 */
static void
sbsh_watchdog(struct ifnet *ifp)
{
	struct sbsh_softc  *sc = ifp->if_softc;

	if_printf(ifp, "transmit timeout\n");

	if (sc->regs->SR & TXS) {
		sc->regs->SR = TXS;	/* ack the stuck status bit */
		if_printf(ifp, "interrupt posted but not delivered\n");
	}
	free_sent_buffers(sc);
}

/* -------------------------------------------------------------------------- */

/*
 * Interrupt handler: read SR once, then service and ack each pending
 * condition (modem event, TX underflow, RX done, TX done, CRC error,
 * RX overflow) in turn.  Writing a bit back to SR acknowledges it.
 */
static void
sbsh_intr(void *arg)
{
	struct sbsh_softc  *sc = (struct sbsh_softc *)arg;
	u_int8_t  status = sc->regs->SR;

	if (status == 0)
		return;		/* not ours; the IRQ line is shared */

	if (status & EXT) {
		cx28975_interrupt(sc);
		sc->regs->SR = EXT;
	}

	if (status & UFL) {
		resume_tx(sc);
		sc->regs->SR = UFL;
		++sc->in_stats.ufl_errs;
		++sc->arpcom.ac_if.if_oerrors;
	}

	if (status & RXS) {
		sc->regs->SR = RXS;
		indicate_frames(sc);
		alloc_rx_buffers(sc);
	}

	if (status & TXS) {
		sc->regs->SR = TXS;
		free_sent_buffers(sc);
	}

	if (status & CRC) {
		++sc->in_stats.crc_errs;
		++sc->arpcom.ac_if.if_ierrors;
		sc->regs->SR = CRC;
	}

	if (status & OFL) {
		++sc->in_stats.ofl_errs;
		++sc->arpcom.ac_if.if_ierrors;
		sc->regs->SR = OFL;
	}
}

/*
 * Look for a first descriptor of a next packet, and write its number
 * into CTDR.  Then enable the transmitter.
 */
static void
resume_tx(struct sbsh_softc *sc)
{
	u_int32_t	cur_tbd = sc->regs->CTDR;

	/* advance past the current packet's remaining fragments */
	while (cur_tbd != sc->regs->LTDR
	    && (sc->tbd[cur_tbd++].length & LAST_FRAG) == 0)
		;

	sc->regs->CTDR = cur_tbd;
	sc->regs->CR |= TXEN;
}

static void
start_xmit_frames(struct sbsh_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct mbuf	*m;

	/*
	 * Check if we have any free descriptor(s) and free space in
	 * our transmit queue.
*/ while (sc->tail_xq != ((sc->head_xq - 1) & (XQLEN - 1)) && sc->regs->LTDR != ((sc->head_tdesc - 1) & 0x7f)) { IF_DEQUEUE(&ifp->if_snd, m); if (!m) break; if (m->m_pkthdr.len) { BPF_MTAP(ifp, m); encap_frame(sc, m); } else m_freem(m); } if (sc->regs->CTDR != sc->regs->LTDR) ifp->if_flags |= IFF_OACTIVE; else ifp->if_flags &= ~IFF_OACTIVE; } /* * MUST be called at splimp */ static void encap_frame(struct sbsh_softc *sc, struct mbuf *m_head) { struct mbuf *m; u_int32_t cur_tbd; int done; look_for_nonzero: for (m = m_head; !m->m_len; m = m->m_next) ; cur_tbd = sc->regs->LTDR & 0x7f; done = 0; do { if (m->m_len < 5 || cur_tbd == ((sc->head_tdesc - 1) & 0x7f)) { if ((m_head = repack(sc, m_head)) != NULL) goto look_for_nonzero; else return; } sc->tbd[cur_tbd].address = vtophys(mtod(m, vm_offset_t)); sc->tbd[cur_tbd].length = m->m_len; do { m = m->m_next; } while (m && !m->m_len); if (!m) { /* last fragment has been reached */ sc->tbd[cur_tbd].length |= LAST_FRAG; done = 1; } ++cur_tbd; cur_tbd &= 0x7f; } while (!done); sc->xq[sc->tail_xq++] = m_head; sc->tail_xq &= (XQLEN - 1); sc->regs->LTDR = cur_tbd; ++sc->in_stats.sent_pkts; ++sc->arpcom.ac_if.if_opackets; } static struct mbuf * repack(struct sbsh_softc *sc, struct mbuf *m) { struct mbuf *m_new; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (!m_new) { if_printf (&sc->arpcom.ac_if, "unable to get mbuf.\n"); return (NULL); } if (m->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); if_printf (&sc->arpcom.ac_if, "unable to get mbuf cluster.\n"); return (NULL); } } m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; m_freem(m); return (m_new); } static void free_sent_buffers(struct sbsh_softc *sc) { u_int32_t cur_tbd; cur_tbd = sc->regs->CTDR; while (sc->head_tdesc != cur_tbd) { /* * Be careful! one element in xq may correspond to * multiple descriptors. 
		 */
		if (sc->tbd[sc->head_tdesc].length & LAST_FRAG) {
			m_freem(sc->xq[sc->head_xq++]);
			sc->head_xq &= (XQLEN - 1);
		}

		sc->tbd[sc->head_tdesc].length = 0;
		sc->head_tdesc = (sc->head_tdesc + 1) & 0x7f;
	}

	start_xmit_frames(sc);
}

/*
 * Refill the receive descriptor ring with fresh mbufs.
 * DON'T use free_sent_buffers to drop the queue!
 */
static void
alloc_rx_buffers(struct sbsh_softc *sc)
{
	unsigned	cur_rbd = sc->regs->LRDR & 0x7f;
	struct mbuf	*m;

	while (sc->tail_rq != ((sc->head_rq - 1) & (RQLEN - 1))) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (!m) {
			if_printf (&sc->arpcom.ac_if,
			    "unable to get mbuf.\n");
			return;		/* retry on a later interrupt */
		}

		if (SBNI16_MAX_FRAME > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m);
				if_printf (&sc->arpcom.ac_if,
				    "unable to get mbuf cluster.\n");
				return;
			}
			m->m_pkthdr.len = m->m_len = MCLBYTES;
		}

		m_adj(m, 2);	/* align ip on longword boundaries */

		sc->rq[sc->tail_rq++] = m;
		sc->tail_rq &= (RQLEN - 1);

		sc->rbd[cur_rbd].address = vtophys(mtod(m, vm_offset_t));
		sc->rbd[cur_rbd].length  = 0;
		sc->regs->LRDR = cur_rbd = (cur_rbd + 1) & 0x7f;
	}
}

/*
 * Hand completed receive frames up to the network stack.
 */
static void
indicate_frames(struct sbsh_softc *sc)
{
	unsigned  cur_rbd = sc->regs->CRDR & 0x7f;

	while (sc->head_rdesc != cur_rbd) {
		struct mbuf  *m = sc->rq[sc->head_rq++];
		sc->head_rq &= (RQLEN - 1);

		/* low 11 bits of the descriptor hold the frame length */
		m->m_pkthdr.len = m->m_len =
				sc->rbd[sc->head_rdesc].length & 0x7ff;
		m->m_pkthdr.rcvif = &sc->arpcom.ac_if;

		(*sc->arpcom.ac_if.if_input)(&sc->arpcom.ac_if, m);
		++sc->in_stats.rcvd_pkts;
		++sc->arpcom.ac_if.if_ipackets;

		sc->head_rdesc = (sc->head_rdesc + 1) & 0x7f;
	}
}

/*
 * Free every mbuf still queued for receive or transmit.
 */
static void
drop_queues(struct sbsh_softc *sc)
{
	while (sc->head_rq != sc->tail_rq) {
		m_freem(sc->rq[sc->head_rq++]);
		sc->head_rq &= (RQLEN - 1);
	}

	while (sc->head_xq != sc->tail_xq) {
		m_freem(sc->xq[sc->head_xq++]);
		sc->head_xq &= (XQLEN - 1);
	}
}

/* -------------------------------------------------------------------------- */

/*
 * Bring the link to the ACTIVE state: reset ring pointers, post
 * receive buffers, and enable the receiver/transmitter and their
 * interrupts.  Also records the attempt in the driver statistics.
 */
static void
activate(struct sbsh_softc *sc)
{
	struct timeval	tv;

	sc->regs->SR = 0xff;		/* clear it!
*/ sc->regs->CTDR = sc->regs->LTDR = sc->regs->CRDR = sc->regs->LRDR = 0; sc->head_tdesc = sc->head_rdesc = 0; alloc_rx_buffers(sc); sc->regs->CRB &= ~RXDE; sc->regs->IMR = EXT | RXS | TXS | CRC | OFL | UFL; sc->regs->CR |= TXEN | RXEN; sc->state = ACTIVE; ++sc->in_stats.attempts; microtime(&tv); sc->in_stats.last_time = tv.tv_sec; start_xmit_frames(sc); } static void deactivate(struct sbsh_softc *sc) { sc->regs->CR &= ~(RXEN | TXEN); sc->regs->CRB |= RXDE; sc->regs->IMR = EXT; sc->regs->CTDR = sc->regs->LTDR; sc->regs->CRDR = sc->regs->LRDR; sc->state = ACTIVATION; drop_queues(sc); } /* -------------------------------------------------------------------------- */ static void cx28975_interrupt(struct sbsh_softc *sc) { volatile struct cx28975_cmdarea *p = sc->cmdp; u_int8_t t; if (p->intr_host != 0xfe) return; if (p->out_ack & 0x80) { if (*((volatile u_int8_t *)p + 0x3c7) & 2) { if (sc->state != ACTIVE && (*((volatile u_int8_t *)p + 0x3c0) & 0xc0) == 0x40) { activate(sc); if_printf(&sc->arpcom.ac_if, "connected to peer\n"); } else if (sc->state == ACTIVE && (*((volatile u_int8_t *)p + 0x3c0) & 0xc0) != 0x40) { deactivate(sc); if_printf(&sc->arpcom.ac_if, "carrier lost\n"); } } p->intr_host = 0; t = p->intr_host; p->out_ack = 0; } else { wakeup(sc); p->intr_host = 0; t = p->intr_host; } } /* -------------------------------------------------------------------------- */ static int start_cx28975(struct sbsh_softc *sc, struct cx28975_cfg cfg) { static char thresh[] = { +8, -4, -16, -40 }; volatile struct cx28975_cmdarea *p = sc->cmdp; u_int8_t t, parm[12]; p->intr_host = 0; t = p->intr_host; /* reset chip set */ sc->regs->IMR = EXT; sc->regs->CR = 0; sc->regs->SR = 0xff; DELAY(2); sc->regs->CR = XRST; if (cfg.crc16) sc->regs->CR |= CMOD; if (cfg.fill_7e) sc->regs->CR |= FMOD; if (cfg.inv) sc->regs->CR |= PMOD; sc->regs->CRB |= RODD | RXDE; if (cfg.rburst) sc->regs->CRB |= RDBE; if (cfg.wburst) sc->regs->CRB |= WTBE; tsleep(sc, PWAIT, "sbsh", 0); if ((p->out_ack & 0x1f) 
!= _ACK_BOOT_WAKE_UP) return (-1); if (download_firmware(sc, cfg.firmw_image, cfg.firmw_len)) return (-1); tsleep(sc, PWAIT, "sbsh", 0); if ((p->out_ack & 0x1f) != _ACK_OPER_WAKE_UP) return (-1); t = cfg.master ? 1 : 9; if (issue_cx28975_cmd(sc, _DSL_SYSTEM_ENABLE, &t, 1)) return (-1); t = 0x63; if (issue_cx28975_cmd(sc, _DSL_SYSTEM_CONFIG, &t, 1)) return (-1); *(u_int16_t *)parm = cfg.lrate >> 3; parm[2] = parm[3] = parm[0]; parm[5] = cfg.lrate & 7; parm[4] = parm[7] = 1; parm[6] = 0; if (issue_cx28975_cmd(sc, _DSL_MULTI_RATE_CONFIG, parm, 8)) return (-1); parm[0] = 0x02 | (cfg.mod << 4); parm[1] = 0; if (issue_cx28975_cmd(sc, _DSL_TRAINING_MODE, parm, 2)) return (-1); bzero(parm, 12); parm[0] = 0x04; /* pre-activation: G.hs */ parm[4] = 0x04; /* no remote configuration */ parm[7] = 0x01; /* annex A (default) */ parm[8] = 0xff; /* i-bit mask (all bits) */ if (issue_cx28975_cmd(sc, _DSL_PREACTIVATION_CFG, parm, 12)) return (-1); parm[0] = 0x03; /* dying gasp time - 3 frames */ parm[1] = thresh[cfg.mod]; parm[2] = 0xff; /* attenuation */ parm[3] = 0x04; /* line probe NMR (+2 dB) */ parm[4] = 0x00; /* reserved */ parm[5] = 0x00; if (issue_cx28975_cmd(sc, _DSL_THRESHOLDS, parm, 6)) return (-1); t = cfg.master ? 
	    0x23 : 0x21;
	if (issue_cx28975_cmd(sc, _DSL_FR_PCM_CONFIG, &t, 1))
		return (-1);

	t = 0x02;
	if (issue_cx28975_cmd(sc, _DSL_INTR_HOST_MASK, &t, 1))
		return (-1);

	sc->state = DOWN;
	return (0);
}

/*
 * Download the transceiver firmware image through the command area in
 * 75-byte chunks, then send the image's two's-complement checksum for
 * verification.  Returns 0 on success, -1 on any command failure.
 */
static int
download_firmware(struct sbsh_softc *sc, u_int8_t *img, u_int32_t img_len)
{
	u_int32_t	t;
	int		i;
	u_int8_t	cksum = 0;

	/* simple additive checksum over the whole image */
	for (i = 0; i < img_len; ++i)
		cksum += img[i];

	t = img_len;
	if (issue_cx28975_cmd(sc, _DSL_DOWNLOAD_START, (u_int8_t *) &t, 4))
		return (-1);

	for (i = 0; img_len >= 75; i += 75, img_len -= 75) {
		if (issue_cx28975_cmd(sc, _DSL_DOWNLOAD_DATA, img + i, 75))
			return (-1);
	}

	/* flush the final partial chunk, if any */
	if (img_len
	    && issue_cx28975_cmd(sc, _DSL_DOWNLOAD_DATA, img + i, img_len))
		return (-1);

	/* NOTE(review): only the low byte of t is sent (size == 1) */
	t = (cksum ^ 0xff) + 1;
	if (issue_cx28975_cmd(sc, _DSL_DOWNLOAD_END, (u_int8_t *) &t, 1))
		return (-1);

	return (0);
}

/*
 * Issue one command to the cx28975 through the shared command area:
 * build the header and payload with their XOR checksums, ring the
 * 8051's doorbell and sleep until cx28975_interrupt() wakes us (8
 * second timeout).  Returns 0 if the chip acked with _ACK_PASS,
 * -1 on timeout or failure.
 */
static int
issue_cx28975_cmd(struct sbsh_softc *sc, u_int8_t cmd,
			u_int8_t *data, u_int8_t size)
{
	volatile struct cx28975_cmdarea	*p = sc->cmdp;
	u_int8_t	*databuf = p->in_data;
	int	i;

	u_int8_t	cksum = 0;

	p->in_dest	= 0xf0;
	p->in_opcode	= cmd;
	p->in_zero	= 0;
	p->in_length	= --size;	/* the chip wants length - 1 */
	p->in_csum	= 0xf0 ^ cmd ^ size ^ 0xaa;

	for (i = 0; i <= size; ++i) {
		cksum ^= *data;
		*databuf++ = *data++;	/* only 1 byte per cycle! */
	}

	p->in_datasum	= cksum ^ 0xaa;
	p->out_ack	= _ACK_NOT_COMPLETE;
	p->intr_8051	= 0xfe;		/* ring the 8051's doorbell */

	if (tsleep(sc, PWAIT, "sbsh", hz << 3))
		return (-1);

	/* busy-wait for the ack byte to settle */
	while (p->out_ack == _ACK_NOT_COMPLETE)
		;	/* FIXME ! */

	if ((p->out_ack & 0x1f) == _ACK_PASS) {
		p->out_ack = 0;
		return (0);
	} else {
		p->out_ack = 0;
		return (-1);
	}
}

Index: head/sys/dev/si/si_eisa.c
===================================================================
--- head/sys/dev/si/si_eisa.c	(revision 129878)
+++ head/sys/dev/si/si_eisa.c	(revision 129879)
@@ -1,152 +1,153 @@
/*
 * Device driver for Specialix range (SI/XIO) of serial line multiplexors.
* * Copyright (C) 2000, Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notices, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notices, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include
/*
 * si_eisa_probe: identify a Specialix SI/XIO EISA host card.
 *
 * Matches on the EISA board ID, then reads the card's configured
 * memory window base from two I/O-port registers at the slot's base
 * address (high byte at iobase+1, next byte at iobase) and the IRQ
 * number from the top nibble of iobase+2, registering each with the
 * EISA bus so attach can allocate them.  Returns 0 on match, ENXIO
 * otherwise.
 */
static int si_eisa_probe(device_t dev) { u_long iobase; u_long maddr; int irq; if (eisa_get_id(dev) != SIEISADEVID) return ENXIO; device_set_desc(dev, "Specialix SI/XIO EISA host card"); iobase = (eisa_get_slot(dev) * EISA_SLOT_SIZE) + SIEISABASE; eisa_add_iospace(dev, iobase, SIEISAIOSIZE, RESVADDR_NONE); maddr = (inb(iobase+1) << 24) | (inb(iobase) << 16); eisa_add_mspace(dev, maddr, SIEISA_MEMSIZE, RESVADDR_NONE); irq = ((inb(iobase+2) >> 4) & 0xf); eisa_add_intr(dev, irq, EISA_TRIGGER_LEVEL); /* XXX shared?
*/ return (0); }
/*
 * si_eisa_attach: allocate the I/O-port, memory-window and (shareable)
 * IRQ resources advertised by probe, hook up si_intr, and hand off to
 * the bus-independent siattach().  On any failure, everything acquired
 * so far is torn down in reverse order under the "fail:" label and a
 * nonzero errno is returned.
 */
static int si_eisa_attach(device_t dev) { struct si_softc *sc; void *ih; int error; error = 0; ih = NULL; sc = device_get_softc(dev); sc->sc_type = SIEISA; sc->sc_port_rid = 0; sc->sc_port_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->sc_port_rid, RF_ACTIVE); if (!sc->sc_port_res) { device_printf(dev, "couldn't allocate ioports\n"); goto fail; } sc->sc_iobase = rman_get_start(sc->sc_port_res); sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "couldn't allocate iomemory"); goto fail; } sc->sc_paddr = (caddr_t)rman_get_start(sc->sc_mem_res); sc->sc_maddr = rman_get_virtual(sc->sc_mem_res); sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { device_printf(dev, "couldn't allocate interrupt"); goto fail; } sc->sc_irq = rman_get_start(sc->sc_irq_res); error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_TTY, si_intr, sc,&ih); if (error) { device_printf(dev, "couldn't activate interrupt"); goto fail; } error = siattach(dev); if (error) goto fail; return (0); /* success */ fail: if (error == 0) error = ENXIO; if (sc->sc_irq_res) { if (ih) bus_teardown_intr(dev, sc->sc_irq_res, ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); sc->sc_irq_res = 0; } if (sc->sc_mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); sc->sc_mem_res = 0; } if (sc->sc_port_res) { bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_port_rid, sc->sc_port_res); sc->sc_port_res = 0; } return (error); }
/* Newbus glue: register probe/attach on the eisa bus. */
static device_method_t si_eisa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, si_eisa_probe), DEVMETHOD(device_attach, si_eisa_attach), { 0, 0 } }; static driver_t si_eisa_driver = { "si", si_eisa_methods, sizeof(struct si_softc), }; DRIVER_MODULE(si, eisa, si_eisa_driver, si_devclass, 0, 0); Index:
head/sys/dev/si/si_isa.c =================================================================== --- head/sys/dev/si/si_isa.c (revision 129878) +++ head/sys/dev/si/si_isa.c (revision 129879) @@ -1,337 +1,338 @@ /* * Device driver for Specialix range (SI/XIO) of serial line multiplexors. * * Copyright (C) 2000, Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notices, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notices, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE. 
* */ #include __FBSDID("$FreeBSD$"); #include "opt_debug_si.h" #include #include #include +#include #include #include #include #include #include #include #include /* Look for a valid board at the given mem addr */ static int si_isa_probe(device_t dev) { struct si_softc *sc; int type; u_int i, ramsize; volatile unsigned char was, *ux; volatile unsigned char *maddr; unsigned char *paddr; int unit; /* No pnp support */ if (isa_get_vendorid(dev)) return (ENXIO); sc = device_get_softc(dev); unit = device_get_unit(dev); sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, 0, ~0, SIPROBEALLOC, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory resource\n"); return ENXIO; } paddr = (caddr_t)rman_get_start(sc->sc_mem_res);/* physical */ maddr = rman_get_virtual(sc->sc_mem_res); /* in kvm */ DPRINT((0, DBG_AUTOBOOT, "si%d: probe at virtual=0x%x physical=0x%x\n", unit, maddr, paddr)); /* * this is a lie, but it's easier than trying to handle caching * and ram conflicts in the >1M and <16M region. */ if ((caddr_t)paddr < (caddr_t)0xA0000 || (caddr_t)paddr >= (caddr_t)0x100000) { device_printf(dev, "maddr (%p) out of range\n", paddr); goto fail; } if (((uintptr_t)paddr & 0x7fff) != 0) { device_printf(dev, "maddr (%p) not on 32k boundary\n", paddr); goto fail; } /* Is there anything out there? (0x17 is just an arbitrary number) */ *maddr = 0x17; if (*maddr != 0x17) { device_printf(dev, "0x17 check fail at phys %p\n", paddr); goto fail; } /* * Let's look first for a JET ISA card, since that's pretty easy * * All jet hosts are supposed to have this string in the IDROM, * but it's not worth checking on self-IDing busses like PCI. 
*/ { unsigned char *jet_chk_str = "JET HOST BY KEV#"; for (i = 0; i < strlen(jet_chk_str); i++) if (jet_chk_str[i] != *(maddr + SIJETIDSTR + 2 * i)) goto try_mk2; } DPRINT((0, DBG_AUTOBOOT|DBG_FAIL, "si%d: JET first check - 0x%x\n", unit, (*(maddr+SIJETIDBASE)))); if (*(maddr+SIJETIDBASE) != (SISPLXID&0xff)) goto try_mk2; DPRINT((0, DBG_AUTOBOOT|DBG_FAIL, "si%d: JET second check - 0x%x\n", unit, (*(maddr+SIJETIDBASE+2)))); if (*(maddr+SIJETIDBASE+2) != ((SISPLXID&0xff00)>>8)) goto try_mk2; /* It must be a Jet ISA or RIO card */ DPRINT((0, DBG_AUTOBOOT|DBG_FAIL, "si%d: JET id check - 0x%x\n", unit, (*(maddr+SIUNIQID)))); if ((*(maddr+SIUNIQID) & 0xf0) != 0x20) goto try_mk2; /* It must be a Jet ISA SI/XIO card */ *(maddr + SIJETCONFIG) = 0; type = SIJETISA; ramsize = SIJET_RAMSIZE; goto got_card; try_mk2: /* * OK, now to see if whatever responded is really an SI card. * Try for a MK II next (SIHOST2) */ for (i = SIPLSIG; i < SIPLSIG + 8; i++) if ((*(maddr+i) & 7) != (~(unsigned char)i & 7)) goto try_mk1; /* It must be an SIHOST2 */ *(maddr + SIPLRESET) = 0; *(maddr + SIPLIRQCLR) = 0; *(maddr + SIPLIRQSET) = 0x10; type = SIHOST2; ramsize = SIHOST2_RAMSIZE; goto got_card; try_mk1: /* * Its not a MK II, so try for a MK I (SIHOST) */ *(maddr+SIRESET) = 0x0; /* reset the card */ *(maddr+SIINTCL) = 0x0; /* clear int */ *(maddr+SIRAM) = 0x17; if (*(maddr+SIRAM) != (unsigned char)0x17) goto fail; *(maddr+0x7ff8) = 0x17; if (*(maddr+0x7ff8) != (unsigned char)0x17) { device_printf(dev, "0x17 check fail at phys %p = 0x%x\n", paddr+0x77f8, *(maddr+0x77f8)); goto fail; } /* It must be an SIHOST (maybe?) 
- there must be a better way XXX */ type = SIHOST; ramsize = SIHOST_RAMSIZE; got_card: DPRINT((0, DBG_AUTOBOOT, "si%d: found type %d card, try memory test\n", unit, type)); /* Try the acid test */ ux = maddr + SIRAM; for (i = 0; i < ramsize; i++, ux++) *ux = (unsigned char)(i&0xff); ux = maddr + SIRAM; for (i = 0; i < ramsize; i++, ux++) { if ((was = *ux) != (unsigned char)(i&0xff)) { device_printf(dev, "memtest fail at phys %p, was %x should be %x\n", paddr + i, was, i & 0xff); goto fail; } } /* clear out the RAM */ ux = maddr + SIRAM; for (i = 0; i < ramsize; i++) *ux++ = 0; ux = maddr + SIRAM; for (i = 0; i < ramsize; i++) { if ((was = *ux++) != 0) { device_printf(dev, "clear fail at phys %p, was %x\n", paddr + i, was); goto fail; } } /* * Success, we've found a valid board, now fill in * the adapter structure. */ switch (type) { case SIHOST2: switch (isa_get_irq(dev)) { case 11: case 12: case 15: break; default: device_printf(dev, "bad IRQ value - %d (11, 12, 15 allowed)\n", isa_get_irq(dev)); goto fail; } sc->sc_memsize = SIHOST2_MEMSIZE; break; case SIHOST: switch (isa_get_irq(dev)) { case 11: case 12: case 15: break; default: device_printf(dev, "bad IRQ value - %d (11, 12, 15 allowed)\n", isa_get_irq(dev)); goto fail; } sc->sc_memsize = SIHOST_MEMSIZE; break; case SIJETISA: switch (isa_get_irq(dev)) { case 9: case 10: case 11: case 12: case 15: break; default: device_printf(dev, "bad IRQ value - %d (9, 10, 11, 12, 15 allowed)\n", isa_get_irq(dev)); goto fail; } sc->sc_memsize = SIJETISA_MEMSIZE; break; case SIMCA: /* MCA */ default: device_printf(dev, "card type %d not supported\n", type); goto fail; } sc->sc_type = type; bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); sc->sc_mem_res = 0; return (0); /* success! 
*/ fail: if (sc->sc_mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); sc->sc_mem_res = 0; } return(EINVAL); } static int si_isa_attach(device_t dev) { int error; void *ih; struct si_softc *sc; error = 0; ih = NULL; sc = device_get_softc(dev); sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "couldn't map memory\n"); goto fail; } sc->sc_paddr = (caddr_t)rman_get_start(sc->sc_mem_res); sc->sc_maddr = rman_get_virtual(sc->sc_mem_res); sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { device_printf(dev, "couldn't allocate interrupt\n"); goto fail; } sc->sc_irq = rman_get_start(sc->sc_irq_res); error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_TTY, si_intr, sc,&ih); if (error) { device_printf(dev, "couldn't activate interrupt\n"); goto fail; } error = siattach(dev); if (error) goto fail; return (0); /* success */ fail: if (error == 0) error = ENXIO; if (sc->sc_irq_res) { if (ih) bus_teardown_intr(dev, sc->sc_irq_res, ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); sc->sc_irq_res = 0; } if (sc->sc_mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); sc->sc_mem_res = 0; } return (error); } static device_method_t si_isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, si_isa_probe), DEVMETHOD(device_attach, si_isa_attach), { 0, 0 } }; static driver_t si_isa_driver = { "si", si_isa_methods, sizeof(struct si_softc), }; DRIVER_MODULE(si, isa, si_isa_driver, si_devclass, 0, 0); Index: head/sys/dev/si/si_pci.c =================================================================== --- head/sys/dev/si/si_pci.c (revision 129878) +++ head/sys/dev/si/si_pci.c (revision 129879) @@ -1,143 +1,144 @@ /*- * Device driver for Specialix range (SI/XIO) of serial line multiplexors. 
* * Copyright (C) 2000, Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notices, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notices, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include
/*
 * si_pci_probe: match Specialix PCI host cards by PCI device ID.
 * 0x400011cb is the SI/XIO PCI card; 0x200011cb is the SX card,
 * accepted only when the subsystem ID register also matches.
 * Returns 0 on match, ENXIO otherwise.
 */
static int si_pci_probe(device_t dev) { const char *desc = NULL; switch (pci_get_devid(dev)) { case 0x400011cb: desc = "Specialix SI/XIO PCI host card"; break; case 0x200011cb: if (pci_read_config(dev, SIJETSSIDREG, 4) == 0x020011cb) desc = "Specialix SX PCI host card"; break; } if (desc) { device_set_desc(dev, desc); return 0; } return ENXIO; }
/*
 * si_pci_attach: pick the BAR matching the card flavour, map the
 * memory window and (shareable) IRQ, hook up si_intr and call the
 * bus-independent siattach().  Resources are released under "fail:"
 * on error.
 */
static int si_pci_attach(device_t dev) { struct si_softc *sc; void *ih; int error; error = 0; ih = NULL; sc = device_get_softc(dev); switch (pci_get_devid(dev)) { case 0x400011cb: sc->sc_type = SIPCI; sc->sc_mem_rid = SIPCIBADR; break; case 0x200011cb: sc->sc_type = SIJETPCI; sc->sc_mem_rid = SIJETBADR; break; } sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "couldn't map memory\n"); goto fail; } sc->sc_paddr = (caddr_t)rman_get_start(sc->sc_mem_res); sc->sc_maddr = rman_get_virtual(sc->sc_mem_res); sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE | RF_SHAREABLE); if
(!sc->sc_irq_res) { device_printf(dev, "couldn't map interrupt\n"); goto fail; } sc->sc_irq = rman_get_start(sc->sc_irq_res); error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_TTY, si_intr, sc, &ih); if (error) { device_printf(dev, "could not activate interrupt\n"); goto fail; } error = siattach(dev); if (error) goto fail; return (0); /* success */ fail: if (error == 0) error = ENXIO; if (sc->sc_irq_res) { if (ih) bus_teardown_intr(dev, sc->sc_irq_res, ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); sc->sc_irq_res = 0; } if (sc->sc_mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); sc->sc_mem_res = 0; } return (error); }
/* Newbus glue: register probe/attach on the pci bus. */
static device_method_t si_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, si_pci_probe), DEVMETHOD(device_attach, si_pci_attach), { 0, 0 } }; static driver_t si_pci_driver = { "si", si_pci_methods, sizeof(struct si_softc), }; DRIVER_MODULE(si, pci, si_pci_driver, si_devclass, 0, 0); Index: head/sys/dev/sn/if_sn_pccard.c =================================================================== --- head/sys/dev/sn/if_sn_pccard.c (revision 129878) +++ head/sys/dev/sn/if_sn_pccard.c (revision 129879) @@ -1,169 +1,170 @@ /*- * Copyright (c) 1999 M. Warner Losh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Modifications for Megahertz X-Jack Ethernet Card (XJ-10BT) * * Copyright (c) 1996 by Tatsumi Hosokawa * BSD-nomads, Tokyo, Japan. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include "card_if.h" #include "pccarddevs.h"
/* PC Card product IDs this driver claims. */
static const struct pccard_product sn_pccard_products[] = { PCMCIA_CARD(DSPSI, XJACK, 0), PCMCIA_CARD(NEWMEDIA, BASICS, 0), #if 0 PCMCIA_CARD(SMC, 8020BT, 0), #endif { NULL } };
/*
 * sn_pccard_match: claim the card if its product tuple is in
 * sn_pccard_products; set the device description from the table.
 * Returns 0 on match, EIO otherwise.
 */
static int sn_pccard_match(device_t dev) { const struct pccard_product *pp; if ((pp = pccard_product_lookup(dev, sn_pccard_products, sizeof(sn_pccard_products[0]), NULL)) != NULL) { if (pp->pp_name != NULL) device_set_desc(dev, pp->pp_name); return 0; } return EIO; }
/* sn_pccard_probe: defer to the chip-level probe (second arg 1 -- see sn_probe). */
static int sn_pccard_probe(device_t dev) { int err; err = sn_probe(dev, 1); return (err); }
/*
 * sn_pccard_ascii_enaddr: parse a 12-hex-digit string into a 6-byte
 * Ethernet address.  High nibble first within each byte.  Returns 1 on
 * success, 0 on any non-hex character.  NOTE(review): assumes str has
 * at least ETHER_ADDR_LEN * 2 readable characters; callers check
 * strlen first.
 */
static int sn_pccard_ascii_enaddr(const char *str, u_char *enet) { uint8_t digit; int i; memset(enet, 0, ETHER_ADDR_LEN); for (i = 0, digit = 0; i < (ETHER_ADDR_LEN * 2); i++) { if (str[i] >= '0' && str[i] <= '9') digit |= str[i] - '0'; else if (str[i] >= 'a' && str[i] <= 'f') digit |= (str[i] - 'a') + 10; else if (str[i] >= 'A' && str[i] <= 'F') digit |= (str[i] - 'A') + 10; else { /* Bogus digit!!
*/ return (0); } /* Compensate for ordering of digits. */ if (i & 1) { enet[i >> 1] = digit; digit = 0; } else digit <<= 4; } return (1); }
/*
 * sn_pccard_attach: obtain the MAC address and attach the interface.
 * Tries, in order: the card's ether tuple (pccard_get_ether), then the
 * CIS3 string, then the CIS4 string -- the latter two parsed as ASCII
 * hex.  "sum" doubles as both an all-zero check on the tuple address
 * and the parse-success flag.  If any source yielded a non-zero
 * address it is copied into the softc before sn_attach().
 */
static int sn_pccard_attach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); int i; u_char sum; u_char ether_addr[ETHER_ADDR_LEN]; const char *cisstr; sc->pccard_enaddr = 0; pccard_get_ether(dev, ether_addr); for (i = 0, sum = 0; i < ETHER_ADDR_LEN; i++) sum |= ether_addr[i]; if (sum == 0) { pccard_get_cis3_str(dev, &cisstr); if (strlen(cisstr) == ETHER_ADDR_LEN * 2) sum = sn_pccard_ascii_enaddr(cisstr, ether_addr); } if (sum == 0) { pccard_get_cis4_str(dev, &cisstr); if (strlen(cisstr) == ETHER_ADDR_LEN * 2) sum = sn_pccard_ascii_enaddr(cisstr, ether_addr); } if (sum) { sc->pccard_enaddr = 1; bcopy(ether_addr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); } return (sn_attach(dev)); }
/* Newbus/card-compat glue for the pccard bus. */
static device_method_t sn_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pccard_compat_probe), DEVMETHOD(device_attach, pccard_compat_attach), DEVMETHOD(device_detach, sn_detach), /* Card interface */ DEVMETHOD(card_compat_match, sn_pccard_match), DEVMETHOD(card_compat_probe, sn_pccard_probe), DEVMETHOD(card_compat_attach, sn_pccard_attach), { 0, 0 } }; static driver_t sn_pccard_driver = { "sn", sn_pccard_methods, sizeof(struct sn_softc), }; extern devclass_t sn_devclass; DRIVER_MODULE(sn, pccard, sn_pccard_driver, sn_devclass, 0, 0); MODULE_DEPEND(sn, ether, 1, 1, 1); Index: head/sys/dev/snp/snp.c =================================================================== --- head/sys/dev/snp/snp.c (revision 129878) +++ head/sys/dev/snp/snp.c (revision 129879) @@ -1,656 +1,657 @@ /* * Copyright (c) 1995 Ugen J.S.Antsilevich * * Redistribution and use in source forms, with and without modification, * are permitted provided that this entire comment appears intact. * * Redistribution in binary form may occur without any restrictions.
* Obviously, it would be nice if you gave credit where credit is due * but requiring it would be too onerous. * * This software is provided ``AS IS'' without any warranties of any kind. * * Snoop stuff. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include +#include #include #include #include static l_close_t snplclose; static l_write_t snplwrite; static d_open_t snpopen; static d_close_t snpclose; static d_read_t snpread; static d_write_t snpwrite; static d_ioctl_t snpioctl; static d_poll_t snppoll; static struct cdevsw snp_cdevsw = { .d_version = D_VERSION, .d_flags = D_PSEUDO | D_NEEDGIANT, .d_open = snpopen, .d_close = snpclose, .d_read = snpread, .d_write = snpwrite, .d_ioctl = snpioctl, .d_poll = snppoll, .d_name = "snp", };
/* The "snoop" line discipline: wraps the tty discipline, diverting close and write. */
static struct linesw snpdisc = { ttyopen, snplclose, ttread, snplwrite, l_nullioctl, ttyinput, ttstart, ttymodem }; /* * This is the main snoop per-device structure. */ struct snoop { LIST_ENTRY(snoop) snp_list; /* List glue. */ int snp_unit; /* Device number. */ dev_t snp_target; /* Target tty device. */ struct tty *snp_tty; /* Target tty pointer. */ u_long snp_len; /* Possible length. */ u_long snp_base; /* Data base. */ u_long snp_blen; /* Used length. */ caddr_t snp_buf; /* Allocation pointer. */ int snp_flags; /* Flags. */ struct selinfo snp_sel; /* Select info. */ int snp_olddisc; /* Old line discipline. */ }; /* * Possible flags. */ #define SNOOP_ASYNC 0x0002 #define SNOOP_OPEN 0x0004 #define SNOOP_RWAIT 0x0008 #define SNOOP_OFLOW 0x0010 #define SNOOP_DOWN 0x0020 /* * Other constants. */ #define SNOOP_MINLEN (4*1024) /* This should be power of 2. * 4K tested to be the minimum * for which on normal tty * usage there is no need to * allocate more. */ #define SNOOP_MAXLEN (64*1024) /* This one also,64K enough * If we grow more,something * really bad in this world.. */ static MALLOC_DEFINE(M_SNP, "snp", "Snoop device data"); /* * The number of the "snoop" line discipline.
This gets determined at * module load time. */ static int snooplinedisc; static LIST_HEAD(, snoop) snp_sclist = LIST_HEAD_INITIALIZER(&snp_sclist); static struct clonedevs *snpclones; static struct tty *snpdevtotty(dev_t dev); static void snp_clone(void *arg, char *name, int namelen, dev_t *dev); static int snp_detach(struct snoop *snp); static int snp_down(struct snoop *snp); static int snp_in(struct snoop *snp, char *buf, int n); static int snp_modevent(module_t mod, int what, void *arg);
/*
 * snplclose: line-discipline close hook.  Tears down the snoop
 * attachment (snp_down) before handing the close to the normal tty
 * discipline close.
 */
static int snplclose(tp, flag) struct tty *tp; int flag; { struct snoop *snp; int error; snp = tp->t_sc; error = snp_down(snp); if (error != 0) return (error); error = ttylclose(tp, flag); return (error); }
/*
 * snplwrite: line-discipline write hook.  Copies the outgoing data in
 * chunks of up to 512 bytes into the snoop buffer (snp_in) and then
 * forwards the same bytes to the real tty via ttwrite using a
 * temporary uio ("hackish, but probably the least of all evils").
 */
static int snplwrite(tp, uio, flag) struct tty *tp; struct uio *uio; int flag; { struct iovec iov; struct uio uio2; struct snoop *snp; int error, ilen; char *ibuf; error = 0; ibuf = NULL; snp = tp->t_sc; while (uio->uio_resid > 0) { ilen = imin(512, uio->uio_resid); ibuf = malloc(ilen, M_SNP, M_WAITOK); error = uiomove(ibuf, ilen, uio); if (error != 0) break; snp_in(snp, ibuf, ilen); /* Hackish, but probably the least of all evils. */ iov.iov_base = ibuf; iov.iov_len = ilen; uio2.uio_iov = &iov; uio2.uio_iovcnt = 1; uio2.uio_offset = 0; uio2.uio_resid = ilen; uio2.uio_segflg = UIO_SYSSPACE; uio2.uio_rw = UIO_WRITE; uio2.uio_td = uio->uio_td; error = ttwrite(tp, &uio2, flag); if (error != 0) break; free(ibuf, M_SNP); ibuf = NULL; } if (ibuf != NULL) free(ibuf, M_SNP); return (error); }
/* snpdevtotty: map a character device to its tty; NULL if it is not a tty device. */
static struct tty * snpdevtotty(dev) dev_t dev; { struct cdevsw *cdp; cdp = devsw(dev); if (cdp == NULL || (cdp->d_flags & D_TTY) == 0) return (NULL); return (dev->si_tty); } #define SNP_INPUT_BUF 5 /* This is even too much, the maximal * interactive mode write is 3 bytes * length for function keys...
*/
/*
 * snpwrite: /dev/snpN write entry.  Injects the written bytes into the
 * snooped tty's input stream via ttyinput(), after verifying the tty
 * is still properly attached to this snoop (t_sc, TS_SNOOP and the
 * snoop line discipline all agree) and is open.
 */
static int snpwrite(dev, uio, flag) dev_t dev; struct uio *uio; int flag; { struct snoop *snp; struct tty *tp; int error, i, len; unsigned char c[SNP_INPUT_BUF]; snp = dev->si_drv1; tp = snp->snp_tty; if (tp == NULL) return (EIO); if ((tp->t_sc == snp) && (tp->t_state & TS_SNOOP) && tp->t_line == snooplinedisc) goto tty_input; printf("snp%d: attempt to write to bad tty\n", snp->snp_unit); return (EIO); tty_input: if (!(tp->t_state & TS_ISOPEN)) return (EIO); while (uio->uio_resid > 0) { len = imin(uio->uio_resid, SNP_INPUT_BUF); if ((error = uiomove(c, len, uio)) != 0) return (error); for (i=0; i < len; i++) { if (ttyinput(c[i], tp)) return (EIO); } } return (0); }
/*
 * snpread: /dev/snpN read entry.  Blocks (unless IO_NDELAY) until
 * snooped data is buffered, copies it out, then opportunistically
 * shrinks the buffer by halving while it stays >= SNOOP_MINLEN and
 * still fits the remaining data.  NOTE(review): the overflow-clear
 * test "(n < snp->snp_len)" compares the pre-read length against the
 * post-read length, which can never be smaller here -- looks
 * ineffective; left as-is pending confirmation.
 */
static int snpread(dev, uio, flag) dev_t dev; struct uio *uio; int flag; { struct snoop *snp; int error, len, n, nblen, s; caddr_t from; char *nbuf; snp = dev->si_drv1; KASSERT(snp->snp_len + snp->snp_base <= snp->snp_blen, ("snoop buffer error")); if (snp->snp_tty == NULL) return (EIO); snp->snp_flags &= ~SNOOP_RWAIT; do { if (snp->snp_len == 0) { if (flag & IO_NDELAY) return (EWOULDBLOCK); snp->snp_flags |= SNOOP_RWAIT; error = tsleep(snp, (PZERO + 1) | PCATCH, "snprd", 0); if (error != 0) return (error); } } while (snp->snp_len == 0); n = snp->snp_len; error = 0; while (snp->snp_len > 0 && uio->uio_resid > 0 && error == 0) { len = min((unsigned)uio->uio_resid, snp->snp_len); from = (caddr_t)(snp->snp_buf + snp->snp_base); if (len == 0) break; error = uiomove(from, len, uio); snp->snp_base += len; snp->snp_len -= len; } if ((snp->snp_flags & SNOOP_OFLOW) && (n < snp->snp_len)) { snp->snp_flags &= ~SNOOP_OFLOW; } s = spltty(); nblen = snp->snp_blen; if (((nblen / 2) >= SNOOP_MINLEN) && (nblen / 2) >= snp->snp_len) { while (nblen / 2 >= snp->snp_len && nblen / 2 >= SNOOP_MINLEN) nblen = nblen / 2; if ((nbuf = malloc(nblen, M_SNP, M_NOWAIT)) != NULL) { bcopy(snp->snp_buf + snp->snp_base, nbuf, snp->snp_len); free(snp->snp_buf, M_SNP); snp->snp_buf = nbuf; snp->snp_blen = nblen; snp->snp_base = 0; } }
splx(s); return (error); }
/*
 * snp_in: append n bytes of snooped traffic to the snoop buffer.
 * Grows the buffer by doubling up to SNOOP_MAXLEN when the data does
 * not fit; if it still cannot fit (or allocation fails) the overflow
 * flag is set and the data dropped.  Data is compacted to the front of
 * the buffer when the tail space is short.  Wakes up any sleeping
 * reader and select()ors.  Returns the byte count stored (0 when
 * dropped / interface down).
 */
static int snp_in(snp, buf, n) struct snoop *snp; char *buf; int n; { int s_free, s_tail; int s, len, nblen; caddr_t from, to; char *nbuf; KASSERT(n >= 0, ("negative snoop char count")); if (n == 0) return (0); if (snp->snp_flags & SNOOP_DOWN) { printf("snp%d: more data to down interface\n", snp->snp_unit); return (0); } if (snp->snp_flags & SNOOP_OFLOW) { printf("snp%d: buffer overflow\n", snp->snp_unit); /* * On overflow we just repeat the standart close * procedure...yes , this is waste of space but.. Then next * read from device will fail if one would recall he is * snooping and retry... */ return (snp_down(snp)); } s_tail = snp->snp_blen - (snp->snp_len + snp->snp_base); s_free = snp->snp_blen - snp->snp_len; if (n > s_free) { s = spltty(); nblen = snp->snp_blen; while ((n > s_free) && ((nblen * 2) <= SNOOP_MAXLEN)) { nblen = snp->snp_blen * 2; s_free = nblen - (snp->snp_len + snp->snp_base); } if ((n <= s_free) && (nbuf = malloc(nblen, M_SNP, M_NOWAIT))) { bcopy(snp->snp_buf + snp->snp_base, nbuf, snp->snp_len); free(snp->snp_buf, M_SNP); snp->snp_buf = nbuf; snp->snp_blen = nblen; snp->snp_base = 0; } else { snp->snp_flags |= SNOOP_OFLOW; if (snp->snp_flags & SNOOP_RWAIT) { snp->snp_flags &= ~SNOOP_RWAIT; wakeup(snp); } splx(s); return (0); } splx(s); } if (n > s_tail) { from = (caddr_t)(snp->snp_buf + snp->snp_base); to = (caddr_t)(snp->snp_buf); len = snp->snp_len; bcopy(from, to, len); snp->snp_base = 0; } to = (caddr_t)(snp->snp_buf + snp->snp_base + snp->snp_len); bcopy(buf, to, n); snp->snp_len += n; if (snp->snp_flags & SNOOP_RWAIT) { snp->snp_flags &= ~SNOOP_RWAIT; wakeup(snp); } selwakeuppri(&snp->snp_sel, PZERO + 1); return (n); }
/*
 * snpopen: /dev/snpN open entry.  First open allocates the per-unit
 * snoop structure (EBUSY on re-open), allocates the minimum-size
 * buffer, and links the unit onto the global list.  The device starts
 * inactive (no tty attached) until SNPSTTY is issued.
 */
static int snpopen(dev, flag, mode, td) dev_t dev; int flag, mode; struct thread *td; { struct snoop *snp; if (dev->si_drv1 == NULL) { dev->si_flags &= ~SI_CHEAPCLONE; dev->si_drv1 = snp = malloc(sizeof(*snp), M_SNP, M_WAITOK | M_ZERO); snp->snp_unit = dev2unit(dev); } else return (EBUSY); /* *
We intentionally do not OR flags with SNOOP_OPEN, but set them so * all previous settings (especially SNOOP_OFLOW) will be cleared. */ snp->snp_flags = SNOOP_OPEN; snp->snp_buf = malloc(SNOOP_MINLEN, M_SNP, M_WAITOK); snp->snp_blen = SNOOP_MINLEN; snp->snp_base = 0; snp->snp_len = 0; /* * snp_tty == NULL is for inactive snoop devices. */ snp->snp_tty = NULL; snp->snp_target = NODEV; LIST_INSERT_HEAD(&snp_sclist, snp, snp_list); return (0); }
/*
 * snp_detach: disconnect the snoop from its tty, restoring the tty's
 * previous line discipline when the attachment is still intact.  If
 * the device itself is already closed (SNOOP_OPEN clear) the snoop
 * structure is freed here.
 */
static int snp_detach(snp) struct snoop *snp; { struct tty *tp; snp->snp_base = 0; snp->snp_len = 0; /* * If line disc. changed we do not touch this pointer, SLIP/PPP will * change it anyway. */ tp = snp->snp_tty; if (tp == NULL) goto detach_notty; if (tp && (tp->t_sc == snp) && (tp->t_state & TS_SNOOP) && tp->t_line == snooplinedisc) { tp->t_sc = NULL; tp->t_state &= ~TS_SNOOP; tp->t_line = snp->snp_olddisc; } else printf("snp%d: bad attached tty data\n", snp->snp_unit); snp->snp_tty = NULL; snp->snp_target = NODEV; detach_notty: selwakeuppri(&snp->snp_sel, PZERO + 1); if ((snp->snp_flags & SNOOP_OPEN) == 0) free(snp, M_SNP); return (0); }
/*
 * snpclose: /dev/snpN close entry.  Frees the buffer, unlinks the
 * unit, destroys the cloned device node, and detaches from the tty
 * (snp_detach frees the structure since SNOOP_OPEN is now clear).
 */
static int snpclose(dev, flags, fmt, td) dev_t dev; int flags; int fmt; struct thread *td; { struct snoop *snp; snp = dev->si_drv1; snp->snp_blen = 0; LIST_REMOVE(snp, snp_list); free(snp->snp_buf, M_SNP); snp->snp_flags &= ~SNOOP_OPEN; dev->si_drv1 = NULL; destroy_dev(dev); return (snp_detach(snp)); }
/*
 * snp_down: mark the snoop "down" (target gone or overflowed), shrink
 * the buffer back to the minimum size, and detach from the tty.
 */
static int snp_down(snp) struct snoop *snp; { if (snp->snp_blen != SNOOP_MINLEN) { free(snp->snp_buf, M_SNP); snp->snp_buf = malloc(SNOOP_MINLEN, M_SNP, M_WAITOK); snp->snp_blen = SNOOP_MINLEN; } snp->snp_flags |= SNOOP_DOWN; return (snp_detach(snp)); }
/*
 * snpioctl: /dev/snpN ioctl entry.
 * SNPSTTY attaches the snoop to the tty named by the udev_t argument
 * (NODEV detaches), installing the snoop line discipline; SNPGTTY
 * returns the current target; FIONREAD reports buffered bytes or one
 * of the SNP_* status codes when no tty is attached.
 */
static int snpioctl(dev, cmd, data, flags, td) dev_t dev; u_long cmd; caddr_t data; int flags; struct thread *td; { struct snoop *snp; struct tty *tp, *tpo; dev_t tdev; int s; snp = dev->si_drv1; switch (cmd) { case SNPSTTY: tdev = udev2dev(*((udev_t *)data)); if (tdev == NODEV) return (snp_down(snp)); tp = snpdevtotty(tdev); if (!tp) return
(EINVAL); if (tp->t_state & TS_SNOOP) return (EBUSY); s = spltty(); if (snp->snp_target == NODEV) { tpo = snp->snp_tty; if (tpo) tpo->t_state &= ~TS_SNOOP; } tp->t_sc = (caddr_t)snp; tp->t_state |= TS_SNOOP; snp->snp_olddisc = tp->t_line; tp->t_line = snooplinedisc; snp->snp_tty = tp; snp->snp_target = tdev; /* * Clean overflow and down flags - * we'll have a chance to get them in the future :))) */ snp->snp_flags &= ~SNOOP_OFLOW; snp->snp_flags &= ~SNOOP_DOWN; splx(s); break; case SNPGTTY: /* * We keep snp_target field specially to make * SNPGTTY happy, else we can't know what is device * major/minor for tty. */ *((udev_t *)data) = dev2udev(snp->snp_target); break; case FIONBIO: break; case FIOASYNC: if (*(int *)data) snp->snp_flags |= SNOOP_ASYNC; else snp->snp_flags &= ~SNOOP_ASYNC; break; case FIONREAD: s = spltty(); if (snp->snp_tty != NULL) *(int *)data = snp->snp_len; else if (snp->snp_flags & SNOOP_DOWN) { if (snp->snp_flags & SNOOP_OFLOW) *(int *)data = SNP_OFLOW; else *(int *)data = SNP_TTYCLOSE; } else { *(int *)data = SNP_DETACH; } splx(s); break; default: return (ENOTTY); } return (0); }
/*
 * snppoll: /dev/snpN poll entry.  Readable when data is buffered or
 * the snoop is down (so callers never block on a dead snoop; FIONREAD
 * then distinguishes why).
 */
static int snppoll(dev, events, td) dev_t dev; int events; struct thread *td; { struct snoop *snp; int revents; snp = dev->si_drv1; revents = 0; /* * If snoop is down, we don't want to poll() forever so we return 1. * Caller should see if we down via FIONREAD ioctl(). The last should * return -1 to indicate down state.
*/ if (events & (POLLIN | POLLRDNORM)) { if (snp->snp_flags & SNOOP_DOWN || snp->snp_len > 0) revents |= events & (POLLIN | POLLRDNORM); else selrecord(td, &snp->snp_sel); } return (revents); }
/*
 * snp_clone: dev_clone event handler.  Creates /dev/snpN on first
 * lookup of a unit, marking it cheap-clonable so unopened nodes can be
 * reclaimed.
 */
static void snp_clone(arg, name, namelen, dev) void *arg; char *name; int namelen; dev_t *dev; { int u, i; if (*dev != NODEV) return; if (dev_stdclone(name, NULL, "snp", &u) != 1) return; i = clone_create(&snpclones, &snp_cdevsw, &u, dev, 0); if (i) *dev = make_dev(&snp_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600, "snp%d", u); if (*dev != NULL) (*dev)->si_flags |= SI_CHEAPCLONE; }
/*
 * snp_modevent: module load/unload.  Load registers the clone handler
 * and the snoop line discipline; unload refuses while any snoop unit
 * is open, then unwinds both registrations.
 */
static int snp_modevent(mod, type, data) module_t mod; int type; void *data; { static eventhandler_tag eh_tag; switch (type) { case MOD_LOAD: /* XXX error checking. */ clone_setup(&snpclones); eh_tag = EVENTHANDLER_REGISTER(dev_clone, snp_clone, 0, 1000); snooplinedisc = ldisc_register(LDISC_LOAD, &snpdisc); break; case MOD_UNLOAD: if (!LIST_EMPTY(&snp_sclist)) return (EBUSY); EVENTHANDLER_DEREGISTER(dev_clone, eh_tag); clone_cleanup(&snpclones); ldisc_deregister(snooplinedisc); break; default: break; } return (0); } static moduledata_t snp_mod = { "snp", snp_modevent, NULL }; DECLARE_MODULE(snp, snp_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); Index: head/sys/dev/sr/if_sr.c =================================================================== --- head/sys/dev/sr/if_sr.c (revision 129878) +++ head/sys/dev/sr/if_sr.c (revision 129879) @@ -1,2962 +1,2963 @@ /* * Copyright (c) 1996 - 2001 John Hay. * Copyright (c) 1996 SDL Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Programming assumptions and other issues. * * Only a 16K window will be used. * * The descriptors of a DMA channel will fit in a 16K memory window. * * The buffers of a transmit DMA channel will fit in a 16K memory window. * * When interface is going up, handshaking is set and it is only cleared * when the interface is down'ed. * * There should be a way to set/reset Raw HDLC/PPP, Loopback, DCE/DTE, * internal/external clock, etc..... 
* */ #include "opt_netgraph.h" #ifdef NETGRAPH #include #endif /* NETGRAPH */ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #ifdef NETGRAPH #include #else /* NETGRAPH */ #include #include #endif /* NETGRAPH */ #include #include #include #ifdef NETGRAPH #include #include #endif /* NETGRAPH */ /* #define USE_MODEMCK */ #ifndef BUGGY #define BUGGY 0 #endif #ifndef NETGRAPH #define PPP_HEADER_LEN 4 #endif /* NETGRAPH */ static int next_sc_unit = 0; #ifndef NETGRAPH #ifdef USE_MODEMCK static int sr_watcher = 0; #endif #endif /* NETGRAPH */ /* * Define the software interface for the card... There is one for * every channel (port). */ struct sr_softc { #ifndef NETGRAPH struct sppp ifsppp; /* PPP service w/in system */ #endif /* NETGRAPH */ struct sr_hardc *hc; /* card-level information */ int unit; /* With regard to all sr devices */ int subunit; /* With regard to this card */ struct buf_block { u_int txdesc; /* DPRAM offset */ u_int txstart;/* DPRAM offset */ u_int txend; /* DPRAM offset */ u_int txtail; /* # of 1st free gran */ u_int txmax; /* # of free grans */ u_int txeda; /* err descr addr */ } block[SR_TX_BLOCKS]; char xmit_busy; /* Transmitter is busy */ char txb_inuse; /* # of tx grans in use */ u_int txb_new; /* ndx to new buffer */ u_int txb_next_tx; /* ndx to next gran rdy tx */ u_int rxdesc; /* DPRAM offset */ u_int rxstart; /* DPRAM offset */ u_int rxend; /* DPRAM offset */ u_int rxhind; /* ndx to the hd of rx bufrs */ u_int rxmax; /* # of avail grans */ u_int clk_cfg; /* Clock configuration */ int scachan; /* channel # on card */ #ifdef NETGRAPH int running; /* something is attached so we are running */ int dcd; /* do we have dcd? 
*/ /* ---netgraph bits --- */ char nodename[NG_NODESIZ]; /* store our node name */ int datahooks; /* number of data hooks attached */ node_p node; /* netgraph node */ hook_p hook; /* data hook */ hook_p debug_hook; struct ifqueue xmitq_hipri; /* hi-priority transmit queue */ struct ifqueue xmitq; /* transmit queue */ int flags; /* state */ #define SCF_RUNNING 0x01 /* board is active */ #define SCF_OACTIVE 0x02 /* output is active */ int out_dog; /* watchdog cycles output count-down */ struct callout_handle handle; /* timeout(9) handle */ u_long inbytes, outbytes; /* stats */ u_long lastinbytes, lastoutbytes; /* a second ago */ u_long inrate, outrate; /* highest rate seen */ u_long inlast; /* last input N secs ago */ u_long out_deficit; /* output since last input */ u_long oerrors, ierrors[6]; u_long opackets, ipackets; #endif /* NETGRAPH */ }; #ifdef NETGRAPH #define DOG_HOLDOFF 6 /* dog holds off for 6 secs */ #define QUITE_A_WHILE 300 /* 5 MINUTES */ #define LOTS_OF_PACKETS 100 #endif /* NETGRAPH */ /* * Baud Rate table for Sync Mode. * Each entry consists of 3 elements: * Baud Rate (x100) , TMC, BR * * Baud Rate = FCLK / TMC / 2^BR * Baud table for Crystal freq. 
of 9.8304 Mhz */ #ifdef N2_TEST_SPEED struct rate_line { int target; /* target rate/100 */ int tmc_reg; /* TMC register value */ int br_reg; /* BR (BaudRateClk) selector */ } n2_rates[] = { /* Baudx100 TMC BR */ { 3, 128, 8 }, { 6, 128, 7 }, { 12, 128, 6 }, { 24, 128, 5 }, { 48, 128, 4 }, { 96, 128, 3 }, { 192, 128, 2 }, { 384, 128, 1 }, { 560, 88, 1 }, { 640, 77, 1 }, { 1280, 38, 1 }, { 2560, 19, 1 }, { 5120, 10, 1 }, { 10000, 5, 1 }, { 15000, 3, 1 }, { 25000, 2, 1 }, { 50000, 1, 1 }, { 0, 0, 0 } }; int sr_test_speed[] = { N2_TEST_SPEED, N2_TEST_SPEED }; int etc0vals[] = { SR_MCR_ETC0, /* ISA channel 0 */ SR_MCR_ETC1, /* ISA channel 1 */ SR_FECR_ETC0, /* PCI channel 0 */ SR_FECR_ETC1 /* PCI channel 1 */ }; #endif devclass_t sr_devclass; #ifndef NETGRAPH MODULE_DEPEND(if_sr, sppp, 1, 1, 1); #else MODULE_DEPEND(ng_sync_sr, netgraph, 1, 1, 1); #endif static void srintr(void *arg); static void sr_xmit(struct sr_softc *sc); #ifndef NETGRAPH static void srstart(struct ifnet *ifp); static int srioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static void srwatchdog(struct ifnet *ifp); #else static void srstart(struct sr_softc *sc); static void srwatchdog(struct sr_softc *sc); #endif /* NETGRAPH */ static int sr_packet_avail(struct sr_softc *sc, int *len, u_char *rxstat); static void sr_copy_rxbuf(struct mbuf *m, struct sr_softc *sc, int len); static void sr_eat_packet(struct sr_softc *sc, int single); static void sr_get_packets(struct sr_softc *sc); static void sr_up(struct sr_softc *sc); static void sr_down(struct sr_softc *sc); static void src_init(struct sr_hardc *hc); static void sr_init_sca(struct sr_hardc *hc); static void sr_init_msci(struct sr_softc *sc); static void sr_init_rx_dmac(struct sr_softc *sc); static void sr_init_tx_dmac(struct sr_softc *sc); static void sr_dmac_intr(struct sr_hardc *hc, u_char isr); static void sr_msci_intr(struct sr_hardc *hc, u_char isr); static void sr_timer_intr(struct sr_hardc *hc, u_char isr); #ifndef NETGRAPH #ifdef 
USE_MODEMCK static void sr_modemck(void *x); #endif #else static void sr_modemck(struct sr_softc *x); #endif /* NETGRAPH */ #ifdef NETGRAPH static void ngsr_watchdog_frame(void * arg); static void ngsr_init(void* ignored); static ng_constructor_t ngsr_constructor; static ng_rcvmsg_t ngsr_rcvmsg; static ng_shutdown_t ngsr_shutdown; static ng_newhook_t ngsr_newhook; /*static ng_findhook_t ngsr_findhook; */ static ng_connect_t ngsr_connect; static ng_rcvdata_t ngsr_rcvdata; static ng_disconnect_t ngsr_disconnect; static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_SR_NODE_TYPE, .constructor = ngsr_constructor, .rcvmsg = ngsr_rcvmsg, .shutdown = ngsr_shutdown, .newhook = ngsr_newhook, .connect = ngsr_connect, .rcvdata = ngsr_rcvdata, .disconnect = ngsr_disconnect, }; static int ngsr_done_init = 0; #endif /* NETGRAPH */ /* * Register the ports on the adapter. * Fill in the info for each port. #ifndef NETGRAPH * Attach each port to sppp and bpf. #endif */ int sr_attach(device_t device) { int intf_sw, pndx; u_int32_t flags; u_int fecr; struct sr_hardc *hc; struct sr_softc *sc; #ifndef NETGRAPH struct ifnet *ifp; #endif /* NETGRAPH */ int unit; /* index: channel w/in card */ hc = (struct sr_hardc *)device_get_softc(device); MALLOC(sc, struct sr_softc *, hc->numports * sizeof(struct sr_softc), M_DEVBUF, M_WAITOK | M_ZERO); if (sc == NULL) goto errexit; hc->sc = sc; /* * Get the TX clock direction and configuration. The default is a * single external clock which is used by RX and TX. 
*/ switch(hc->cardtype) { case SR_CRD_N2: flags = device_get_flags(device); #ifdef N2_TEST_SPEED if (sr_test_speed[0] > 0) hc->sc[0].clk_cfg = SR_FLAGS_INT_CLK; else #endif if (flags & SR_FLAGS_0_CLK_MSK) hc->sc[0].clk_cfg = (flags & SR_FLAGS_0_CLK_MSK) >> SR_FLAGS_CLK_SHFT; if (hc->numports == 2) #ifdef N2_TEST_SPEED if (sr_test_speed[1] > 0) hc->sc[0].clk_cfg = SR_FLAGS_INT_CLK; else #endif if (flags & SR_FLAGS_1_CLK_MSK) hc->sc[1].clk_cfg = (flags & SR_FLAGS_1_CLK_MSK) >> (SR_FLAGS_CLK_SHFT + SR_FLAGS_CLK_CHAN_SHFT); break; case SR_CRD_N2PCI: fecr = sr_read_fecr(hc); for (pndx = 0; pndx < hc->numports; pndx++, sc++) { switch (pndx) { case 1: intf_sw = fecr & SR_FECR_ID1 >> SR_FE_ID1_SHFT; break; case 0: default: intf_sw = fecr & SR_FECR_ID0 >> SR_FE_ID0_SHFT; } #ifdef N2_TEST_SPEED if (sr_test_speed[pndx] > 0) sc->clk_cfg = SR_FLAGS_INT_CLK; else #endif switch (intf_sw) { default: case SR_FE_ID_RS232: case SR_FE_ID_HSSI: case SR_FE_ID_RS422: case SR_FE_ID_TEST: break; case SR_FE_ID_V35: sc->clk_cfg = SR_FLAGS_EXT_SEP_CLK; break; case SR_FE_ID_X21: sc->clk_cfg = SR_FLAGS_EXT_CLK; break; } } sc = hc->sc; break; } /* * Report Card configuration information before we start configuring * each channel on the card... */ printf("src%d: %uK RAM (%d mempages) @ %p-%p, %u ports.\n", hc->cunit, hc->memsize / 1024, hc->mempages, hc->mem_start, hc->mem_end, hc->numports); src_init(hc); sr_init_sca(hc); if (BUS_SETUP_INTR(device_get_parent(device), device, hc->res_irq, INTR_TYPE_NET, srintr, hc, &hc->intr_cookie) != 0) goto errexit; /* * Now configure each port on the card. 
*/ for (unit = 0; unit < hc->numports; sc++, unit++) { sc->hc = hc; sc->subunit = unit; sc->unit = next_sc_unit; next_sc_unit++; sc->scachan = unit % NCHAN; sr_init_rx_dmac(sc); sr_init_tx_dmac(sc); sr_init_msci(sc); printf("sr%d: Adapter %d, port %d.\n", sc->unit, hc->cunit, sc->subunit); #ifndef NETGRAPH ifp = &sc->ifsppp.pp_if; ifp->if_softc = sc; if_initname(ifp, device_get_name(device), device_get_unit(device)); ifp->if_mtu = PP_MTU; ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; ifp->if_ioctl = srioctl; ifp->if_start = srstart; ifp->if_watchdog = srwatchdog; sc->ifsppp.pp_flags = PP_KEEPALIVE; sppp_attach((struct ifnet *)&sc->ifsppp); if_attach(ifp); bpfattach(ifp, DLT_PPP, PPP_HEADER_LEN); #else /* NETGRAPH */ /* * we have found a node, make sure our 'type' is availabe. */ if (ngsr_done_init == 0) ngsr_init(NULL); if (ng_make_node_common(&typestruct, &sc->node) != 0) goto errexit; sprintf(sc->nodename, "%s%d", NG_SR_NODE_TYPE, sc->unit); if (ng_name_node(sc->node, sc->nodename)) { NG_NODE_UNREF(sc->node); /* make it go away again */ goto errexit; } NG_NODE_SET_PRIVATE(sc->node, sc); callout_handle_init(&sc->handle); sc->xmitq.ifq_maxlen = IFQ_MAXLEN; sc->xmitq_hipri.ifq_maxlen = IFQ_MAXLEN; mtx_init(&sc->xmitq.ifq_mtx, "sr_xmitq", NULL, MTX_DEF); mtx_init(&sc->xmitq_hipri.ifq_mtx, "sr_xmitq_hipri", NULL, MTX_DEF); sc->running = 0; #endif /* NETGRAPH */ } if (hc->mempages) SRC_SET_OFF(hc); return (0); errexit: sr_deallocate_resources(device); return (ENXIO); } int sr_detach(device_t device) { device_t parent = device_get_parent(device); struct sr_hardc *hc = device_get_softc(device); if (hc->intr_cookie != NULL) { if (BUS_TEARDOWN_INTR(parent, device, hc->res_irq, hc->intr_cookie) != 0) { printf("intr teardown failed.. continuing\n"); } hc->intr_cookie = NULL; } /* XXX Stop the DMA. */ /* * deallocate any system resources we may have * allocated on behalf of this driver. 
*/ FREE(hc->sc, M_DEVBUF); hc->sc = NULL; hc->mem_start = NULL; return (sr_deallocate_resources(device)); } int sr_allocate_ioport(device_t device, int rid, u_long size) { struct sr_hardc *hc = device_get_softc(device); hc->rid_ioport = rid; hc->res_ioport = bus_alloc_resource(device, SYS_RES_IOPORT, &hc->rid_ioport, 0ul, ~0ul, size, RF_ACTIVE); if (hc->res_ioport == NULL) { goto errexit; } hc->bt_ioport = rman_get_bustag(hc->res_ioport); hc->bh_ioport = rman_get_bushandle(hc->res_ioport); return (0); errexit: sr_deallocate_resources(device); return (ENXIO); } int sr_allocate_irq(device_t device, int rid, u_long size) { struct sr_hardc *hc = device_get_softc(device); hc->rid_irq = rid; hc->res_irq = bus_alloc_resource_any(device, SYS_RES_IRQ, &hc->rid_irq, RF_SHAREABLE|RF_ACTIVE); if (hc->res_irq == NULL) { goto errexit; } return (0); errexit: sr_deallocate_resources(device); return (ENXIO); } int sr_allocate_memory(device_t device, int rid, u_long size) { struct sr_hardc *hc = device_get_softc(device); hc->rid_memory = rid; hc->res_memory = bus_alloc_resource(device, SYS_RES_MEMORY, &hc->rid_memory, 0ul, ~0ul, size, RF_ACTIVE); if (hc->res_memory == NULL) { goto errexit; } hc->bt_memory = rman_get_bustag(hc->res_memory); hc->bh_memory = rman_get_bushandle(hc->res_memory); return (0); errexit: sr_deallocate_resources(device); return (ENXIO); } int sr_allocate_plx_memory(device_t device, int rid, u_long size) { struct sr_hardc *hc = device_get_softc(device); hc->rid_plx_memory = rid; hc->res_plx_memory = bus_alloc_resource(device, SYS_RES_MEMORY, &hc->rid_plx_memory, 0ul, ~0ul, size, RF_ACTIVE); if (hc->res_plx_memory == NULL) { goto errexit; } return (0); errexit: sr_deallocate_resources(device); return (ENXIO); } int sr_deallocate_resources(device_t device) { struct sr_hardc *hc = device_get_softc(device); if (hc->res_irq != 0) { bus_deactivate_resource(device, SYS_RES_IRQ, hc->rid_irq, hc->res_irq); bus_release_resource(device, SYS_RES_IRQ, hc->rid_irq, 
hc->res_irq); hc->res_irq = 0; } if (hc->res_ioport != 0) { bus_deactivate_resource(device, SYS_RES_IOPORT, hc->rid_ioport, hc->res_ioport); bus_release_resource(device, SYS_RES_IOPORT, hc->rid_ioport, hc->res_ioport); hc->res_ioport = 0; } if (hc->res_memory != 0) { bus_deactivate_resource(device, SYS_RES_MEMORY, hc->rid_memory, hc->res_memory); bus_release_resource(device, SYS_RES_MEMORY, hc->rid_memory, hc->res_memory); hc->res_memory = 0; } if (hc->res_plx_memory != 0) { bus_deactivate_resource(device, SYS_RES_MEMORY, hc->rid_plx_memory, hc->res_plx_memory); bus_release_resource(device, SYS_RES_MEMORY, hc->rid_plx_memory, hc->res_plx_memory); hc->res_plx_memory = 0; } return (0); } /* * N2 Interrupt Service Routine * * First figure out which SCA gave the interrupt. * Process it. * See if there is other interrupts pending. * Repeat until there no interrupts remain. */ static void srintr(void *arg) { struct sr_hardc *hc = (struct sr_hardc *)arg; sca_regs *sca = hc->sca; /* MSCI register tree */ u_char isr0, isr1, isr2; /* interrupt statii captured */ #if BUGGY > 1 printf("sr: srintr_hc(hc=%08x)\n", hc); #endif /* * Since multiple interfaces may share this interrupt, we must loop * until no interrupts are still pending service. */ while (1) { /* * Read all three interrupt status registers from the N2 * card... */ isr0 = SRC_GET8(hc, sca->isr0); isr1 = SRC_GET8(hc, sca->isr1); isr2 = SRC_GET8(hc, sca->isr2); /* * If all three registers returned 0, we've finished * processing interrupts from this device, so we can quit * this loop... */ if ((isr0 | isr1 | isr2) == 0) break; #if BUGGY > 2 printf("src%d: srintr_hc isr0 %x, isr1 %x, isr2 %x\n", #ifndef NETGRAPH unit, isr0, isr1, isr2); #else hc->cunit, isr0, isr1, isr2); #endif /* NETGRAPH */ #endif /* * Now we can dispatch the interrupts. Since we don't expect * either MSCI or timer interrupts, we'll test for DMA * interrupts first... 
*/ if (isr1) /* DMA-initiated interrupt */ sr_dmac_intr(hc, isr1); if (isr0) /* serial part IRQ? */ sr_msci_intr(hc, isr0); if (isr2) /* timer-initiated interrupt */ sr_timer_intr(hc, isr2); } } /* * This will only start the transmitter. It is assumed that the data * is already there. * It is normally called from srstart() or sr_dmac_intr(). */ static void sr_xmit(struct sr_softc *sc) { u_short cda_value; /* starting descriptor */ u_short eda_value; /* ending descriptor */ struct sr_hardc *hc; #ifndef NETGRAPH struct ifnet *ifp; /* O/S Network Services */ #endif /* NETGRAPH */ dmac_channel *dmac; /* DMA channel registers */ #if BUGGY > 0 printf("sr: sr_xmit( sc=%08x)\n", sc); #endif hc = sc->hc; #ifndef NETGRAPH ifp = &sc->ifsppp.pp_if; #endif /* NETGRAPH */ dmac = &hc->sca->dmac[DMAC_TXCH(sc->scachan)]; /* * Get the starting and ending addresses of the chain to be * transmitted and pass these on to the DMA engine on-chip. */ cda_value = sc->block[sc->txb_next_tx].txdesc + hc->mem_pstart; cda_value &= 0x00ffff; eda_value = sc->block[sc->txb_next_tx].txeda + hc->mem_pstart; eda_value &= 0x00ffff; SRC_PUT16(hc, dmac->cda, cda_value); SRC_PUT16(hc, dmac->eda, eda_value); /* * Now we'll let the DMA status register know about this change */ SRC_PUT8(hc, dmac->dsr, SCA_DSR_DE); sc->xmit_busy = 1; /* mark transmitter busy */ #if BUGGY > 2 printf("sr%d: XMIT cda=%04x, eda=%4x, rcda=%08lx\n", sc->unit, cda_value, eda_value, sc->block[sc->txb_next_tx].txdesc + hc->mem_pstart); #endif sc->txb_next_tx++; /* update next transmit seq# */ if (sc->txb_next_tx == SR_TX_BLOCKS) /* handle wrap... */ sc->txb_next_tx = 0; #ifndef NETGRAPH /* * Finally, we'll set a timout (which will start srwatchdog()) * within the O/S network services layer... */ ifp->if_timer = 2; /* Value in seconds. */ #else /* * Don't time out for a while. 
*/ sc->out_dog = DOG_HOLDOFF; /* give ourself some breathing space*/ #endif /* NETGRAPH */ } /* * This function will be called from the upper level when a user add a * packet to be send, and from the interrupt handler after a finished * transmit. * * NOTE: it should run at spl_imp(). * * This function only place the data in the oncard buffers. It does not * start the transmition. sr_xmit() does that. * * Transmitter idle state is indicated by the IFF_OACTIVE flag. * The function that clears that should ensure that the transmitter * and its DMA is in a "good" idle state. */ #ifndef NETGRAPH static void srstart(struct ifnet *ifp) { struct sr_softc *sc; /* channel control structure */ #else static void srstart(struct sr_softc *sc) { #endif /* NETGRAPH */ struct sr_hardc *hc; /* card control/config block */ int len; /* total length of a packet */ int pkts; /* packets placed in DPRAM */ int tlen; /* working length of pkt */ u_int i; struct mbuf *mtx; /* message buffer from O/S */ u_char *txdata; /* buffer address in DPRAM */ sca_descriptor *txdesc; /* working descriptor pointr */ struct buf_block *blkp; #ifndef NETGRAPH #if BUGGY > 0 printf("sr: srstart( ifp=%08x)\n", ifp); #endif sc = ifp->if_softc; if ((ifp->if_flags & IFF_RUNNING) == 0) return; #endif /* NETGRAPH */ hc = sc->hc; /* * It is OK to set the memory window outside the loop because all tx * buffers and descriptors are assumed to be in the same 16K window. */ if (hc->mempages) { SRC_SET_ON(hc); SRC_SET_MEM(hc, sc->block[0].txdesc); } /* * Loop to place packets into DPRAM. * * We stay in this loop until there is nothing in * the TX queue left or the tx buffers are full. */ top_srstart: /* * See if we have space for more packets. */ if (sc->txb_inuse == SR_TX_BLOCKS) { /* out of space? 
*/ #ifndef NETGRAPH ifp->if_flags |= IFF_OACTIVE; /* yes, mark active */ #else /*ifp->if_flags |= IFF_OACTIVE;*/ /* yes, mark active */ #endif /* NETGRAPH */ if (hc->mempages) SRC_SET_OFF(hc); #if BUGGY > 9 printf("sr%d.srstart: sc->txb_inuse=%d; DPRAM full...\n", sc->unit, sc->txb_inuse); #endif return; } /* * OK, the card can take more traffic. Let's see if there's any * pending from the system... * * NOTE: * The architecture of the networking interface doesn't * actually call us like 'write()', providing an address. We get * started, a lot like a disk strategy routine, and we actually call * back out to the system to get traffic to send... * * NOTE: * If we were gonna run through another layer, we would use a * dispatch table to select the service we're getting a packet * from... */ #ifndef NETGRAPH mtx = sppp_dequeue(ifp); #else /* NETGRAPH */ IF_DEQUEUE(&sc->xmitq_hipri, mtx); if (mtx == NULL) { IF_DEQUEUE(&sc->xmitq, mtx); } #endif /* NETGRAPH */ if (!mtx) { if (hc->mempages) SRC_SET_OFF(hc); return; } /* * OK, we got a packet from the network services of the O/S. Now we * can move it into the DPRAM (under control of the descriptors) and * fire it off... */ pkts = 0; i = 0; /* counts # of granules used */ blkp = &sc->block[sc->txb_new]; /* address of free granule */ txdesc = (sca_descriptor *) (hc->mem_start + (blkp->txdesc & hc->winmsk)); txdata = (u_char *)(hc->mem_start + (blkp->txstart & hc->winmsk)); /* * Now we'll try to install as many packets as possible into the * card's DP RAM buffers. */ for (;;) { /* perform actual copy of packet */ len = mtx->m_pkthdr.len; /* length of message */ #if BUGGY > 1 printf("sr%d.srstart: mbuf @ %08lx, %d bytes\n", sc->unit, mtx, len); #endif #ifndef NETGRAPH BPF_MTAP(ifp, mtx); #else /* NETGRAPH */ sc->outbytes += len; #endif /* NETGRAPH */ /* * We can perform a straight copy because the tranmit * buffers won't wrap. */ m_copydata(mtx, 0, len, txdata); /* * Now we know how big the message is gonna be. 
We must now * construct the descriptors to drive this message out... */ tlen = len; while (tlen > SR_BUF_SIZ) { /* loop for full granules */ txdesc->stat = 0; /* reset bits */ txdesc->len = SR_BUF_SIZ; /* size of granule */ tlen -= SR_BUF_SIZ; txdesc++; /* move to next dscr */ txdata += SR_BUF_SIZ; /* adjust data addr */ i++; } /* * This section handles the setting of the final piece of a * message. */ txdesc->stat = SCA_DESC_EOM; txdesc->len = tlen; pkts++; /* * prepare for subsequent packets (if any) */ txdesc++; txdata += SR_BUF_SIZ; /* next mem granule */ i++; /* count of granules */ /* * OK, we've now placed the message into the DPRAM where it * can be transmitted. We'll now release the message memory * and update the statistics... */ m_freem(mtx); #ifndef NETGRAPH ++sc->ifsppp.pp_if.if_opackets; #else /* NETGRAPH */ sc->opackets++; #endif /* NETGRAPH */ /* * Check if we have space for another packet. XXX This is * hardcoded. A packet can't be larger than 3 buffers (3 x * 512). */ if ((i + 3) >= blkp->txmax) { /* enough remains? */ #if BUGGY > 9 printf("sr%d.srstart: i=%d (%d pkts); card full.\n", sc->unit, i, pkts); #endif break; } /* * We'll pull the next message to be sent (if any) */ #ifndef NETGRAPH mtx = sppp_dequeue(ifp); #else /* NETGRAPH */ IF_DEQUEUE(&sc->xmitq_hipri, mtx); if (mtx == NULL) { IF_DEQUEUE(&sc->xmitq, mtx); } #endif /* NETGRAPH */ if (!mtx) { /* no message? We're done! */ #if BUGGY > 9 printf("sr%d.srstart: pending=0, pkts=%d\n", sc->unit, pkts); #endif break; } } blkp->txtail = i; /* record next free granule */ /* * Mark the last descriptor, so that the SCA know where to stop. */ txdesc--; /* back up to last descriptor in list */ txdesc->stat |= SCA_DESC_EOT; /* mark as end of list */ /* * Now we'll reset the transmit granule's descriptor address so we * can record this in the structure and fire it off w/ the DMA * processor of the serial chip... 
*/ txdesc = (sca_descriptor *)(uintptr_t)blkp->txdesc; blkp->txeda = (u_short)((uintptr_t)&txdesc[i]); sc->txb_inuse++; /* update inuse status */ sc->txb_new++; /* new traffic wuz added */ if (sc->txb_new == SR_TX_BLOCKS) sc->txb_new = 0; /* * If the tranmitter wasn't marked as "busy" we will force it to be * started... */ if (sc->xmit_busy == 0) { sr_xmit(sc); #if BUGGY > 9 printf("sr%d.srstart: called sr_xmit()\n", sc->unit); #endif } goto top_srstart; } #ifndef NETGRAPH /* * Handle ioctl's at the device level, though we *will* call up * a layer... */ #if BUGGY > 2 static int bug_splats[] = {0, 0, 0, 0, 0, 0, 0, 0}; #endif static int srioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int s, error, was_up, should_be_up; struct sr_softc *sc = ifp->if_softc; #if BUGGY > 0 if_printf(ifp, "srioctl(ifp=%08x, cmd=%08x, data=%08x)\n", ifp, cmd, data); #endif was_up = ifp->if_flags & IFF_RUNNING; error = sppp_ioctl(ifp, cmd, data); #if BUGGY > 1 if_printf(ifp, "ioctl: ifsppp.pp_flags = %08x, if_flags %08x.\n", ((struct sppp *)ifp)->pp_flags, ifp->if_flags); #endif if (error) return error; if ((cmd != SIOCSIFFLAGS) && (cmd != SIOCSIFADDR)) { #if BUGGY > 2 if (bug_splats[sc->unit]++ < 2) { printf("sr(%d).if_addrlist = %08x\n", sc->unit, ifp->if_addrlist); printf("sr(%d).if_bpf = %08x\n", sc->unit, ifp->if_bpf); printf("sr(%d).if_init = %08x\n", sc->unit, ifp->if_init); printf("sr(%d).if_output = %08x\n", sc->unit, ifp->if_output); printf("sr(%d).if_start = %08x\n", sc->unit, ifp->if_start); printf("sr(%d).if_done = %08x\n", sc->unit, ifp->if_done); printf("sr(%d).if_ioctl = %08x\n", sc->unit, ifp->if_ioctl); printf("sr(%d).if_reset = %08x\n", sc->unit, ifp->if_reset); printf("sr(%d).if_watchdog = %08x\n", sc->unit, ifp->if_watchdog); } #endif return 0; } s = splimp(); should_be_up = ifp->if_flags & IFF_RUNNING; if (!was_up && should_be_up) { /* * Interface should be up -- start it. 
*/ sr_up(sc); srstart(ifp); /* * XXX Clear the IFF_UP flag so that the link will only go * up after sppp lcp and ipcp negotiation. */ /* ifp->if_flags &= ~IFF_UP; */ } else if (was_up && !should_be_up) { /* * Interface should be down -- stop it. */ sr_down(sc); sppp_flush(ifp); } splx(s); return 0; } #endif /* NETGRAPH */ /* * This is to catch lost tx interrupts. */ static void #ifndef NETGRAPH srwatchdog(struct ifnet *ifp) #else srwatchdog(struct sr_softc *sc) #endif /* NETGRAPH */ { int got_st0, got_st1, got_st3, got_dsr; #ifndef NETGRAPH struct sr_softc *sc = ifp->if_softc; #endif /* NETGRAPH */ struct sr_hardc *hc = sc->hc; msci_channel *msci = &hc->sca->msci[sc->scachan]; dmac_channel *dmac = &sc->hc->sca->dmac[sc->scachan]; #if BUGGY > 0 #ifndef NETGRAPH printf("srwatchdog(unit=%d)\n", unit); #else printf("srwatchdog(unit=%d)\n", sc->unit); #endif /* NETGRAPH */ #endif #ifndef NETGRAPH if (!(ifp->if_flags & IFF_RUNNING)) return; ifp->if_oerrors++; /* update output error count */ #else /* NETGRAPH */ sc->oerrors++; /* update output error count */ #endif /* NETGRAPH */ got_st0 = SRC_GET8(hc, msci->st0); got_st1 = SRC_GET8(hc, msci->st1); got_st3 = SRC_GET8(hc, msci->st3); got_dsr = SRC_GET8(hc, dmac->dsr); #ifndef NETGRAPH #if 0 if (ifp->if_flags & IFF_DEBUG) #endif printf("sr%d: transmit failed, " #else /* NETGRAPH */ printf("sr%d: transmit failed, " #endif /* NETGRAPH */ "ST0 %02x, ST1 %02x, ST3 %02x, DSR %02x.\n", sc->unit, got_st0, got_st1, got_st3, got_dsr); if (SRC_GET8(hc, msci->st1) & SCA_ST1_UDRN) { SRC_PUT8(hc, msci->cmd, SCA_CMD_TXABORT); SRC_PUT8(hc, msci->cmd, SCA_CMD_TXENABLE); SRC_PUT8(hc, msci->st1, SCA_ST1_UDRN); } sc->xmit_busy = 0; #ifndef NETGRAPH ifp->if_flags &= ~IFF_OACTIVE; #else /*ifp->if_flags &= ~IFF_OACTIVE; */ #endif /* NETGRAPH */ if (sc->txb_inuse && --sc->txb_inuse) sr_xmit(sc); #ifndef NETGRAPH srstart(ifp); /* restart transmitter */ #else srstart(sc); /* restart transmitter */ #endif /* NETGRAPH */ } static void sr_up(struct 
sr_softc *sc) { struct sr_hardc *hc = sc->hc; sca_regs *sca = hc->sca; msci_channel *msci = &sca->msci[sc->scachan]; #if BUGGY > 0 printf("sr_up(sc=%08x)\n", sc); #endif /* * Enable transmitter and receiver. Raise DTR and RTS. Enable * interrupts. * * XXX What about using AUTO mode in msci->md0 ??? */ SRC_PUT8(hc, msci->ctl, SRC_GET8(hc, msci->ctl) & ~SCA_CTL_RTS); if (sc->scachan == 0) switch (hc->cardtype) { case SR_CRD_N2: sr_outb(hc, SR_MCR, (sr_inb(hc, SR_MCR) & ~SR_MCR_DTR0)); break; case SR_CRD_N2PCI: sr_write_fecr(hc, sr_read_fecr(hc) & ~SR_FECR_DTR0); break; } else switch (hc->cardtype) { case SR_CRD_N2: sr_outb(hc, SR_MCR, (sr_inb(hc, SR_MCR) & ~SR_MCR_DTR1)); break; case SR_CRD_N2PCI: sr_write_fecr(hc, sr_read_fecr(hc) & ~SR_FECR_DTR1); break; } if (sc->scachan == 0) { SRC_PUT8(hc, sca->ier0, SRC_GET8(hc, sca->ier0) | 0x000F); SRC_PUT8(hc, sca->ier1, SRC_GET8(hc, sca->ier1) | 0x000F); } else { SRC_PUT8(hc, sca->ier0, SRC_GET8(hc, sca->ier0) | 0x00F0); SRC_PUT8(hc, sca->ier1, SRC_GET8(hc, sca->ier1) | 0x00F0); } SRC_PUT8(hc, msci->cmd, SCA_CMD_RXENABLE); sr_inb(hc, 0); /* XXX slow it down a bit. */ SRC_PUT8(hc, msci->cmd, SCA_CMD_TXENABLE); #ifndef NETGRAPH #ifdef USE_MODEMCK if (sr_watcher == 0) sr_modemck(NULL); #endif #else /* NETGRAPH */ untimeout(ngsr_watchdog_frame, sc, sc->handle); sc->handle = timeout(ngsr_watchdog_frame, sc, hz); sc->running = 1; #endif /* NETGRAPH */ } static void sr_down(struct sr_softc *sc) { struct sr_hardc *hc = sc->hc; sca_regs *sca = hc->sca; msci_channel *msci = &sca->msci[sc->scachan]; #if BUGGY > 0 printf("sr_down(sc=%08x)\n", sc); #endif #ifdef NETGRAPH untimeout(ngsr_watchdog_frame, sc, sc->handle); sc->running = 0; #endif /* NETGRAPH */ /* * Disable transmitter and receiver. Lower DTR and RTS. Disable * interrupts. */ SRC_PUT8(hc, msci->cmd, SCA_CMD_RXDISABLE); sr_inb(hc, 0); /* XXX slow it down a bit. 
*/ SRC_PUT8(hc, msci->cmd, SCA_CMD_TXDISABLE); SRC_PUT8(hc, msci->ctl, SRC_GET8(hc, msci->ctl) | SCA_CTL_RTS); if (sc->scachan == 0) switch (hc->cardtype) { case SR_CRD_N2: sr_outb(hc, SR_MCR, sr_inb(hc, SR_MCR) | SR_MCR_DTR0); break; case SR_CRD_N2PCI: sr_write_fecr(hc, sr_read_fecr(hc) | SR_FECR_DTR0); break; } else switch (hc->cardtype) { case SR_CRD_N2: sr_outb(hc, SR_MCR, sr_inb(hc, SR_MCR) | SR_MCR_DTR1); break; case SR_CRD_N2PCI: sr_write_fecr(hc, sr_read_fecr(hc) | SR_FECR_DTR1); break; } if (sc->scachan == 0) { SRC_PUT8(hc, sca->ier0, SRC_GET8(hc, sca->ier0) & ~0x0F); SRC_PUT8(hc, sca->ier1, SRC_GET8(hc, sca->ier1) & ~0x0F); } else { SRC_PUT8(hc, sca->ier0, SRC_GET8(hc, sca->ier0) & ~0xF0); SRC_PUT8(hc, sca->ier1, SRC_GET8(hc, sca->ier1) & ~0xF0); } } /* * Initialize the card, allocate memory for the sr_softc structures * and fill in the pointers. */ static void src_init(struct sr_hardc *hc) { struct sr_softc *sc = hc->sc; int x; u_int chanmem; u_int bufmem; u_int next; u_int descneeded; #if BUGGY > 0 printf("src_init(hc=%08x)\n", hc); #endif chanmem = hc->memsize / hc->numports; next = 0; for (x = 0; x < hc->numports; x++, sc++) { int blk; for (blk = 0; blk < SR_TX_BLOCKS; blk++) { sc->block[blk].txdesc = next; bufmem = (16 * 1024) / SR_TX_BLOCKS; descneeded = bufmem / SR_BUF_SIZ; sc->block[blk].txstart = sc->block[blk].txdesc + ((((descneeded * sizeof(sca_descriptor)) / SR_BUF_SIZ) + 1) * SR_BUF_SIZ); sc->block[blk].txend = next + bufmem; sc->block[blk].txmax = (sc->block[blk].txend - sc->block[blk].txstart) / SR_BUF_SIZ; next += bufmem; #if BUGGY > 2 printf("sr%d: blk %d: txdesc %08x, txstart %08x\n", sc->unit, blk, sc->block[blk].txdesc, sc->block[blk].txstart); #endif } sc->rxdesc = next; bufmem = chanmem - (bufmem * SR_TX_BLOCKS); descneeded = bufmem / SR_BUF_SIZ; sc->rxstart = sc->rxdesc + ((((descneeded * sizeof(sca_descriptor)) / SR_BUF_SIZ) + 1) * SR_BUF_SIZ); sc->rxend = next + bufmem; sc->rxmax = (sc->rxend - sc->rxstart) / SR_BUF_SIZ; next += 
bufmem; } } /* * The things done here are channel independent. * * Configure the sca waitstates. * Configure the global interrupt registers. * Enable master dma enable. */ static void sr_init_sca(struct sr_hardc *hc) { sca_regs *sca = hc->sca; #if BUGGY > 0 printf("sr_init_sca(hc=%08x)\n", hc); #endif /* * Do the wait registers. Set everything to 0 wait states. */ SRC_PUT8(hc, sca->pabr0, 0); SRC_PUT8(hc, sca->pabr1, 0); SRC_PUT8(hc, sca->wcrl, 0); SRC_PUT8(hc, sca->wcrm, 0); SRC_PUT8(hc, sca->wcrh, 0); /* * Configure the interrupt registers. Most are cleared until the * interface is configured. */ SRC_PUT8(hc, sca->ier0, 0x00); /* MSCI interrupts. */ SRC_PUT8(hc, sca->ier1, 0x00); /* DMAC interrupts */ SRC_PUT8(hc, sca->ier2, 0x00); /* TIMER interrupts. */ SRC_PUT8(hc, sca->itcr, 0x00); /* Use ivr and no intr ack */ SRC_PUT8(hc, sca->ivr, 0x40); /* Interrupt vector. */ SRC_PUT8(hc, sca->imvr, 0x40); /* * Configure the timers. XXX Later */ /* * Set the DMA channel priority to rotate between all four channels. * * Enable all dma channels. */ SRC_PUT8(hc, sca->pcr, SCA_PCR_PR2); SRC_PUT8(hc, sca->dmer, SCA_DMER_EN); } /* * Configure the msci * * NOTE: The serial port configuration is hardcoded at the moment. */ static void sr_init_msci(struct sr_softc *sc) { int portndx; /* on-board port number */ u_int mcr_v; /* contents of modem control */ struct sr_hardc *hc = sc->hc; msci_channel *msci = &hc->sca->msci[sc->scachan]; #ifdef N2_TEST_SPEED int br_v; /* contents for BR divisor */ int etcndx; /* index into ETC table */ int fifo_v, gotspeed; /* final tabled speed found */ int tmc_v; /* timer control register */ int wanted; /* speed (bitrate) wanted... 
*/ struct rate_line *rtp; #endif portndx = sc->scachan; #if BUGGY > 0 printf("sr: sr_init_msci( sc=%08x)\n", sc); #endif SRC_PUT8(hc, msci->cmd, SCA_CMD_RESET); SRC_PUT8(hc, msci->md0, SCA_MD0_CRC_1 | SCA_MD0_CRC_CCITT | SCA_MD0_CRC_ENABLE | SCA_MD0_MODE_HDLC); SRC_PUT8(hc, msci->md1, SCA_MD1_NOADDRCHK); SRC_PUT8(hc, msci->md2, SCA_MD2_DUPLEX | SCA_MD2_NRZ); /* * According to the manual I should give a reset after changing the * mode registers. */ SRC_PUT8(hc, msci->cmd, SCA_CMD_RXRESET); SRC_PUT8(hc, msci->ctl, SCA_CTL_IDLPAT | SCA_CTL_UDRNC | SCA_CTL_RTS); /* * XXX Later we will have to support different clock settings. */ switch (sc->clk_cfg) { default: #if BUGGY > 0 printf("sr%: clk_cfg=%08x, selected default clock.\n", portndx, sc->clk_cfg); #endif /* FALLTHROUGH */ case SR_FLAGS_EXT_CLK: /* * For now all interfaces are programmed to use the RX clock * for the TX clock. */ #if BUGGY > 0 printf("sr%d: External Clock Selected.\n", portndx); #endif SRC_PUT8(hc, msci->rxs, SCA_RXS_CLK_RXC0 | SCA_RXS_DIV1); SRC_PUT8(hc, msci->txs, SCA_TXS_CLK_RX | SCA_TXS_DIV1); break; case SR_FLAGS_EXT_SEP_CLK: #if BUGGY > 0 printf("sr%d: Split Clocking Selected.\n", portndx); #endif SRC_PUT8(hc, msci->rxs, SCA_RXS_CLK_RXC0 | SCA_RXS_DIV1); SRC_PUT8(hc, msci->txs, SCA_TXS_CLK_TXC | SCA_TXS_DIV1); break; case SR_FLAGS_INT_CLK: #if BUGGY > 0 printf("sr%d: Internal Clocking selected.\n", portndx); #endif /* * XXX I do need some code to set the baud rate here! */ #ifdef N2_TEST_SPEED switch (hc->cardtype) { case SR_CRD_N2PCI: mcr_v = sr_read_fecr(hc); etcndx = 2; break; case SR_CRD_N2: default: mcr_v = sr_inb(hc, SR_MCR); etcndx = 0; } fifo_v = 0x10; /* stolen from Linux version */ /* * search for appropriate speed in table, don't calc it: */ wanted = sr_test_speed[portndx]; rtp = &n2_rates[0]; /* point to first table item */ while ((rtp->target > 0) /* search table for speed */ &&(rtp->target != wanted)) rtp++; /* * We've searched the table for a matching speed. 
If we've * found the correct rate line, we'll get the pre-calc'd * values for the TMC and baud rate divisor for subsequent * use... */ if (rtp->target > 0) { /* use table-provided values */ gotspeed = wanted; tmc_v = rtp->tmc_reg; br_v = rtp->br_reg; } else { /* otherwise assume 1MBit comm rate */ gotspeed = 10000; tmc_v = 5; br_v = 1; } /* * Now we mask in the enable clock output for the MCR: */ mcr_v |= etc0vals[etcndx + portndx]; /* * Now we'll program the registers with these speed- related * contents... */ SRC_PUT8(hc, msci->tmc, tmc_v); SRC_PUT8(hc, msci->trc0, fifo_v); SRC_PUT8(hc, msci->rxs, SCA_RXS_CLK_INT + br_v); SRC_PUT8(hc, msci->txs, SCA_TXS_CLK_INT + br_v); switch (hc->cardtype) { case SR_CRD_N2PCI: sr_write_fecr(hc, mcr_v); break; case SR_CRD_N2: default: sr_outb(hc, SR_MCR, mcr_v); } #if BUGGY > 0 if (wanted != gotspeed) printf("sr%d: Speed wanted=%d, found=%d\n", wanted, gotspeed); printf("sr%d: Internal Clock %dx100 BPS, tmc=%d, div=%d\n", portndx, gotspeed, tmc_v, br_v); #endif #else SRC_PUT8(hc, msci->rxs, SCA_RXS_CLK_INT | SCA_RXS_DIV1); SRC_PUT8(hc, msci->txs, SCA_TXS_CLK_INT | SCA_TXS_DIV1); SRC_PUT8(hc, msci->tmc, 5); if (portndx == 0) switch (hc->cardtype) { case SR_CRD_N2PCI: sr_write_fecr(hc, sr_read_fecr(hc) | SR_FECR_ETC0); break; case SR_CRD_N2: default: mcr_v = sr_inb(hc, SR_MCR); mcr_v |= SR_MCR_ETC0; sr_outb(hc, SR_MCR, mcr_v); } else switch (hc->cardtype) { case SR_CRD_N2: mcr_v = sr_inb(hc, SR_MCR); mcr_v |= SR_MCR_ETC1; sr_outb(hc, SR_MCR, mcr_v); break; case SR_CRD_N2PCI: sr_write_fecr(hc, sr_read_fecr(hc) | SR_FECR_ETC1); break; } #endif } /* * XXX Disable all interrupts for now. I think if you are using the * dmac you don't use these interrupts. 
 */
	SRC_PUT8(hc, msci->ie0, 0);
	SRC_PUT8(hc, msci->ie1, 0x0C);
	SRC_PUT8(hc, msci->ie2, 0);
	SRC_PUT8(hc, msci->fie, 0);

	SRC_PUT8(hc, msci->sa0, 0);
	SRC_PUT8(hc, msci->sa1, 0);

	SRC_PUT8(hc, msci->idl, 0x7E);	/* set flags value */

	SRC_PUT8(hc, msci->rrc, 0x0E);
	SRC_PUT8(hc, msci->trc0, 0x10);
	SRC_PUT8(hc, msci->trc1, 0x1F);
}

/*
 * Configure the rx dma controller.
 *
 * Builds the circular chain of receive descriptors in card DPRAM
 * (each descriptor owns one SR_BUF_SIZ granule), then programs the
 * RX DMAC's cda/eda/sarb registers and enables the DMA.
 */
static void
sr_init_rx_dmac(struct sr_softc *sc)
{
	struct sr_hardc *hc;
	dmac_channel *dmac;
	sca_descriptor *rxd;
	u_int cda_v, sarb_v, rxbuf, rxda, rxda_d;

#if BUGGY > 0
	printf("sr_init_rx_dmac(sc=%08x)\n", sc);
#endif

	hc = sc->hc;
	dmac = &hc->sca->dmac[DMAC_RXCH(sc->scachan)];

	/* Select the memory window that holds the descriptor table. */
	if (hc->mempages)
		SRC_SET_MEM(hc, sc->rxdesc);

	/*
	 * This phase initializes the contents of the descriptor table
	 * needed to construct a circular buffer...
	 */
	rxd = (sca_descriptor *)(hc->mem_start + (sc->rxdesc & hc->winmsk));
	rxda_d = (uintptr_t) hc->mem_start - (sc->rxdesc & ~hc->winmsk);

	for (rxbuf = sc->rxstart; rxbuf < sc->rxend;
	     rxbuf += SR_BUF_SIZ, rxd++) {
		/*
		 * construct the circular chain...
		 */
		rxda = (uintptr_t) &rxd[1] - rxda_d + hc->mem_pstart;
		rxd->cp = (u_short)(rxda & 0xffff);

		/*
		 * set the on-card buffer address...
		 */
		rxd->bp = (u_short)((rxbuf + hc->mem_pstart) & 0xffff);
		rxd->bpb = (u_char)(((rxbuf + hc->mem_pstart) >> 16) & 0xff);

		rxd->len = 0;		/* bytes resident w/in granule */
		rxd->stat = 0xff;	/* The sca write here when finished */
	}

	/*
	 * heal the chain so that the last entry points to the first...
	 */
	rxd--;
	rxd->cp = (u_short)((sc->rxdesc + hc->mem_pstart) & 0xffff);

	/*
	 * reset the reception handler's index...
	 */
	sc->rxhind = 0;

	/*
	 * We'll now configure the receiver's DMA logic...
	 */
	SRC_PUT8(hc, dmac->dsr, 0);	/* Disable DMA transfer */
	SRC_PUT8(hc, dmac->dcr, SCA_DCR_ABRT);

	/* XXX maybe also SCA_DMR_CNTE */
	SRC_PUT8(hc, dmac->dmr, SCA_DMR_TMOD | SCA_DMR_NF);
	SRC_PUT16(hc, dmac->bfl, SR_BUF_SIZ);

	cda_v = (u_short)((sc->rxdesc + hc->mem_pstart) & 0xffff);
	sarb_v = (u_char)(((sc->rxdesc + hc->mem_pstart) >> 16) & 0xff);

	SRC_PUT16(hc, dmac->cda, cda_v);
	SRC_PUT8(hc, dmac->sarb, sarb_v);

	/*
	 * NOTE(review): the eda value is derived by treating the
	 * card-relative rxstart offset as a pointer and indexing to the
	 * last descriptor; only the low 16 bits reach the chip.  Confirm
	 * this matches the cda/sarb addressing convention above.
	 */
	rxd = (sca_descriptor *)(uintptr_t)sc->rxstart;
	SRC_PUT16(hc, dmac->eda,
	    (u_short)((uintptr_t)&rxd[sc->rxmax - 1] & 0xffff));

	SRC_PUT8(hc, dmac->dir, 0xF0);
	SRC_PUT8(hc, dmac->dsr, SCA_DSR_DE);	/* Enable DMA */
}

/*
 * Configure the TX DMA descriptors.
 * Initialize the needed values and chain the descriptors.
 */
static void
sr_init_tx_dmac(struct sr_softc *sc)
{
	int blk;
	u_int txbuf, txda, txda_d;
	struct sr_hardc *hc;
	sca_descriptor *txd;
	dmac_channel *dmac;
	struct buf_block *blkp;
	u_int x;
	u_int sarb_v;

#if BUGGY > 0
	printf("sr_init_tx_dmac(sc=%08x)\n", sc);
#endif

	hc = sc->hc;
	dmac = &hc->sca->dmac[DMAC_TXCH(sc->scachan)];

	if (hc->mempages)
		SRC_SET_MEM(hc, sc->block[0].txdesc);

	/*
	 * Initialize the array of descriptors for transmission
	 */
	for (blk = 0; blk < SR_TX_BLOCKS; blk++) {
		blkp = &sc->block[blk];
		txd = (sca_descriptor *)(hc->mem_start +
		    (blkp->txdesc & hc->winmsk));
		txda_d = (uintptr_t) hc->mem_start -
		    (blkp->txdesc & ~hc->winmsk);

		x = 0;
		txbuf = blkp->txstart;
		for (; txbuf < blkp->txend; txbuf += SR_BUF_SIZ, txd++) {
			/* chain pointer to the next descriptor */
			txda = (uintptr_t) &txd[1] - txda_d + hc->mem_pstart;
			txd->cp = (u_short)(txda & 0xffff);

			/* on-card buffer address for this granule */
			txd->bp = (u_short)((txbuf + hc->mem_pstart) &
			    0xffff);
			txd->bpb = (u_char)(((txbuf + hc->mem_pstart) >> 16) &
			    0xff);
			txd->len = 0;
			txd->stat = 0;
			x++;
		}

		/* heal the chain: last descriptor points back to the first */
		txd--;
		txd->cp = (u_short)((blkp->txdesc + hc->mem_pstart) &
		    0xffff);

		blkp->txtail = (uintptr_t)txd - (uintptr_t)hc->mem_start;
	}

	SRC_PUT8(hc, dmac->dsr, 0);	/* Disable DMA */
	SRC_PUT8(hc, dmac->dcr, SCA_DCR_ABRT);
	SRC_PUT8(hc, dmac->dmr, SCA_DMR_TMOD | SCA_DMR_NF);
	SRC_PUT8(hc, dmac->dir,
	    SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF);

	/* high byte of the descriptor table's physical address */
	sarb_v = (sc->block[0].txdesc + hc->mem_pstart) >> 16;
	sarb_v &= 0x00ff;
	SRC_PUT8(hc, dmac->sarb, (u_char) sarb_v);
}

/*
 * Look through the descriptors to see if there is a complete packet
 * available. Stop if we get to where the sca is busy.
 *
 * Return the length and status of the packet.
 * Return nonzero if there is a packet available.
 *
 * NOTE:
 * It seems that we get the interrupt a bit early. The updateing of
 * descriptor values is not always completed when this is called.
 */
static int
sr_packet_avail(struct sr_softc *sc, int *len, u_char *rxstat)
{
	int granules;		/* count of granules in pkt */
	int wki, wko;
	struct sr_hardc *hc;
	sca_descriptor *rxdesc;	/* current descriptor */
	sca_descriptor *endp;	/* ending descriptor */
	sca_descriptor *cda;	/* starting descriptor */

	hc = sc->hc;		/* get card's information */

	/*
	 * set up starting descriptor by pulling that info from the DMA half
	 * of the HD chip...
	 */
	wki = DMAC_RXCH(sc->scachan);
	wko = SRC_GET16(hc, hc->sca->dmac[wki].cda);

	cda = (sca_descriptor *)(hc->mem_start + (wko & hc->winmsk));

#if BUGGY > 1
	printf("sr_packet_avail(): wki=%d, wko=%04x, cda=%08x\n",
	       wki, wko, cda);
#endif

	/*
	 * open the appropriate memory window and set our expectations...
	 */
	if (hc->mempages) {
		SRC_SET_MEM(hc, sc->rxdesc);
		SRC_SET_ON(hc);
	}
	rxdesc = (sca_descriptor *)
		 (hc->mem_start + (sc->rxdesc & hc->winmsk));
	endp = rxdesc;
	rxdesc = &rxdesc[sc->rxhind];
	endp = &endp[sc->rxmax];

	*len = 0;		/* reset result total length */
	granules = 0;		/* reset count of granules */

	/*
	 * This loop will scan descriptors, but it *will* puke up if we wrap
	 * around to our starting point...
	 */
	while (rxdesc != cda) {
		*len += rxdesc->len;	/* increment result length */
		granules++;

		/*
		 * If we hit a valid packet's completion we'll know we've
		 * got a live one, and that we can deliver the packet.
		 * Since we're only allowed to report a packet available,
		 * somebody else does that...
		 */
		if (rxdesc->stat & SCA_DESC_EOM) {	/* End Of Message */
			*rxstat = rxdesc->stat;	/* return closing */
#if BUGGY > 0
			printf("sr%d: PKT AVAIL len %d, %x, bufs %u.\n",
			       sc->unit, *len, *rxstat, granules);
#endif
			return 1;	/* indicate success */
		}
		/*
		 * OK, this packet take up multiple granules. Move on to
		 * the next descriptor so we can consider it...
		 */
		rxdesc++;

		if (rxdesc == endp)	/* recognize & act on wrap point */
			rxdesc = (sca_descriptor *)
				 (hc->mem_start + (sc->rxdesc & hc->winmsk));
	}

	/*
	 * Nothing found in the DPRAM. Let the caller know...
	 */
	*len = 0;
	*rxstat = 0;

	return 0;
}

/*
 * Copy a packet from the on card memory into a provided mbuf.
 * Take into account that buffers wrap and that a packet may
 * be larger than a buffer.
 *
 * Caller guarantees the mbuf (possibly with a cluster) can hold
 * 'len' bytes; each copied granule's descriptor is reset so the
 * SCA can reuse it.
 */
static void
sr_copy_rxbuf(struct mbuf *m, struct sr_softc *sc, int len)
{
	struct sr_hardc *hc;
	sca_descriptor *rxdesc;
	u_int rxdata;
	u_int rxmax;
	u_int off = 0;
	u_int tlen;

#if BUGGY > 0
	printf("sr_copy_rxbuf(m=%08x,sc=%08x,len=%d)\n", m, sc, len);
#endif

	hc = sc->hc;

	rxdata = sc->rxstart + (sc->rxhind * SR_BUF_SIZ);
	rxmax = sc->rxstart + (sc->rxmax * SR_BUF_SIZ);

	rxdesc = (sca_descriptor *)
		 (hc->mem_start + (sc->rxdesc & hc->winmsk));
	rxdesc = &rxdesc[sc->rxhind];

	/*
	 * Using the count of bytes in the received packet, we decrement it
	 * for each granule (controller by an SCA descriptor) to control the
	 * looping...
	 */
	while (len) {
		/*
		 * tlen gets the length of *this* granule... ...which is
		 * then copied to the target buffer.
		 */
		tlen = (len < SR_BUF_SIZ) ? len : SR_BUF_SIZ;

		if (hc->mempages)
			SRC_SET_MEM(hc, rxdata);

		bcopy(hc->mem_start + (rxdata & hc->winmsk),
		      mtod(m, caddr_t) +off,
		      tlen);

		off += tlen;
		len -= tlen;

		/*
		 * now, return to the descriptor's window in DPRAM and reset
		 * the descriptor we've just suctioned...
		 */
		if (hc->mempages)
			SRC_SET_MEM(hc, sc->rxdesc);

		rxdesc->len = 0;
		rxdesc->stat = 0xff;

		/*
		 * Move on to the next granule. If we've any remaining
		 * bytes to process we'll just continue in our loop...
		 */
		rxdata += SR_BUF_SIZ;
		rxdesc++;

		if (rxdata == rxmax) {	/* handle the wrap point */
			rxdata = sc->rxstart;
			rxdesc = (sca_descriptor *)
				 (hc->mem_start + (sc->rxdesc & hc->winmsk));
		}
	}
}

/*
 * If single is set, just eat a packet. Otherwise eat everything up to
 * where cda points. Update pointers to point to the next packet.
 *
 * This handles "flushing" of a packet as received...
 *
 * If the "single" parameter is zero, all pending receive traffic will
 * be flushed out of existence. A non-zero value will only drop the
 * *next* (currently) pending packet...
 */
static void
sr_eat_packet(struct sr_softc *sc, int single)
{
	struct sr_hardc *hc;
	sca_descriptor *rxdesc;	/* current descriptor being eval'd */
	sca_descriptor *endp;	/* last descriptor in chain */
	sca_descriptor *cda;	/* current start point */
	u_int loopcnt = 0;	/* count of packets flushed ??? */
	u_char stat;		/* captured status byte from descr */

	hc = sc->hc;
	cda = (sca_descriptor *)(hc->mem_start +
	      (SRC_GET16(hc,
	       hc->sca->dmac[DMAC_RXCH(sc->scachan)].cda) &
	       hc->winmsk));

	/*
	 * loop until desc->stat == (0xff || EOM) Clear the status and
	 * length in the descriptor. Increment the descriptor.
	 */
	if (hc->mempages)
		SRC_SET_MEM(hc, sc->rxdesc);

	rxdesc = (sca_descriptor *)
		 (hc->mem_start + (sc->rxdesc & hc->winmsk));
	endp = rxdesc;
	rxdesc = &rxdesc[sc->rxhind];
	endp = &endp[sc->rxmax];

	/*
	 * allow loop, but abort it if we wrap completely...
	 */
	while (rxdesc != cda) {
		loopcnt++;

		/* safety valve: never scan more than one full ring */
		if (loopcnt > sc->rxmax) {
			printf("sr%d: eat pkt %d loop, cda %p, "
			       "rxdesc %p, stat %x.\n",
			       sc->unit, loopcnt, cda, rxdesc,
			       rxdesc->stat);
			break;
		}
		stat = rxdesc->stat;
		rxdesc->len = 0;
		rxdesc->stat = 0xff;
		rxdesc++;
		sc->rxhind++;

		if (rxdesc == endp) {
			rxdesc = (sca_descriptor *)
				 (hc->mem_start + (sc->rxdesc & hc->winmsk));
			sc->rxhind = 0;
		}
		if (single && (stat == SCA_DESC_EOM))
			break;
	}

	/*
	 * Update the eda to the previous descriptor.
 */
	rxdesc = (sca_descriptor *)(uintptr_t)sc->rxdesc;
	rxdesc = &rxdesc[(sc->rxhind + sc->rxmax - 2) % sc->rxmax];

	SRC_PUT16(hc, hc->sca->dmac[DMAC_RXCH(sc->scachan)].eda,
		  (u_short)(((uintptr_t)rxdesc + hc->mem_pstart) & 0xffff));
}

/*
 * While there is packets available in the rx buffer, read them out
 * into mbufs and ship them off.
 *
 * Runs from the RX DMAC interrupt path: for each complete packet it
 * either copies it into an mbuf and passes it up (sppp or netgraph,
 * depending on the build), or flushes it via sr_eat_packet() on error
 * or allocation failure, then advances the DMAC's eda register.
 */
static void
sr_get_packets(struct sr_softc *sc)
{
	u_char rxstat;		/* acquired status byte */
	int i;
	int pkts;		/* count of packets found */
	int rxndx;		/* rcv buffer index */
	int tries;		/* settling time counter */
	u_int len;		/* length of pending packet */
	struct sr_hardc *hc;	/* card-level information */
	sca_descriptor *rxdesc;	/* descriptor in memory */
#ifndef NETGRAPH
	struct ifnet *ifp;	/* network intf ctl table */
#else
	int error;
#endif /* NETGRAPH */
	struct mbuf *m = NULL;	/* message buffer */

#if BUGGY > 0
	printf("sr_get_packets(sc=%08x)\n", sc);
#endif

	hc = sc->hc;
#ifndef NETGRAPH
	ifp = &sc->ifsppp.pp_if;
#endif /* NETGRAPH */

	if (hc->mempages) {
		SRC_SET_MEM(hc, sc->rxdesc);
		SRC_SET_ON(hc);	/* enable shared memory */
	}
	pkts = 0;		/* reset count of found packets */

	/*
	 * for each complete packet in the receiving pool, process each
	 * packet...
	 */
	while (sr_packet_avail(sc, &len, &rxstat)) {	/* packet pending? */
		/*
		 * I have seen situations where we got the interrupt but the
		 * status value wasn't deposited. This code should allow
		 * the status byte's value to settle...
		 */
		tries = 5;
		while ((rxstat == 0x00ff) && --tries)
			sr_packet_avail(sc, &len, &rxstat);

#if BUGGY > 1
		printf("sr_packet_avail() returned len=%d, rxstat=%02ux\n",
		       len, rxstat);
#endif

		pkts++;
#ifdef NETGRAPH
		sc->inbytes += len;
		sc->inlast = 0;
#endif /* NETGRAPH */

		/*
		 * OK, we've settled the incoming message status. We can now
		 * process it...
		 */
		if (((rxstat & SCA_DESC_ERRORS) == 0) && (len < MCLBYTES)) {
#if BUGGY > 1
			printf("sr%d: sr_get_packet() rxstat=%02x, len=%d\n",
			       sc->unit, rxstat, len);
#endif

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/*
				 * eat (flush) packet if get mbuf fail!!
				 */
				sr_eat_packet(sc, 1);
				continue;
			}
			/*
			 * construct control information for pass-off
			 */
#ifndef NETGRAPH
			m->m_pkthdr.rcvif = ifp;
#else
			m->m_pkthdr.rcvif = NULL;
#endif /* NETGRAPH */
			m->m_pkthdr.len = m->m_len = len;
			if (len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					/*
					 * We couldn't get a big enough
					 * message packet, so we'll send the
					 * packet to /dev/null...
					 */
					m_freem(m);
					sr_eat_packet(sc, 1);
					continue;
				}
			}
			/*
			 * OK, we've got a good message buffer. Now we can
			 * copy the received message into it
			 */
			sr_copy_rxbuf(m, sc, len);	/* copy from DPRAM */

#ifndef NETGRAPH
			BPF_MTAP(ifp, m);
#if BUGGY > 3
			{
				u_char *bp;

				bp = (u_char *)m;
				printf("sr%d: rcvd=%02x%02x%02x%02x%02x%02x\n",
				       sc->unit,
				       bp[0], bp[1], bp[2],
				       bp[4], bp[5], bp[6]);
			}
#endif
			sppp_input(ifp, m);
			ifp->if_ipackets++;
#else	/* NETGRAPH */
#if BUGGY > 3
			{
				u_char *bp;

				bp = mtod(m,u_char *);
				printf("sr%d: rd=%02x:%02x:%02x:%02x:%02x:%02x",
				       sc->unit,
				       bp[0], bp[1], bp[2],
				       bp[4], bp[5], bp[6]);
				printf(":%02x:%02x:%02x:%02x:%02x:%02x\n",
				       bp[6], bp[7], bp[8],
				       bp[9], bp[10], bp[11]);
			}
#endif
			NG_SEND_DATA_ONLY(error, sc->hook, m);
			sc->ipackets++;
#endif /* NETGRAPH */

			/*
			 * Update the eda to the previous descriptor.
			 */
			i = (len + SR_BUF_SIZ - 1) / SR_BUF_SIZ;
			sc->rxhind = (sc->rxhind + i) % sc->rxmax;

			rxdesc = (sca_descriptor *)(uintptr_t)sc->rxdesc;
			rxndx = (sc->rxhind + sc->rxmax - 2) % sc->rxmax;
			rxdesc = &rxdesc[rxndx];

			SRC_PUT16(hc,
				  hc->sca->dmac[DMAC_RXCH(sc->scachan)].eda,
				  (u_short)(((uintptr_t)rxdesc +
					     hc->mem_pstart) & 0xffff));
		} else {
			int got_st3, got_cda, got_eda;
			int tries = 5;

			while ((rxstat == 0xff) && --tries)
				sr_packet_avail(sc, &len, &rxstat);

			/*
			 * It look like we get an interrupt early
			 * sometimes and then the status is not
			 * filled in yet.
			 */
			if (tries && (tries != 5))
				continue;

			/*
			 * This chunk of code handles the error packets.
			 * We'll log them for posterity...
			 */
			sr_eat_packet(sc, 1);
#ifndef NETGRAPH
			ifp->if_ierrors++;
#else
			sc->ierrors[0]++;
#endif /* NETGRAPH */

			got_st3 = SRC_GET8(hc,
				  hc->sca->msci[sc->scachan].st3);
			got_cda = SRC_GET16(hc,
				  hc->sca->dmac[DMAC_RXCH(sc->scachan)].cda);
			got_eda = SRC_GET16(hc,
				  hc->sca->dmac[DMAC_RXCH(sc->scachan)].eda);

#if BUGGY > 0
			printf("sr%d: Receive error chan %d, "
			       "stat %02x, msci st3 %02x,"
			       "rxhind %d, cda %04x, eda %04x.\n",
			       sc->unit, sc->scachan, rxstat,
			       got_st3, sc->rxhind, got_cda, got_eda);
#endif
		}
	}

#if BUGGY > 0
	printf("sr%d: sr_get_packets() found %d packet(s)\n",
	       sc->unit, pkts);
#endif

	if (hc->mempages)
		SRC_SET_OFF(hc);
}

/*
 * All DMA interrupts come here.
 *
 * Each channel has two interrupts.
 * Interrupt A for errors and Interrupt B for normal stuff like end
 * of transmit or receive dmas.
*/ static void sr_dmac_intr(struct sr_hardc *hc, u_char isr1) { u_char dsr; /* contents of DMA Stat Reg */ u_char dotxstart; /* enables for tranmit part */ int mch; /* channel being processed */ struct sr_softc *sc; /* channel's softc structure */ sca_regs *sca = hc->sca; dmac_channel *dmac; /* dma structure of chip */ #if BUGGY > 0 printf("sr_dmac_intr(hc=%08x,isr1=%04x)\n", hc, isr1); #endif mch = 0; /* assume chan0 on card */ dotxstart = isr1; /* copy for xmitter starts */ /* * Shortcut if there is no interrupts for dma channel 0 or 1. * Skip processing for channel 0 if no incoming hit */ if ((isr1 & 0x0F) == 0) { mch = 1; isr1 >>= 4; } do { sc = &hc->sc[mch]; /* * Transmit channel - DMA Status Register Evaluation */ if (isr1 & 0x0C) { dmac = &sca->dmac[DMAC_TXCH(mch)]; /* * get the DMA Status Register contents and write * back to reset interrupt... */ dsr = SRC_GET8(hc, dmac->dsr); SRC_PUT8(hc, dmac->dsr, dsr); /* * Check for (& process) a Counter overflow */ if (dsr & SCA_DSR_COF) { printf("sr%d: TX DMA Counter overflow, " "txpacket no %lu.\n", #ifndef NETGRAPH sc->unit, sc->ifsppp.pp_if.if_opackets); sc->ifsppp.pp_if.if_oerrors++; #else sc->unit, sc->opackets); sc->oerrors++; #endif /* NETGRAPH */ } /* * Check for (& process) a Buffer overflow */ if (dsr & SCA_DSR_BOF) { printf("sr%d: TX DMA Buffer overflow, " "txpacket no %lu, dsr %02x, " "cda %04x, eda %04x.\n", #ifndef NETGRAPH sc->unit, sc->ifsppp.pp_if.if_opackets, #else sc->unit, sc->opackets, #endif /* NETGRAPH */ dsr, SRC_GET16(hc, dmac->cda), SRC_GET16(hc, dmac->eda)); #ifndef NETGRAPH sc->ifsppp.pp_if.if_oerrors++; #else sc->oerrors++; #endif /* NETGRAPH */ } /* * Check for (& process) an End of Transfer (OK) */ if (dsr & SCA_DSR_EOT) { /* * This should be the most common case. * * Clear the IFF_OACTIVE flag. * * Call srstart to start a new transmit if * there is data to transmit. 
*/ #if BUGGY > 0 printf("sr%d: TX Completed OK\n", sc->unit); #endif sc->xmit_busy = 0; #ifndef NETGRAPH sc->ifsppp.pp_if.if_flags &= ~IFF_OACTIVE; sc->ifsppp.pp_if.if_timer = 0; #else /* XXX may need to mark tx inactive? */ sc->out_deficit++; sc->out_dog = DOG_HOLDOFF; #endif /* NETGRAPH */ if (sc->txb_inuse && --sc->txb_inuse) sr_xmit(sc); } } /* * Receive channel processing of DMA Status Register */ if (isr1 & 0x03) { dmac = &sca->dmac[DMAC_RXCH(mch)]; dsr = SRC_GET8(hc, dmac->dsr); SRC_PUT8(hc, dmac->dsr, dsr); /* * End of frame processing (MSG OK?) */ if (dsr & SCA_DSR_EOM) { #if BUGGY > 0 int tt, ind; #ifndef NETGRAPH tt = sc->ifsppp.pp_if.if_ipackets; #else /* NETGRAPH */ tt = sc->ipackets; #endif /* NETGRAPH */ ind = sc->rxhind; #endif sr_get_packets(sc); #if BUGGY > 0 #ifndef NETGRAPH if (tt == sc->ifsppp.pp_if.if_ipackets) #else /* NETGRAPH */ if (tt == sc->ipackets) #endif /* NETGRAPH */ { sca_descriptor *rxdesc; int i; printf("SR: RXINTR isr1 %x, dsr %x, " "no data %d pkts, orxind %d.\n", dotxstart, dsr, tt, ind); printf("SR: rxdesc %x, rxstart %x, " "rxend %x, rxhind %d, " "rxmax %d.\n", sc->rxdesc, sc->rxstart, sc->rxend, sc->rxhind, sc->rxmax); printf("SR: cda %x, eda %x.\n", SRC_GET16(hc, dmac->cda), SRC_GET16(hc, dmac->eda)); if (hc->mempages) { SRC_SET_ON(hc); SRC_SET_MEM(hc, sc->rxdesc); } rxdesc = (sca_descriptor *) (hc->mem_start + (sc->rxdesc & hc->winmsk)); rxdesc = &rxdesc[sc->rxhind]; for (i = 0; i < 3; i++, rxdesc++) printf("SR: rxdesc->stat %x, " "len %d.\n", rxdesc->stat, rxdesc->len); if (hc->mempages) SRC_SET_OFF(hc); } #endif /* BUGGY */ } /* * Check for Counter overflow */ if (dsr & SCA_DSR_COF) { printf("sr%d: RX DMA Counter overflow, " "rxpkts %lu.\n", #ifndef NETGRAPH sc->unit, sc->ifsppp.pp_if.if_ipackets); sc->ifsppp.pp_if.if_ierrors++; #else /* NETGRAPH */ sc->unit, sc->ipackets); sc->ierrors[1]++; #endif /* NETGRAPH */ } /* * Check for Buffer overflow */ if (dsr & SCA_DSR_BOF) { printf("sr%d: RX DMA Buffer overflow, " "rxpkts 
%lu, rxind %d, " "cda %x, eda %x, dsr %x.\n", #ifndef NETGRAPH sc->unit, sc->ifsppp.pp_if.if_ipackets, #else /* NETGRAPH */ sc->unit, sc->ipackets, #endif /* NETGRAPH */ sc->rxhind, SRC_GET16(hc, dmac->cda), SRC_GET16(hc, dmac->eda), dsr); /* * Make sure we eat as many as possible. * Then get the system running again. */ if (hc->mempages) SRC_SET_ON(hc); sr_eat_packet(sc, 0); #ifndef NETGRAPH sc->ifsppp.pp_if.if_ierrors++; #else /* NETGRAPH */ sc->ierrors[2]++; #endif /* NETGRAPH */ SRC_PUT8(hc, sca->msci[mch].cmd, SCA_CMD_RXMSGREJ); SRC_PUT8(hc, dmac->dsr, SCA_DSR_DE); #if BUGGY > 0 printf("sr%d: RX DMA Buffer overflow, " "rxpkts %lu, rxind %d, " "cda %x, eda %x, dsr %x. After\n", sc->unit, #ifndef NETGRAPH sc->ipackets, #else /* NETGRAPH */ sc->ifsppp.pp_if.if_ipackets, #endif /* NETGRAPH */ sc->rxhind, SRC_GET16(hc, dmac->cda), SRC_GET16(hc, dmac->eda), SRC_GET8(hc, dmac->dsr)); #endif if (hc->mempages) SRC_SET_OFF(hc); } /* * End of Transfer */ if (dsr & SCA_DSR_EOT) { /* * If this happen, it means that we are * receiving faster than what the processor * can handle. * * XXX We should enable the dma again. */ printf("sr%d: RX End of xfer, rxpkts %lu.\n", sc->unit, #ifndef NETGRAPH sc->ifsppp.pp_if.if_ipackets); sc->ifsppp.pp_if.if_ierrors++; #else sc->ipackets); sc->ierrors[3]++; #endif /* NETGRAPH */ } } isr1 >>= 4; /* process next half of ISR */ mch++; /* and move to next channel */ } while ((mch < NCHAN) && isr1); /* loop for each chn */ /* * Now that we have done all the urgent things, see if we can fill * the transmit buffers. */ for (mch = 0; mch < NCHAN; mch++) { if (dotxstart & 0x0C) { /* TX initiation enabled? 
*/ sc = &hc->sc[mch]; #ifndef NETGRAPH srstart(&sc->ifsppp.pp_if); #else srstart(sc); #endif /* NETGRAPH */ } dotxstart >>= 4;/* shift for next channel */ } } #ifndef NETGRAPH #ifdef USE_MODEMCK /* * Perform timeout on an FR channel * * Establish a periodic check of open N2 ports; If * a port is open/active, its DCD state is checked * and a loss of DCD is recognized (and eventually * processed). */ static void sr_modemck(void *arg) { u_int s; int card; /* card index in table */ int cards; /* card list index */ int mch; /* channel on card */ u_char dcd_v; /* Data Carrier Detect */ u_char got_st0; /* contents of ST0 */ u_char got_st1; /* contents of ST1 */ u_char got_st2; /* contents of ST2 */ u_char got_st3; /* contents of ST3 */ struct sr_hardc *hc; /* card's configuration */ struct sr_hardc *Card[16];/* up to 16 cards in system */ struct sr_softc *sc; /* channel's softc structure */ struct ifnet *ifp; /* interface control table */ msci_channel *msci; /* regs specific to channel */ s = splimp(); #if 0 if (sr_opens == 0) { /* count of "up" channels */ sr_watcher = 0; /* indicate no watcher */ splx(s); return; } #endif sr_watcher = 1; /* mark that we're online */ /* * Now we'll need a list of cards to process. Since we can handle * both ISA and PCI cards (and I didn't think of making this logic * global YET) we'll generate a single table of card table * addresses. */ cards = 0; for (card = 0; card < NSR; card++) { hc = &sr_hardc[card]; if (hc->sc == (void *)0) continue; Card[cards++] = hc; } hc = sr_hardc_pci; while (hc) { Card[cards++] = hc; hc = hc->next; } /* * OK, we've got work we can do. Let's do it... 
(Please note that * this code _only_ deals w/ ISA cards) */ for (card = 0; card < cards; card++) { hc = Card[card];/* get card table */ for (mch = 0; mch < hc->numports; mch++) { sc = &hc->sc[mch]; ifp = &sc->ifsppp.pp_if; /* * if this channel isn't "up", skip it */ if ((ifp->if_flags & IFF_UP) == 0) continue; /* * OK, now we can go looking at this channel's * actual register contents... */ msci = &hc->sca->msci[sc->scachan]; /* * OK, now we'll look into the actual status of this * channel... * * I suck in more registers than strictly needed */ got_st0 = SRC_GET8(hc, msci->st0); got_st1 = SRC_GET8(hc, msci->st1); got_st2 = SRC_GET8(hc, msci->st2); got_st3 = SRC_GET8(hc, msci->st3); /* * We want to see if the DCD signal is up (DCD is * true if zero) */ dcd_v = (got_st3 & SCA_ST3_DCD) == 0; if (dcd_v == 0) printf("sr%d: DCD lost\n", sc->unit); } } /* * OK, now set up for the next modem signal checking pass... */ timeout(sr_modemck, NULL, hz); splx(s); } #endif #else /* NETGRAPH */ /* * If a port is open/active, it's DCD state is checked * and a loss of DCD is recognized (and eventually processed?). */ static void sr_modemck(struct sr_softc *sc ) { u_int s; u_char got_st3; /* contents of ST3 */ struct sr_hardc *hc = sc->hc; /* card's configuration */ msci_channel *msci; /* regs specific to channel */ s = splimp(); if (sc->running == 0) return; /* * OK, now we can go looking at this channel's register contents... */ msci = &hc->sca->msci[sc->scachan]; got_st3 = SRC_GET8(hc, msci->st3); /* * We want to see if the DCD signal is up (DCD is true if zero) */ sc->dcd = (got_st3 & SCA_ST3_DCD) == 0; splx(s); } #endif /* NETGRAPH */ static void sr_msci_intr(struct sr_hardc *hc, u_char isr0) { printf("src%d: SRINTR: MSCI\n", hc->cunit); } static void sr_timer_intr(struct sr_hardc *hc, u_char isr2) { printf("src%d: SRINTR: TIMER\n", hc->cunit); } #ifdef NETGRAPH /***************************************** * Device timeout/watchdog routine. * called once per second. 
* checks to see that if activity was expected, that it hapenned. * At present we only look to see if expected output was completed. */ static void ngsr_watchdog_frame(void * arg) { struct sr_softc * sc = arg; int s; int speed; if (sc->running == 0) return; /* if we are not running let timeouts die */ /* * calculate the apparent throughputs * XXX a real hack */ s = splimp(); speed = sc->inbytes - sc->lastinbytes; sc->lastinbytes = sc->inbytes; if ( sc->inrate < speed ) sc->inrate = speed; speed = sc->outbytes - sc->lastoutbytes; sc->lastoutbytes = sc->outbytes; if ( sc->outrate < speed ) sc->outrate = speed; sc->inlast++; splx(s); if ((sc->inlast > QUITE_A_WHILE) && (sc->out_deficit > LOTS_OF_PACKETS)) { log(LOG_ERR, "sr%d: No response from remote end\n", sc->unit); s = splimp(); sr_down(sc); sr_up(sc); sc->inlast = sc->out_deficit = 0; splx(s); } else if ( sc->xmit_busy ) { /* no TX -> no TX timeouts */ if (sc->out_dog == 0) { log(LOG_ERR, "sr%d: Transmit failure.. no clock?\n", sc->unit); s = splimp(); srwatchdog(sc); #if 0 sr_down(sc); sr_up(sc); #endif splx(s); sc->inlast = sc->out_deficit = 0; } else { sc->out_dog--; } } sr_modemck(sc); /* update the DCD status */ sc->handle = timeout(ngsr_watchdog_frame, sc, hz); } /*********************************************************************** * This section contains the methods for the Netgraph interface ***********************************************************************/ /* * It is not possible or allowable to create a node of this type. * If the hardware exists, it will already have created it. */ static int ngsr_constructor(node_p node) { return (EINVAL); } /* * give our ok for a hook to be added... * If we are not running this should kick the device into life. * The hook's private info points to our stash of info about that * channel. 
*/ static int ngsr_newhook(node_p node, hook_p hook, const char *name) { struct sr_softc * sc = NG_NODE_PRIVATE(node); /* * check if it's our friend the debug hook */ if (strcmp(name, NG_SR_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE(hook, NULL); /* paranoid */ sc->debug_hook = hook; return (0); } /* * Check for raw mode hook. */ if (strcmp(name, NG_SR_HOOK_RAW) != 0) { return (EINVAL); } NG_HOOK_SET_PRIVATE(hook, sc); sc->hook = hook; sc->datahooks++; sr_up(sc); return (0); } /* * incoming messages. * Just respond to the generic TEXT_STATUS message */ static int ngsr_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct sr_softc * sc; struct ng_mesg *resp = NULL; int error = 0; struct ng_mesg *msg; NGI_GET_MSG(item,msg); sc = NG_NODE_PRIVATE(node); switch (msg->header.typecookie) { case NG_SR_COOKIE: error = EINVAL; break; case NGM_GENERIC_COOKIE: switch(msg->header.cmd) { case NGM_TEXT_STATUS: { char *arg; int pos = 0; int resplen = sizeof(struct ng_mesg) + 512; NG_MKRESPONSE(resp, msg, resplen, M_NOWAIT); if (resp == NULL) { error = ENOMEM; break; } arg = (resp)->data; pos = sprintf(arg, "%ld bytes in, %ld bytes out\n" "highest rate seen: %ld B/S in, %ld B/S out\n", sc->inbytes, sc->outbytes, sc->inrate, sc->outrate); pos += sprintf(arg + pos, "%ld output errors\n", sc->oerrors); pos += sprintf(arg + pos, "ierrors = %ld, %ld, %ld, %ld, %ld, %ld\n", sc->ierrors[0], sc->ierrors[1], sc->ierrors[2], sc->ierrors[3], sc->ierrors[4], sc->ierrors[5]); resp->header.arglen = pos + 1; break; } default: error = EINVAL; break; } break; default: error = EINVAL; break; } /* Take care of synchronous response, if any */ NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return (error); } /* * get data from another node and transmit it to the correct channel */ static int ngsr_rcvdata(hook_p hook, item_p item) { int s; int error = 0; struct sr_softc * sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); struct ifqueue *xmitq_p; struct mbuf *m; meta_p meta; NGI_GET_M(item, m); 
NGI_GET_META(item, meta); NG_FREE_ITEM(item); /* * data doesn't come in from just anywhere (e.g control hook) */ if ( NG_HOOK_PRIVATE(hook) == NULL) { error = ENETDOWN; goto bad; } /* * Now queue the data for when it can be sent */ if (meta && meta->priority > 0) { xmitq_p = (&sc->xmitq_hipri); } else { xmitq_p = (&sc->xmitq); } s = splimp(); IF_LOCK(xmitq_p); if (_IF_QFULL(xmitq_p)) { _IF_DROP(xmitq_p); IF_UNLOCK(xmitq_p); splx(s); error = ENOBUFS; goto bad; } _IF_ENQUEUE(xmitq_p, m); IF_UNLOCK(xmitq_p); srstart(sc); splx(s); return (0); bad: /* * It was an error case. * check if we need to free the mbuf, and then return the error */ NG_FREE_M(m); NG_FREE_META(meta); return (error); } /* * do local shutdown processing.. * this node will refuse to go away, unless the hardware says to.. * don't unref the node, or remove our name. just clear our links up. */ static int ngsr_shutdown(node_p node) { struct sr_softc * sc = NG_NODE_PRIVATE(node); sr_down(sc); NG_NODE_UNREF(node); /* XXX should drain queues! */ if (ng_make_node_common(&typestruct, &sc->node) != 0) return (0); sprintf(sc->nodename, "%s%d", NG_SR_NODE_TYPE, sc->unit); if (ng_name_node(sc->node, sc->nodename)) { printf("node naming failed\n"); sc->node = NULL; NG_NODE_UNREF(sc->node); /* drop it again */ return (0); } NG_NODE_SET_PRIVATE(sc->node, sc); callout_handle_init(&sc->handle); /* should kill timeout */ sc->running = 0; return (0); } /* already linked */ static int ngsr_connect(hook_p hook) { /* probably not at splnet, force outward queueing */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); /* be really amiable and just say "YUP that's OK by me! " */ return (0); } /* * notify on hook disconnection (destruction) * * Invalidate the private data associated with this dlci. * For this type, removal of the last link resets tries to destroy the node. 
* As the device still exists, the shutdown method will not actually * destroy the node, but reset the device and leave it 'fresh' :) * * The node removal code will remove all references except that owned by the * driver. */ static int ngsr_disconnect(hook_p hook) { struct sr_softc * sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); int s; /* * If it's the data hook, then free resources etc. */ if (NG_HOOK_PRIVATE(hook)) { s = splimp(); sc->datahooks--; if (sc->datahooks == 0) sr_down(sc); splx(s); } else { sc->debug_hook = NULL; } return (0); } /* * called during bootup * or LKM loading to put this type into the list of known modules */ static void ngsr_init(void *ignored) { if (ng_newtype(&typestruct)) printf("ngsr install failed\n"); ngsr_done_init = 1; } #endif /* NETGRAPH */ /* ********************************* END ************************************ */ Index: head/sys/dev/sr/if_sr_pci.c =================================================================== --- head/sys/dev/sr/if_sr_pci.c (revision 129878) +++ head/sys/dev/sr/if_sr_pci.c (revision 129879) @@ -1,250 +1,251 @@ /* * Copyright (c) 1996 - 2001 John Hay. * Copyright (c) 1996 SDL Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #ifndef BUGGY #define BUGGY 0 #endif static int sr_pci_probe(device_t); static int sr_pci_attach(device_t); static device_method_t sr_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sr_pci_probe), DEVMETHOD(device_attach, sr_pci_attach), DEVMETHOD(device_detach, sr_detach), { 0, 0 } }; static driver_t sr_pci_driver = { "sr", sr_pci_methods, sizeof(struct sr_hardc), }; DRIVER_MODULE(sr, pci, sr_pci_driver, sr_devclass, 0, 0); MODULE_DEPEND(sr, pci, 1, 1, 1); static u_int src_get8_mem(struct sr_hardc *hc, u_int off); static u_int src_get16_mem(struct sr_hardc *hc, u_int off); static void src_put8_mem(struct sr_hardc *hc, u_int off, u_int val); static void src_put16_mem(struct sr_hardc *hc, u_int off, u_int val); static int sr_pci_probe(device_t device) { u_int32_t type = pci_get_devid(device); switch(type) { case 0x556812aa: device_set_desc(device, "RISCom/N2pci"); return (0); break; case 0x55684778: case 0x55684877: /* * XXX This can probably be removed sometime. 
*/ device_set_desc(device, "RISCom/N2pci (old id)"); return (0); break; default: break; } return (ENXIO); } static int sr_pci_attach(device_t device) { int numports; u_int fecr; struct sr_hardc *hc; bus_space_tag_t bt_plx; bus_space_handle_t bh_plx; hc = (struct sr_hardc *)device_get_softc(device); bzero(hc, sizeof(struct sr_hardc)); if (sr_allocate_plx_memory(device, 0x10, 1)) goto errexit; bt_plx = rman_get_bustag(hc->res_plx_memory); bh_plx = rman_get_bushandle(hc->res_plx_memory); if (sr_allocate_memory(device, 0x18, 1)) goto errexit; if (sr_allocate_irq(device, 0, 1)) goto errexit; hc->cunit = device_get_unit(device); /* * Configure the PLX. This is magic. I'm doing it just like I'm told * to. :-) * * offset * 0x00 - Map Range - Mem-mapped to locate anywhere * 0x04 - Re-Map - PCI address decode enable * 0x18 - Bus Region - 32-bit bus, ready enable * 0x1c - Master Range - include all 16 MB * 0x20 - Master RAM - Map SCA Base at 0 * 0x28 - Master Remap - direct master memory enable * 0x68 - Interrupt - Enable interrupt (0 to disable) * * Note: This is "cargo cult" stuff. - jrc */ bus_space_write_4(bt_plx, bh_plx, 0x00, 0xfffff000); bus_space_write_4(bt_plx, bh_plx, 0x04, 0x00000001); bus_space_write_4(bt_plx, bh_plx, 0x18, 0x40030043); bus_space_write_4(bt_plx, bh_plx, 0x1c, 0xff000000); bus_space_write_4(bt_plx, bh_plx, 0x20, 0x00000000); bus_space_write_4(bt_plx, bh_plx, 0x28, 0x000000e9); bus_space_write_4(bt_plx, bh_plx, 0x68, 0x00010900); /* * Get info from card. * * Only look for the second port if the first exists. Too many things * will break if we have only a second port. 
*/ fecr = sr_read_fecr(hc); numports = 0; if (((fecr & SR_FECR_ID0) >> SR_FE_ID0_SHFT) != SR_FE_ID_NONE) { numports++; if (((fecr & SR_FECR_ID1) >> SR_FE_ID1_SHFT) != SR_FE_ID_NONE) numports++; } if (numports == 0) goto errexit; hc->numports = numports; hc->cardtype = SR_CRD_N2PCI; hc->src_put8 = src_put8_mem; hc->src_put16 = src_put16_mem; hc->src_get8 = src_get8_mem; hc->src_get16 = src_get16_mem; /* * Malloc area for tx and rx buffers. For now allocate SRC_WIN_SIZ * (16k) for each buffer. * * Allocate the block below 16M because the N2pci card can only access * 16M memory at a time. * * (We could actually allocate a contiguous block above the 16MB limit, * but this would complicate card programming more than we want to * right now -jrc) */ hc->memsize = 2 * hc->numports * SRC_WIN_SIZ; hc->mem_start = contigmalloc(hc->memsize, M_DEVBUF, M_NOWAIT, 0ul, 0xfffffful, 0x10000, 0x1000000); if (hc->mem_start == NULL) { printf("src%d: pci: failed to allocate buffer space.\n", hc->cunit); goto errexit; } hc->winmsk = 0xffffffff; hc->mem_end = (caddr_t)((u_int)hc->mem_start + hc->memsize); hc->mem_pstart = kvtop(hc->mem_start); bzero(hc->mem_start, hc->memsize); sr_write_fecr(hc, SR_FECR_DTR0 | SR_FECR_DTR1 | SR_FECR_TE0 | SR_FECR_TE1); if (sr_attach(device)) goto errexit; return (0); errexit: sr_deallocate_resources(device); return (ENXIO); } /* * I/O for PCI N2 card(s) */ #define SRC_PCI_SCA_REG(y) ((y & 2) ? 
((y & 0xfd) + 0x100) : y) static u_int src_get8_mem(struct sr_hardc *hc, u_int off) { return bus_space_read_1(hc->bt_memory, hc->bh_memory, SRC_PCI_SCA_REG(off)); } static u_int src_get16_mem(struct sr_hardc *hc, u_int off) { return bus_space_read_2(hc->bt_memory, hc->bh_memory, SRC_PCI_SCA_REG(off)); } static void src_put8_mem(struct sr_hardc *hc, u_int off, u_int val) { bus_space_write_1(hc->bt_memory, hc->bh_memory, SRC_PCI_SCA_REG(off), val); } static void src_put16_mem(struct sr_hardc *hc, u_int off, u_int val) { bus_space_write_2(hc->bt_memory, hc->bh_memory, SRC_PCI_SCA_REG(off), val); } Index: head/sys/dev/sx/sx_pci.c =================================================================== --- head/sys/dev/sx/sx_pci.c (revision 129878) +++ head/sys/dev/sx/sx_pci.c (revision 129879) @@ -1,166 +1,167 @@ /* * Device driver for Specialix I/O8+ multiport serial card. * * Copyright 2003 Frank Mayhar * * Derived from the "si" driver by Peter Wemm , using * lots of information from the Linux "specialix" driver by Roger Wolff * and from the Intel CD1865 "Intelligent Eight- * Channel Communications Controller" datasheet. Roger was also nice * enough to answer numerous questions about stuff specific to the I/O8+ * not covered by the CD1865 datasheet. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notices, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notices, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN * NO EVENT SHALL THE AUTHORS BE LIABLE. * * $FreeBSD$ */ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include static int sx_pci_probe( device_t dev) { const char *desc = NULL; switch (pci_get_devid(dev)) { case 0x200011cb: if (pci_get_subdevice(dev) == (uint16_t)0xb008) { desc = "Specialix I/O8+ Multiport Serial Card"; } break; } if (desc) { device_set_desc(dev, desc); return 0; } return ENXIO; } static int sx_pci_attach(device_t dev) { struct sx_softc *sc; void *ih; int error; error = 0; ih = NULL; sc = device_get_softc(dev); sc->sc_io_rid = 0x18; sc->sc_io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->sc_io_rid, 0, ~0, 1, RF_ACTIVE); if (!sc->sc_io_res) { device_printf(dev, "can't map I/O\n"); goto fail; } sc->sc_st = rman_get_bustag(sc->sc_io_res); sc->sc_sh = rman_get_bushandle(sc->sc_io_res); /* * Now that we have the bus handle, we can make certain that this * is an I/O8+. */ if (sx_probe_io8(dev)) { device_printf(dev, "Oops! 
Device is not an I/O8+ board!\n"); goto fail; } /*sc->sc_paddr = (caddr_t)rman_get_start(sc->sc_io_res);*/ /*sc->sc_maddr = rman_get_virtual(sc->sc_io_res);*/ sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_irq_rid, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { device_printf(dev, "Can't map interrupt\n"); goto fail; } sc->sc_irq = rman_get_start(sc->sc_irq_res); error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_TTY, sx_intr, sc, &ih); if (error) { device_printf(dev, "Can't activate interrupt\n"); goto fail; } error = sxattach(dev); if (error) goto fail; return (0); /* success */ fail: if (error == 0) error = ENXIO; if (sc->sc_irq_res) { if (ih) bus_teardown_intr(dev, sc->sc_irq_res, ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, sc->sc_irq_res); sc->sc_irq_res = 0; } if (sc->sc_io_res) { bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_io_rid, sc->sc_io_res); sc->sc_io_res = 0; } return(error); } static device_method_t sx_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sx_pci_probe), DEVMETHOD(device_attach, sx_pci_attach), /* DEVMETHOD(device_detach, sx_pci_detach),*/ { 0, 0 } }; static driver_t sx_pci_driver = { "sx", sx_pci_methods, sizeof(struct sx_softc), }; DRIVER_MODULE(sx, pci, sx_pci_driver, sx_devclass, 0, 0); Index: head/sys/dev/syscons/scterm-sc.c =================================================================== --- head/sys/dev/syscons/scterm-sc.c (revision 129878) +++ head/sys/dev/syscons/scterm-sc.c (revision 129879) @@ -1,822 +1,823 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * Copyright (c) 1992-1998 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_syscons.h" #include #include #include +#include #include #if __sparc64__ || __powerpc__ #include #else #include #endif #include #include #ifndef SC_DUMB_TERMINAL #define MAX_ESC_PAR 5 /* attribute flags */ typedef struct { u_short fg; /* foreground color */ u_short bg; /* background color */ } color_t; typedef struct { int flags; #define SCTERM_BUSY (1 << 0) int esc; int num_param; int last_param; int param[MAX_ESC_PAR]; int saved_xpos; int saved_ypos; int attr_mask; /* current logical attr mask */ #define NORMAL_ATTR 0x00 #define BLINK_ATTR 0x01 #define BOLD_ATTR 0x02 #define UNDERLINE_ATTR 0x04 #define REVERSE_ATTR 0x08 #define FG_CHANGED 0x10 #define BG_CHANGED 0x20 int cur_attr; /* current hardware attr word */ color_t cur_color; /* current hardware color */ color_t std_color; /* normal hardware color */ color_t rev_color; /* reverse hardware color */ color_t dflt_std_color; /* default normal color */ color_t dflt_rev_color; /* default reverse color */ } term_stat; static sc_term_init_t 
scterm_init; static sc_term_term_t scterm_term; static sc_term_puts_t scterm_puts; static sc_term_ioctl_t scterm_ioctl; static sc_term_reset_t scterm_reset; static sc_term_default_attr_t scterm_default_attr; static sc_term_clear_t scterm_clear; static sc_term_notify_t scterm_notify; static sc_term_input_t scterm_input; static sc_term_sw_t sc_term_sc = { { NULL, NULL }, "sc", /* emulator name */ "syscons terminal", /* description */ "*", /* matching renderer, any :-) */ sizeof(term_stat), /* softc size */ 0, scterm_init, scterm_term, scterm_puts, scterm_ioctl, scterm_reset, scterm_default_attr, scterm_clear, scterm_notify, scterm_input, }; SCTERM_MODULE(sc, sc_term_sc); static term_stat reserved_term_stat; static void scterm_scan_esc(scr_stat *scp, term_stat *tcp, u_char c); static int mask2attr(term_stat *tcp); static int scterm_init(scr_stat *scp, void **softc, int code) { term_stat *tcp; if (*softc == NULL) { if (reserved_term_stat.flags & SCTERM_BUSY) return EINVAL; *softc = &reserved_term_stat; } tcp = *softc; switch (code) { case SC_TE_COLD_INIT: bzero(tcp, sizeof(*tcp)); tcp->flags = SCTERM_BUSY; tcp->esc = 0; tcp->saved_xpos = -1; tcp->saved_ypos = -1; tcp->attr_mask = NORMAL_ATTR; /* XXX */ tcp->dflt_std_color.fg = SC_NORM_ATTR & 0x0f; tcp->dflt_std_color.bg = (SC_NORM_ATTR >> 4) & 0x0f; tcp->dflt_rev_color.fg = SC_NORM_REV_ATTR & 0x0f; tcp->dflt_rev_color.bg = (SC_NORM_REV_ATTR >> 4) & 0x0f; tcp->std_color = tcp->dflt_std_color; tcp->rev_color = tcp->dflt_rev_color; tcp->cur_color = tcp->std_color; tcp->cur_attr = mask2attr(tcp); ++sc_term_sc.te_refcount; break; case SC_TE_WARM_INIT: tcp->esc = 0; tcp->saved_xpos = -1; tcp->saved_ypos = -1; #if 0 tcp->std_color = tcp->dflt_std_color; tcp->rev_color = tcp->dflt_rev_color; #endif tcp->cur_color = tcp->std_color; tcp->cur_attr = mask2attr(tcp); break; } return 0; } static int scterm_term(scr_stat *scp, void **softc) { if (*softc == &reserved_term_stat) { *softc = NULL; bzero(&reserved_term_stat, 
sizeof(reserved_term_stat)); } --sc_term_sc.te_refcount; return 0; } static void scterm_scan_esc(scr_stat *scp, term_stat *tcp, u_char c) { static u_char ansi_col[16] = { #ifdef __alpha__ /* * DEC is evil. They switch the red and blue attributes in * the palette in the system console. As a simple work-around, * re-map the ANSI colors appropriately. */ FG_BLACK, FG_BLUE, FG_GREEN, FG_CYAN, FG_RED, FG_MAGENTA, FG_BROWN, FG_LIGHTGREY, FG_DARKGREY, FG_LIGHTBLUE, FG_LIGHTGREEN, FG_LIGHTCYAN, FG_LIGHTRED, FG_LIGHTMAGENTA, FG_YELLOW, FG_WHITE #else FG_BLACK, FG_RED, FG_GREEN, FG_BROWN, FG_BLUE, FG_MAGENTA, FG_CYAN, FG_LIGHTGREY, FG_DARKGREY, FG_LIGHTRED, FG_LIGHTGREEN, FG_YELLOW, FG_LIGHTBLUE, FG_LIGHTMAGENTA, FG_LIGHTCYAN, FG_WHITE #endif }; static int cattrs[] = { 0, /* block */ CONS_BLINK_CURSOR, /* blinking block */ CONS_CHAR_CURSOR, /* underline */ CONS_CHAR_CURSOR | CONS_BLINK_CURSOR, /* blinking underline */ CONS_RESET_CURSOR, /* reset to default */ CONS_HIDDEN_CURSOR, /* hide cursor */ }; static int tcattrs[] = { CONS_RESET_CURSOR | CONS_LOCAL_CURSOR, /* normal */ CONS_HIDDEN_CURSOR | CONS_LOCAL_CURSOR, /* invisible */ CONS_BLINK_CURSOR | CONS_LOCAL_CURSOR, /* very visible */ }; sc_softc_t *sc; int v0, v1, v2; int i, n; i = n = 0; sc = scp->sc; if (tcp->esc == 1) { /* seen ESC */ switch (c) { case '7': /* Save cursor position */ tcp->saved_xpos = scp->xpos; tcp->saved_ypos = scp->ypos; break; case '8': /* Restore saved cursor position */ if (tcp->saved_xpos >= 0 && tcp->saved_ypos >= 0) sc_move_cursor(scp, tcp->saved_xpos, tcp->saved_ypos); break; case '[': /* Start ESC [ sequence */ tcp->esc = 2; tcp->last_param = -1; for (i = tcp->num_param; i < MAX_ESC_PAR; i++) tcp->param[i] = 1; tcp->num_param = 0; return; case 'M': /* Move cursor up 1 line, scroll if at top */ sc_term_up_scroll(scp, 1, sc->scr_map[0x20], tcp->cur_attr, 0, 0); break; #if notyet case 'Q': tcp->esc = 4; return; #endif case 'c': /* reset */ tcp->attr_mask = NORMAL_ATTR; tcp->cur_color = 
tcp->std_color = tcp->dflt_std_color; tcp->rev_color = tcp->dflt_rev_color; tcp->cur_attr = mask2attr(tcp); sc_change_cursor_shape(scp, CONS_RESET_CURSOR | CONS_LOCAL_CURSOR, -1, -1); sc_clear_screen(scp); break; case '(': /* iso-2022: designate 94 character set to G0 */ tcp->esc = 5; return; } } else if (tcp->esc == 2) { /* seen ESC [ */ if (c >= '0' && c <= '9') { if (tcp->num_param < MAX_ESC_PAR) { if (tcp->last_param != tcp->num_param) { tcp->last_param = tcp->num_param; tcp->param[tcp->num_param] = 0; } else { tcp->param[tcp->num_param] *= 10; } tcp->param[tcp->num_param] += c - '0'; return; } } tcp->num_param = tcp->last_param + 1; switch (c) { case ';': if (tcp->num_param < MAX_ESC_PAR) return; break; case '=': tcp->esc = 3; tcp->last_param = -1; for (i = tcp->num_param; i < MAX_ESC_PAR; i++) tcp->param[i] = 1; tcp->num_param = 0; return; case 'A': /* up n rows */ sc_term_up(scp, tcp->param[0], 0); break; case 'B': /* down n rows */ sc_term_down(scp, tcp->param[0], 0); break; case 'C': /* right n columns */ sc_term_right(scp, tcp->param[0]); break; case 'D': /* left n columns */ sc_term_left(scp, tcp->param[0]); break; case 'E': /* cursor to start of line n lines down */ n = tcp->param[0]; if (n < 1) n = 1; sc_move_cursor(scp, 0, scp->ypos + n); break; case 'F': /* cursor to start of line n lines up */ n = tcp->param[0]; if (n < 1) n = 1; sc_move_cursor(scp, 0, scp->ypos - n); break; case 'f': /* Cursor move */ case 'H': if (tcp->num_param == 0) sc_move_cursor(scp, 0, 0); else if (tcp->num_param == 2) sc_move_cursor(scp, tcp->param[1] - 1, tcp->param[0] - 1); break; case 'J': /* Clear all or part of display */ if (tcp->num_param == 0) n = 0; else n = tcp->param[0]; sc_term_clr_eos(scp, n, sc->scr_map[0x20], tcp->cur_attr); break; case 'K': /* Clear all or part of line */ if (tcp->num_param == 0) n = 0; else n = tcp->param[0]; sc_term_clr_eol(scp, n, sc->scr_map[0x20], tcp->cur_attr); break; case 'L': /* Insert n lines */ sc_term_ins_line(scp, scp->ypos, 
tcp->param[0], sc->scr_map[0x20], tcp->cur_attr, 0); break; case 'M': /* Delete n lines */ sc_term_del_line(scp, scp->ypos, tcp->param[0], sc->scr_map[0x20], tcp->cur_attr, 0); break; case 'P': /* Delete n chars */ sc_term_del_char(scp, tcp->param[0], sc->scr_map[0x20], tcp->cur_attr); break; case '@': /* Insert n chars */ sc_term_ins_char(scp, tcp->param[0], sc->scr_map[0x20], tcp->cur_attr); break; case 'S': /* scroll up n lines */ sc_term_del_line(scp, 0, tcp->param[0], sc->scr_map[0x20], tcp->cur_attr, 0); break; case 'T': /* scroll down n lines */ sc_term_ins_line(scp, 0, tcp->param[0], sc->scr_map[0x20], tcp->cur_attr, 0); break; case 'X': /* erase n characters in line */ n = tcp->param[0]; if (n < 1) n = 1; if (n > scp->xsize - scp->xpos) n = scp->xsize - scp->xpos; sc_vtb_erase(&scp->vtb, scp->cursor_pos, n, sc->scr_map[0x20], tcp->cur_attr); mark_for_update(scp, scp->cursor_pos); mark_for_update(scp, scp->cursor_pos + n - 1); break; case 'Z': /* move n tabs backwards */ sc_term_backtab(scp, tcp->param[0]); break; case '`': /* move cursor to column n */ sc_term_col(scp, tcp->param[0]); break; case 'a': /* move cursor n columns to the right */ sc_term_right(scp, tcp->param[0]); break; case 'd': /* move cursor to row n */ sc_term_row(scp, tcp->param[0]); break; case 'e': /* move cursor n rows down */ sc_term_down(scp, tcp->param[0], 0); break; case 'm': /* change attribute */ if (tcp->num_param == 0) { tcp->attr_mask = NORMAL_ATTR; tcp->cur_color = tcp->std_color; tcp->cur_attr = mask2attr(tcp); break; } for (i = 0; i < tcp->num_param; i++) { switch (n = tcp->param[i]) { case 0: /* back to normal */ tcp->attr_mask = NORMAL_ATTR; tcp->cur_color = tcp->std_color; tcp->cur_attr = mask2attr(tcp); break; case 1: /* bold */ tcp->attr_mask |= BOLD_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 4: /* underline */ tcp->attr_mask |= UNDERLINE_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 5: /* blink */ tcp->attr_mask |= BLINK_ATTR; tcp->cur_attr = 
mask2attr(tcp); break; case 7: /* reverse */ tcp->attr_mask |= REVERSE_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 22: /* remove bold (or dim) */ tcp->attr_mask &= ~BOLD_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 24: /* remove underline */ tcp->attr_mask &= ~UNDERLINE_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 25: /* remove blink */ tcp->attr_mask &= ~BLINK_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 27: /* remove reverse */ tcp->attr_mask &= ~REVERSE_ATTR; tcp->cur_attr = mask2attr(tcp); break; case 30: case 31: /* set ansi fg color */ case 32: case 33: case 34: case 35: case 36: case 37: tcp->attr_mask |= FG_CHANGED; tcp->cur_color.fg = ansi_col[n - 30]; tcp->cur_attr = mask2attr(tcp); break; case 39: /* restore fg color back to normal */ tcp->attr_mask &= ~(FG_CHANGED|BOLD_ATTR); tcp->cur_color.fg = tcp->std_color.fg; tcp->cur_attr = mask2attr(tcp); break; case 40: case 41: /* set ansi bg color */ case 42: case 43: case 44: case 45: case 46: case 47: tcp->attr_mask |= BG_CHANGED; tcp->cur_color.bg = ansi_col[n - 40]; tcp->cur_attr = mask2attr(tcp); break; case 49: /* restore bg color back to normal */ tcp->attr_mask &= ~BG_CHANGED; tcp->cur_color.bg = tcp->std_color.bg; tcp->cur_attr = mask2attr(tcp); break; } } break; case 's': /* Save cursor position */ tcp->saved_xpos = scp->xpos; tcp->saved_ypos = scp->ypos; break; case 'u': /* Restore saved cursor position */ if (tcp->saved_xpos >= 0 && tcp->saved_ypos >= 0) sc_move_cursor(scp, tcp->saved_xpos, tcp->saved_ypos); break; case 'x': if (tcp->num_param == 0) n = 0; else n = tcp->param[0]; switch (n) { case 0: /* reset colors and attributes back to normal */ tcp->attr_mask = NORMAL_ATTR; tcp->cur_color = tcp->std_color = tcp->dflt_std_color; tcp->rev_color = tcp->dflt_rev_color; tcp->cur_attr = mask2attr(tcp); break; case 1: /* set ansi background */ tcp->attr_mask &= ~BG_CHANGED; tcp->cur_color.bg = tcp->std_color.bg = ansi_col[tcp->param[1] & 0x0f]; tcp->cur_attr = mask2attr(tcp); 
break; case 2: /* set ansi foreground */ tcp->attr_mask &= ~FG_CHANGED; tcp->cur_color.fg = tcp->std_color.fg = ansi_col[tcp->param[1] & 0x0f]; tcp->cur_attr = mask2attr(tcp); break; case 3: /* set adapter attribute directly */ tcp->attr_mask &= ~(FG_CHANGED | BG_CHANGED); tcp->cur_color.fg = tcp->std_color.fg = tcp->param[1] & 0x0f; tcp->cur_color.bg = tcp->std_color.bg = (tcp->param[1] >> 4) & 0x0f; tcp->cur_attr = mask2attr(tcp); break; case 5: /* set ansi reverse background */ tcp->rev_color.bg = ansi_col[tcp->param[1] & 0x0f]; tcp->cur_attr = mask2attr(tcp); break; case 6: /* set ansi reverse foreground */ tcp->rev_color.fg = ansi_col[tcp->param[1] & 0x0f]; tcp->cur_attr = mask2attr(tcp); break; case 7: /* set adapter reverse attribute directly */ tcp->rev_color.fg = tcp->param[1] & 0x0f; tcp->rev_color.bg = (tcp->param[1] >> 4) & 0x0f; tcp->cur_attr = mask2attr(tcp); break; } break; case 'z': /* switch to (virtual) console n */ if (tcp->num_param == 1) sc_switch_scr(sc, tcp->param[0]); break; } } else if (tcp->esc == 3) { /* seen ESC [0-9]+ = */ if (c >= '0' && c <= '9') { if (tcp->num_param < MAX_ESC_PAR) { if (tcp->last_param != tcp->num_param) { tcp->last_param = tcp->num_param; tcp->param[tcp->num_param] = 0; } else { tcp->param[tcp->num_param] *= 10; } tcp->param[tcp->num_param] += c - '0'; return; } } tcp->num_param = tcp->last_param + 1; switch (c) { case ';': if (tcp->num_param < MAX_ESC_PAR) return; break; case 'A': /* set display border color */ if (tcp->num_param == 1) { scp->border=tcp->param[0] & 0xff; if (scp == sc->cur_scp) sc_set_border(scp, scp->border); } break; case 'B': /* set bell pitch and duration */ if (tcp->num_param == 2) { scp->bell_pitch = tcp->param[0]; scp->bell_duration = (tcp->param[1] * hz + 99) / 100; } break; case 'C': /* set global/parmanent cursor type & shape */ i = spltty(); n = tcp->num_param; v0 = tcp->param[0]; v1 = tcp->param[1]; v2 = tcp->param[2]; switch (n) { case 1: /* flags only */ if (v0 < 
sizeof(cattrs)/sizeof(cattrs[0])) v0 = cattrs[v0]; else /* backward compatibility */ v0 = cattrs[v0 & 0x3]; sc_change_cursor_shape(scp, v0, -1, -1); break; case 2: v2 = 0; v0 &= 0x1f; /* backward compatibility */ v1 &= 0x1f; /* FALL THROUGH */ case 3: /* base and height */ if (v2 == 0) /* count from top */ sc_change_cursor_shape(scp, -1, scp->font_size - v1 - 1, v1 - v0 + 1); else if (v2 == 1) /* count from bottom */ sc_change_cursor_shape(scp, -1, v0, v1 - v0 + 1); break; } splx(i); break; case 'F': /* set adapter foreground */ if (tcp->num_param == 1) { tcp->attr_mask &= ~FG_CHANGED; tcp->cur_color.fg = tcp->std_color.fg = tcp->param[0] & 0x0f; tcp->cur_attr = mask2attr(tcp); } break; case 'G': /* set adapter background */ if (tcp->num_param == 1) { tcp->attr_mask &= ~BG_CHANGED; tcp->cur_color.bg = tcp->std_color.bg = tcp->param[0] & 0x0f; tcp->cur_attr = mask2attr(tcp); } break; case 'H': /* set adapter reverse foreground */ if (tcp->num_param == 1) { tcp->rev_color.fg = tcp->param[0] & 0x0f; tcp->cur_attr = mask2attr(tcp); } break; case 'I': /* set adapter reverse background */ if (tcp->num_param == 1) { tcp->rev_color.bg = tcp->param[0] & 0x0f; tcp->cur_attr = mask2attr(tcp); } break; case 'S': /* set local/temporary cursor type & shape */ i = spltty(); n = tcp->num_param; v0 = tcp->param[0]; switch (n) { case 0: v0 = 0; /* FALL THROUGH */ case 1: if (v0 < sizeof(tcattrs)/sizeof(tcattrs[0])) sc_change_cursor_shape(scp, tcattrs[v0], -1, -1); break; } splx(i); break; } #if notyet } else if (tcp->esc == 4) { /* seen ESC Q */ /* to be filled */ #endif } else if (tcp->esc == 5) { /* seen ESC ( */ switch (c) { case 'B': /* iso-2022: desginate ASCII into G0 */ break; /* other items to be filled */ default: break; } } tcp->esc = 0; } static void scterm_puts(scr_stat *scp, u_char *buf, int len) { term_stat *tcp; tcp = scp->ts; outloop: scp->sc->write_in_progress++; if (tcp->esc) { scterm_scan_esc(scp, tcp, *buf); buf++; len--; } else { switch (*buf) { case 0x1b: 
tcp->esc = 1; tcp->num_param = 0; buf++; len--; break; default: sc_term_gen_print(scp, &buf, &len, tcp->cur_attr); break; } } sc_term_gen_scroll(scp, scp->sc->scr_map[0x20], tcp->cur_attr); scp->sc->write_in_progress--; if (len) goto outloop; } static int scterm_ioctl(scr_stat *scp, struct tty *tp, u_long cmd, caddr_t data, int flag, struct thread *td) { term_stat *tcp = scp->ts; vid_info_t *vi; switch (cmd) { case GIO_ATTR: /* get current attributes */ /* FIXME: */ *(int*)data = (tcp->cur_attr >> 8) & 0xff; return 0; case CONS_GETINFO: /* get current (virtual) console info */ vi = (vid_info_t *)data; if (vi->size != sizeof(struct vid_info)) return EINVAL; vi->mv_norm.fore = tcp->std_color.fg; vi->mv_norm.back = tcp->std_color.bg; vi->mv_rev.fore = tcp->rev_color.fg; vi->mv_rev.back = tcp->rev_color.bg; /* * The other fields are filled by the upper routine. XXX */ return ENOIOCTL; } return ENOIOCTL; } static int scterm_reset(scr_stat *scp, int code) { /* FIXME */ return 0; } static void scterm_default_attr(scr_stat *scp, int color, int rev_color) { term_stat *tcp = scp->ts; tcp->dflt_std_color.fg = color & 0x0f; tcp->dflt_std_color.bg = (color >> 4) & 0x0f; tcp->dflt_rev_color.fg = rev_color & 0x0f; tcp->dflt_rev_color.bg = (rev_color >> 4) & 0x0f; tcp->std_color = tcp->dflt_std_color; tcp->rev_color = tcp->dflt_rev_color; tcp->cur_color = tcp->std_color; tcp->cur_attr = mask2attr(tcp); } static void scterm_clear(scr_stat *scp) { term_stat *tcp = scp->ts; sc_move_cursor(scp, 0, 0); sc_vtb_clear(&scp->vtb, scp->sc->scr_map[0x20], tcp->cur_attr); mark_all(scp); } static void scterm_notify(scr_stat *scp, int event) { switch (event) { case SC_TE_NOTIFY_VTSWITCH_IN: break; case SC_TE_NOTIFY_VTSWITCH_OUT: break; } } static int scterm_input(scr_stat *scp, int c, struct tty *tp) { return FALSE; } /* * Calculate hardware attributes word using logical attributes mask and * hardware colors */ /* FIXME */ static int mask2attr(term_stat *tcp) { int attr, mask = tcp->attr_mask; 
if (mask & REVERSE_ATTR) { attr = ((mask & FG_CHANGED) ? tcp->cur_color.bg : tcp->rev_color.fg) | (((mask & BG_CHANGED) ? tcp->cur_color.fg : tcp->rev_color.bg) << 4); } else attr = tcp->cur_color.fg | (tcp->cur_color.bg << 4); /* XXX: underline mapping for Hercules adapter can be better */ if (mask & (BOLD_ATTR | UNDERLINE_ATTR)) attr ^= 0x08; if (mask & BLINK_ATTR) attr ^= 0x80; return (attr << 8); } #endif /* SC_DUMB_TERMINAL */ Index: head/sys/dev/syscons/scvgarndr.c =================================================================== --- head/sys/dev/syscons/scvgarndr.c (revision 129878) +++ head/sys/dev/syscons/scvgarndr.c (revision 129879) @@ -1,881 +1,882 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_syscons.h" #include "opt_vga.h" #include #include #include +#include #include #include #include #include #include #include #include #ifndef SC_RENDER_DEBUG #define SC_RENDER_DEBUG 0 #endif static vr_clear_t vga_txtclear; static vr_draw_border_t vga_txtborder; static vr_draw_t vga_txtdraw; static vr_set_cursor_t vga_txtcursor_shape; static vr_draw_cursor_t vga_txtcursor; static vr_blink_cursor_t vga_txtblink; #ifndef SC_NO_CUTPASTE static vr_draw_mouse_t vga_txtmouse; #else #define vga_txtmouse (vr_draw_mouse_t *)vga_nop #endif #ifdef SC_PIXEL_MODE static vr_clear_t vga_pxlclear; static vr_draw_border_t vga_pxlborder; static vr_draw_t vga_egadraw; static vr_draw_t vga_vgadraw; static vr_set_cursor_t vga_pxlcursor_shape; static vr_draw_cursor_t vga_pxlcursor; static vr_blink_cursor_t vga_pxlblink; #ifndef SC_NO_CUTPASTE static vr_draw_mouse_t vga_pxlmouse; #else #define vga_pxlmouse (vr_draw_mouse_t *)vga_nop #endif #endif /* SC_PIXEL_MODE */ #ifndef SC_NO_MODE_CHANGE static vr_draw_border_t vga_grborder; #endif static void vga_nop(scr_stat *scp, ...); static sc_rndr_sw_t txtrndrsw = { vga_txtclear, vga_txtborder, vga_txtdraw, vga_txtcursor_shape, vga_txtcursor, vga_txtblink, (vr_set_mouse_t *)vga_nop, vga_txtmouse, }; RENDERER(mda, 0, txtrndrsw, vga_set); RENDERER(cga, 0, txtrndrsw, vga_set); RENDERER(ega, 0, txtrndrsw, vga_set); RENDERER(vga, 0, txtrndrsw, vga_set); #ifdef SC_PIXEL_MODE static sc_rndr_sw_t egarndrsw = { 
vga_pxlclear, vga_pxlborder, vga_egadraw, vga_pxlcursor_shape, vga_pxlcursor, vga_pxlblink, (vr_set_mouse_t *)vga_nop, vga_pxlmouse, }; RENDERER(ega, PIXEL_MODE, egarndrsw, vga_set); static sc_rndr_sw_t vgarndrsw = { vga_pxlclear, vga_pxlborder, vga_vgadraw, vga_pxlcursor_shape, vga_pxlcursor, vga_pxlblink, (vr_set_mouse_t *)vga_nop, vga_pxlmouse, }; RENDERER(vga, PIXEL_MODE, vgarndrsw, vga_set); #endif /* SC_PIXEL_MODE */ #ifndef SC_NO_MODE_CHANGE static sc_rndr_sw_t grrndrsw = { (vr_clear_t *)vga_nop, vga_grborder, (vr_draw_t *)vga_nop, (vr_set_cursor_t *)vga_nop, (vr_draw_cursor_t *)vga_nop, (vr_blink_cursor_t *)vga_nop, (vr_set_mouse_t *)vga_nop, (vr_draw_mouse_t *)vga_nop, }; RENDERER(cga, GRAPHICS_MODE, grrndrsw, vga_set); RENDERER(ega, GRAPHICS_MODE, grrndrsw, vga_set); RENDERER(vga, GRAPHICS_MODE, grrndrsw, vga_set); #endif /* SC_NO_MODE_CHANGE */ RENDERER_MODULE(vga, vga_set); #ifndef SC_NO_CUTPASTE #if !defined(SC_ALT_MOUSE_IMAGE) || defined(SC_PIXEL_MODE) static u_short mouse_and_mask[16] = { 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00, 0xff00, 0xff80, 0xfe00, 0x1e00, 0x1f00, 0x0f00, 0x0f00, 0x0000, 0x0000, 0x0000 }; static u_short mouse_or_mask[16] = { 0x0000, 0x4000, 0x6000, 0x7000, 0x7800, 0x7c00, 0x7e00, 0x6800, 0x0c00, 0x0c00, 0x0600, 0x0600, 0x0000, 0x0000, 0x0000, 0x0000 }; #endif #endif static void vga_nop(scr_stat *scp, ...) 
{
}

/* text mode renderer */

/*
 * Clear the text screen: fill the visible screen buffer with
 * character `c' drawn in attribute `attr'.
 */
static void
vga_txtclear(scr_stat *scp, int c, int attr)
{
	sc_vtb_clear(&scp->scr, c, attr);
}

/* Set the border (overscan) color through the video adapter switch. */
static void
vga_txtborder(scr_stat *scp, int color)
{
	(*vidsw[scp->sc->adapter]->set_border)(scp->sc->adp, color);
}

/*
 * Copy `count' character cells starting at cell index `from' from the
 * soft text buffer (scp->vtb) to the screen buffer (scp->scr).  When
 * `flip' is set, the foreground and background attribute fields are
 * exchanged (reverse video) cell by cell while copying.
 */
static void
vga_txtdraw(scr_stat *scp, int from, int count, int flip)
{
	vm_offset_t p;
	int c;
	int a;

	/* clip the request to the end of the screen */
	if (from + count > scp->xsize*scp->ysize)
		count = scp->xsize*scp->ysize - from;

	if (flip) {
		for (p = sc_vtb_pointer(&scp->scr, from); count-- > 0;
			++from) {
			c = sc_vtb_getc(&scp->vtb, from);
			a = sc_vtb_geta(&scp->vtb, from);
			/* swap fg (0x0700) and bg (0x7000), keep blink/bold (0x8800) */
			a = (a & 0x8800) | ((a & 0x7000) >> 4)
				| ((a & 0x0700) << 4);
			p = sc_vtb_putchar(&scp->scr, p, c, a);
		}
	} else {
		sc_vtb_copy(&scp->vtb, from, &scp->scr, from, count);
	}
}

/*
 * Program the hardware text cursor shape: `base' is the starting scan
 * line within the character cell, `height' the cursor size in scan
 * lines.
 */
static void
vga_txtcursor_shape(scr_stat *scp, int base, int height, int blink)
{
	if (base < 0 || base >= scp->font_size)
		return;
	/* the caller may set height <= 0 in order to disable the cursor */
#if 0
	scp->curs_attr.base = base;
	scp->curs_attr.height = height;
#endif
	(*vidsw[scp->sc->adapter]->set_hw_cursor_shape)(scp->sc->adp,
		base, height, scp->font_size, blink);
}

/*
 * Draw a software (character-based) cursor over cell `at'.  The cell's
 * character and attribute are saved first so the cursor can be removed
 * later (see vga_txtcursor).
 */
static void
draw_txtcharcursor(scr_stat *scp, int at, u_short c, u_short a, int flip)
{
	sc_softc_t *sc;

	sc = scp->sc;
	scp->cursor_saveunder_char = c;
	scp->cursor_saveunder_attr = a;
#ifndef SC_NO_FONT_LOADING
	if (scp->curs_attr.flags & CONS_CHAR_CURSOR) {
		unsigned char *font;
		int h;
		int i;

		/* pick the loaded font copy matching the current cell height */
		if (scp->font_size < 14) {
			font = sc->font_8;
			h = 8;
		} else if (scp->font_size >= 16) {
			font = sc->font_16;
			h = 16;
		} else {
			font = sc->font_14;
			h = 14;
		}
		if (scp->curs_attr.base >= h)
			return;
		if (flip)
			a = (a & 0x8800)
				| ((a & 0x7000) >> 4) | ((a & 0x0700) << 4);
		/* build the cursor glyph: copy the cell's glyph, then invert
		   the scan lines covered by the cursor */
		bcopy(font + c*h, font + sc->cursor_char*h, h);
		font = font + sc->cursor_char*h;
		for (i = imax(h - scp->curs_attr.base
			- scp->curs_attr.height, 0);
			i < h - scp->curs_attr.base; ++i) {
			font[i] ^= 0xff;
		}
		/* XXX */
		(*vidsw[sc->adapter]->load_font)(sc->adp, 0, h, font,
			sc->cursor_char, 1);
		sc_vtb_putc(&scp->scr, at, sc->cursor_char,
a); } else #endif /* SC_NO_FONT_LOADING */ { if ((a & 0x7000) == 0x7000) { a &= 0x8f00; if ((a & 0x0700) == 0) a |= 0x0700; } else { a |= 0x7000; if ((a & 0x0700) == 0x0700) a &= 0xf000; } if (flip) a = (a & 0x8800) | ((a & 0x7000) >> 4) | ((a & 0x0700) << 4); sc_vtb_putc(&scp->scr, at, c, a); } } static void vga_txtcursor(scr_stat *scp, int at, int blink, int on, int flip) { video_adapter_t *adp; int cursor_attr; if (scp->curs_attr.height <= 0) /* the text cursor is disabled */ return; adp = scp->sc->adp; if (blink) { scp->status |= VR_CURSOR_BLINK; if (on) { scp->status |= VR_CURSOR_ON; (*vidsw[adp->va_index]->set_hw_cursor)(adp, at%scp->xsize, at/scp->xsize); } else { if (scp->status & VR_CURSOR_ON) (*vidsw[adp->va_index]->set_hw_cursor)(adp, -1, -1); scp->status &= ~VR_CURSOR_ON; } } else { scp->status &= ~VR_CURSOR_BLINK; if (on) { scp->status |= VR_CURSOR_ON; draw_txtcharcursor(scp, at, sc_vtb_getc(&scp->scr, at), sc_vtb_geta(&scp->scr, at), flip); } else { cursor_attr = scp->cursor_saveunder_attr; if (flip) cursor_attr = (cursor_attr & 0x8800) | ((cursor_attr & 0x7000) >> 4) | ((cursor_attr & 0x0700) << 4); if (scp->status & VR_CURSOR_ON) sc_vtb_putc(&scp->scr, at, scp->cursor_saveunder_char, cursor_attr); scp->status &= ~VR_CURSOR_ON; } } } static void vga_txtblink(scr_stat *scp, int at, int flip) { } #ifndef SC_NO_CUTPASTE static void draw_txtmouse(scr_stat *scp, int x, int y) { #ifndef SC_ALT_MOUSE_IMAGE if (ISMOUSEAVAIL(scp->sc->adp->va_flags)) { u_char font_buf[128]; u_short cursor[32]; u_char c; int pos; int xoffset, yoffset; int crtc_addr; int i; /* prepare mousepointer char's bitmaps */ pos = (y/scp->font_size - scp->yoff)*scp->xsize + x/8 - scp->xoff; bcopy(scp->font + sc_vtb_getc(&scp->scr, pos)*scp->font_size, &font_buf[0], scp->font_size); bcopy(scp->font + sc_vtb_getc(&scp->scr, pos + 1)*scp->font_size, &font_buf[32], scp->font_size); bcopy(scp->font + sc_vtb_getc(&scp->scr, pos + scp->xsize)*scp->font_size, &font_buf[64], scp->font_size); 
bcopy(scp->font + sc_vtb_getc(&scp->scr, pos + scp->xsize + 1)*scp->font_size, &font_buf[96], scp->font_size); for (i = 0; i < scp->font_size; ++i) { cursor[i] = font_buf[i]<<8 | font_buf[i+32]; cursor[i + scp->font_size] = font_buf[i+64]<<8 | font_buf[i+96]; } /* now and-or in the mousepointer image */ xoffset = x%8; yoffset = y%scp->font_size; for (i = 0; i < 16; ++i) { cursor[i + yoffset] = (cursor[i + yoffset] & ~(mouse_and_mask[i] >> xoffset)) | (mouse_or_mask[i] >> xoffset); } for (i = 0; i < scp->font_size; ++i) { font_buf[i] = (cursor[i] & 0xff00) >> 8; font_buf[i + 32] = cursor[i] & 0xff; font_buf[i + 64] = (cursor[i + scp->font_size] & 0xff00) >> 8; font_buf[i + 96] = cursor[i + scp->font_size] & 0xff; } #if 1 /* wait for vertical retrace to avoid jitter on some videocards */ crtc_addr = scp->sc->adp->va_crtc_addr; while (!(inb(crtc_addr + 6) & 0x08)) /* idle */ ; #endif c = scp->sc->mouse_char; (*vidsw[scp->sc->adapter]->load_font)(scp->sc->adp, 0, 32, font_buf, c, 4); sc_vtb_putc(&scp->scr, pos, c, sc_vtb_geta(&scp->scr, pos)); /* FIXME: may be out of range! 
 */
		sc_vtb_putc(&scp->scr, pos + scp->xsize, c + 2,
			sc_vtb_geta(&scp->scr, pos + scp->xsize));
		if (x < (scp->xsize - 1)*8) {
			sc_vtb_putc(&scp->scr, pos + 1, c + 1,
				sc_vtb_geta(&scp->scr, pos + 1));
			sc_vtb_putc(&scp->scr, pos + scp->xsize + 1, c + 3,
				sc_vtb_geta(&scp->scr, pos + scp->xsize + 1));
		}
	} else
#endif /* SC_ALT_MOUSE_IMAGE */
	{
		/* Red, magenta and brown are mapped to green to
		   keep it readable */
		static const int col_conv[16] = {
			6, 6, 6, 6, 2, 2, 2, 6,
			14, 14, 14, 14, 10, 10, 10, 14
		};
		int pos;
		int color;
		int a;

		/* character cell under the pointer */
		pos = (y/scp->font_size - scp->yoff)*scp->xsize
			+ x/8 - scp->xoff;
		a = sc_vtb_geta(&scp->scr, pos);
		if (scp->sc->adp->va_flags & V_ADP_COLOR)
			color = (col_conv[(a & 0xf000) >> 12] << 12)
				| ((a & 0x0f00) | 0x0800);
		else
			color = ((a & 0xf000) >> 4) | ((a & 0x0f00) << 4);
		/* re-draw the cell with the remapped attribute */
		sc_vtb_putc(&scp->scr, pos, sc_vtb_getc(&scp->scr, pos),
			color);
	}
}

/* Removing the text-mode mouse image is a no-op in this renderer. */
static void
remove_txtmouse(scr_stat *scp, int x, int y)
{
}

/* Draw (on != 0) or remove the mouse pointer at pixel position (x, y). */
static void
vga_txtmouse(scr_stat *scp, int x, int y, int on)
{
	if (on)
		draw_txtmouse(scp, x, y);
	else
		remove_txtmouse(scp, x, y);
}

#endif /* SC_NO_CUTPASTE */

#ifdef SC_PIXEL_MODE

/* pixel (raster text) mode renderer */

/*
 * Clear the pixel-mode text area to the background color taken from
 * the attribute `attr' (the character code `c' is not used here).
 */
static void
vga_pxlclear(scr_stat *scp, int c, int attr)
{
	vm_offset_t p;
	int line_width;
	int lines;
	int i;

	/* XXX: we are just filling the screen with the background color...
*/ outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, ((attr & 0xf000) >> 4) | 0x00); /* set/reset */ line_width = scp->sc->adp->va_line_width; lines = scp->ysize*scp->font_size; p = scp->sc->adp->va_window + line_width*scp->yoff*scp->font_size + scp->xoff; for (i = 0; i < lines; ++i) { bzero_io((void *)p, scp->xsize); p += line_width; } outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ } static void vga_pxlborder(scr_stat *scp, int color) { vm_offset_t p; int line_width; int x; int y; int i; (*vidsw[scp->sc->adapter]->set_border)(scp->sc->adp, color); outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, (color << 8) | 0x00); /* set/reset */ line_width = scp->sc->adp->va_line_width; p = scp->sc->adp->va_window; if (scp->yoff > 0) bzero_io((void *)p, line_width*scp->yoff*scp->font_size); y = (scp->yoff + scp->ysize)*scp->font_size; if (scp->ypixel > y) bzero_io((void *)(p + line_width*y), line_width*(scp->ypixel - y)); y = scp->yoff*scp->font_size; x = scp->xpixel/8 - scp->xoff - scp->xsize; for (i = 0; i < scp->ysize*scp->font_size; ++i) { if (scp->xoff > 0) bzero_io((void *)(p + line_width*(y + i)), scp->xoff); if (x > 0) bzero_io((void *)(p + line_width*(y + i) + scp->xoff + scp->xsize), x); } outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ } static void vga_egadraw(scr_stat *scp, int from, int count, int flip) { vm_offset_t d; vm_offset_t e; u_char *f; u_short bg; u_short col1, col2; int line_width; int i, j; int a; u_char c; line_width = scp->sc->adp->va_line_width; d = scp->sc->adp->va_window + scp->xoff + scp->yoff*scp->font_size*line_width + (from%scp->xsize) + 
scp->font_size*line_width*(from/scp->xsize); outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ bg = -1; if (from + count > scp->xsize*scp->ysize) count = scp->xsize*scp->ysize - from; for (i = from; count-- > 0; ++i) { a = sc_vtb_geta(&scp->vtb, i); if (flip) { col1 = ((a & 0x7000) >> 4) | (a & 0x0800); col2 = ((a & 0x8000) >> 4) | (a & 0x0700); } else { col1 = (a & 0x0f00); col2 = (a & 0xf000) >> 4; } /* set background color in EGA/VGA latch */ if (bg != col2) { bg = col2; outw(GDCIDX, bg | 0x00); /* set/reset */ outw(GDCIDX, 0xff08); /* bit mask */ writeb(d, 0); c = readb(d); /* set bg color in the latch */ } /* foreground color */ outw(GDCIDX, col1 | 0x00); /* set/reset */ e = d; f = &(scp->font[sc_vtb_getc(&scp->vtb, i)*scp->font_size]); for (j = 0; j < scp->font_size; ++j, ++f) { outw(GDCIDX, (*f << 8) | 0x08); /* bit mask */ writeb(e, 0); e += line_width; } ++d; if ((i % scp->xsize) == scp->xsize - 1) d += scp->xoff*2 + (scp->font_size - 1)*line_width; } outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ } static void vga_vgadraw(scr_stat *scp, int from, int count, int flip) { vm_offset_t d; vm_offset_t e; u_char *f; u_short bg; u_short col1, col2; int line_width; int i, j; int a; u_char c; line_width = scp->sc->adp->va_line_width; d = scp->sc->adp->va_window + scp->xoff + scp->yoff*scp->font_size*line_width + (from%scp->xsize) + scp->font_size*line_width*(from/scp->xsize); outw(GDCIDX, 0x0305); /* read mode 0, write mode 3 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ bg = -1; if (from + count > scp->xsize*scp->ysize) count = scp->xsize*scp->ysize - from; for (i = from; count-- > 0; ++i) { a = sc_vtb_geta(&scp->vtb, i); if (flip) { col1 = ((a & 0x7000) >> 4) | (a & 0x0800); col2 = ((a & 0x8000) 
>> 4) | (a & 0x0700); } else { col1 = (a & 0x0f00); col2 = (a & 0xf000) >> 4; } /* set background color in EGA/VGA latch */ if (bg != col2) { bg = col2; outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, bg | 0x00); /* set/reset */ writeb(d, 0); c = readb(d); /* set bg color in the latch */ outw(GDCIDX, 0x0305); /* read mode 0, write mode 3 */ } /* foreground color */ outw(GDCIDX, col1 | 0x00); /* set/reset */ e = d; f = &(scp->font[sc_vtb_getc(&scp->vtb, i)*scp->font_size]); for (j = 0; j < scp->font_size; ++j, ++f) { writeb(e, *f); e += line_width; } ++d; if ((i % scp->xsize) == scp->xsize - 1) d += scp->xoff*2 + (scp->font_size - 1)*line_width; } outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ } static void vga_pxlcursor_shape(scr_stat *scp, int base, int height, int blink) { if (base < 0 || base >= scp->font_size) return; /* the caller may set height <= 0 in order to disable the cursor */ #if 0 scp->curs_attr.base = base; scp->curs_attr.height = height; #endif } static void draw_pxlcursor(scr_stat *scp, int at, int on, int flip) { vm_offset_t d; u_char *f; int line_width; int height; int col; int a; int i; u_char c; line_width = scp->sc->adp->va_line_width; d = scp->sc->adp->va_window + scp->xoff + scp->yoff*scp->font_size*line_width + (at%scp->xsize) + scp->font_size*line_width*(at/scp->xsize) + (scp->font_size - scp->curs_attr.base - 1)*line_width; outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ /* set background color in EGA/VGA latch */ a = sc_vtb_geta(&scp->vtb, at); if (flip) col = (on) ? ((a & 0xf000) >> 4) : (a & 0x0f00); else col = (on) ? 
(a & 0x0f00) : ((a & 0xf000) >> 4); outw(GDCIDX, col | 0x00); /* set/reset */ outw(GDCIDX, 0xff08); /* bit mask */ writeb(d, 0); c = readb(d); /* set bg color in the latch */ /* foreground color */ if (flip) col = (on) ? (a & 0x0f00) : ((a & 0xf000) >> 4); else col = (on) ? ((a & 0xf000) >> 4) : (a & 0x0f00); outw(GDCIDX, col | 0x00); /* set/reset */ f = &(scp->font[sc_vtb_getc(&scp->vtb, at)*scp->font_size + scp->font_size - scp->curs_attr.base - 1]); height = imin(scp->curs_attr.height, scp->font_size); for (i = 0; i < height; ++i, --f) { outw(GDCIDX, (*f << 8) | 0x08); /* bit mask */ writeb(d, 0); d -= line_width; } outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ } static int pxlblinkrate = 0; static void vga_pxlcursor(scr_stat *scp, int at, int blink, int on, int flip) { if (scp->curs_attr.height <= 0) /* the text cursor is disabled */ return; if (on) { if (!blink) { scp->status |= VR_CURSOR_ON; draw_pxlcursor(scp, at, on, flip); } else if (++pxlblinkrate & 4) { pxlblinkrate = 0; scp->status ^= VR_CURSOR_ON; draw_pxlcursor(scp, at, scp->status & VR_CURSOR_ON, flip); } } else { if (scp->status & VR_CURSOR_ON) draw_pxlcursor(scp, at, on, flip); scp->status &= ~VR_CURSOR_ON; } if (blink) scp->status |= VR_CURSOR_BLINK; else scp->status &= ~VR_CURSOR_BLINK; } static void vga_pxlblink(scr_stat *scp, int at, int flip) { if (!(scp->status & VR_CURSOR_BLINK)) return; if (!(++pxlblinkrate & 4)) return; pxlblinkrate = 0; scp->status ^= VR_CURSOR_ON; draw_pxlcursor(scp, at, scp->status & VR_CURSOR_ON, flip); } #ifndef SC_NO_CUTPASTE static void draw_pxlmouse(scr_stat *scp, int x, int y) { vm_offset_t p; int line_width; int xoff, yoff; int ymax; u_short m; int i, j; line_width = scp->sc->adp->va_line_width; xoff = (x - scp->xoff*8)%8; yoff = y - (y/line_width)*line_width; ymax = imin(y + 16, scp->ypixel); outw(GDCIDX, 0x0805); /* read mode 1, write mode 0 */ outw(GDCIDX, 0x0001); /* set/reset enable */ 
outw(GDCIDX, 0x0002); /* color compare */ outw(GDCIDX, 0x0007); /* color don't care */ outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, 0x0803); /* data rotate/function select (and) */ p = scp->sc->adp->va_window + line_width*y + x/8; if (x < scp->xpixel - 8) { for (i = y, j = 0; i < ymax; ++i, ++j) { m = ~(mouse_and_mask[j] >> xoff); #ifdef __i386__ *(u_char *)p &= m >> 8; *(u_char *)(p + 1) &= m; #elif defined(__alpha__) writeb(p, readb(p) & (m >> 8)); writeb(p + 1, readb(p + 1) & (m >> 8)); #endif p += line_width; } } else { xoff += 8; for (i = y, j = 0; i < ymax; ++i, ++j) { m = ~(mouse_and_mask[j] >> xoff); #ifdef __i386__ *(u_char *)p &= m; #elif defined(__alpha__) writeb(p, readb(p) & (m >> 8)); #endif p += line_width; } } outw(GDCIDX, 0x1003); /* data rotate/function select (or) */ p = scp->sc->adp->va_window + line_width*y + x/8; if (x < scp->xpixel - 8) { for (i = y, j = 0; i < ymax; ++i, ++j) { m = mouse_or_mask[j] >> xoff; #ifdef __i386__ *(u_char *)p &= m >> 8; *(u_char *)(p + 1) &= m; #elif defined(__alpha__) writeb(p, readb(p) & (m >> 8)); writeb(p + 1, readb(p + 1) & (m >> 8)); #endif p += line_width; } } else { for (i = y, j = 0; i < ymax; ++i, ++j) { m = mouse_or_mask[j] >> xoff; #ifdef __i386__ *(u_char *)p &= m; #elif defined(__alpha__) writeb(p, readb(p) & (m >> 8)); #endif p += line_width; } } outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ } static void remove_pxlmouse(scr_stat *scp, int x, int y) { vm_offset_t p; int col, row; int pos; int line_width; int ymax; int i; /* erase the mouse cursor image */ col = x/8 - scp->xoff; row = y/scp->font_size - scp->yoff; pos = row*scp->xsize + col; i = (col < scp->xsize - 1) ? 
2 : 1; (*scp->rndr->draw)(scp, pos, i, FALSE); if (row < scp->ysize - 1) (*scp->rndr->draw)(scp, pos + scp->xsize, i, FALSE); /* paint border if necessary */ line_width = scp->sc->adp->va_line_width; outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, (scp->border << 8) | 0x00); /* set/reset */ if (row == scp->ysize - 1) { i = (scp->ysize + scp->yoff)*scp->font_size; ymax = imin(i + scp->font_size, scp->ypixel); p = scp->sc->adp->va_window + i*line_width + scp->xoff + col; if (col < scp->xsize - 1) { for (; i < ymax; ++i) { writeb(p, 0); writeb(p + 1, 0); p += line_width; } } else { for (; i < ymax; ++i) { writeb(p, 0); p += line_width; } } } if ((col == scp->xsize - 1) && (scp->xoff > 0)) { i = (row + scp->yoff)*scp->font_size; ymax = imin(i + scp->font_size*2, scp->ypixel); p = scp->sc->adp->va_window + i*line_width + scp->xoff + scp->xsize; for (; i < ymax; ++i) { writeb(p, 0); p += line_width; } } outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ } static void vga_pxlmouse(scr_stat *scp, int x, int y, int on) { if (on) draw_pxlmouse(scp, x, y); else remove_pxlmouse(scp, x, y); } #endif /* SC_NO_CUTPASTE */ #endif /* SC_PIXEL_MODE */ #ifndef SC_NO_MODE_CHANGE /* graphics mode renderer */ static void vga_grborder(scr_stat *scp, int color) { (*vidsw[scp->sc->adapter]->set_border)(scp->sc->adp, color); } #endif Index: head/sys/dev/tdfx/tdfx_pci.c =================================================================== --- head/sys/dev/tdfx/tdfx_pci.c (revision 129878) +++ head/sys/dev/tdfx/tdfx_pci.c (revision 129879) @@ -1,866 +1,867 @@ /*- * Copyright (c) 2000-2001 by Coleman Kane * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Gardner Buchanan. * 4. The name of Gardner Buchanan may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* 3dfx driver for FreeBSD 4.x - Finished 11 May 2000, 12:25AM ET * * Copyright (C) 2000-2001, by Coleman Kane , * based upon the 3dfx driver written for linux, by Daryll Straus, Jon Taylor, * and Jens Axboe, located at http://linux.3dfx.com. 
*/ #include #include #include #include #include #include #include #include #include #include -#include +#include +#include #include #include #include #include #include #include #include #include #include #include /* rman.h depends on machine/bus.h */ #include #include #include /* This must come first */ #include "opt_tdfx.h" #ifdef TDFX_LINUX #include #endif #include #include #include static devclass_t tdfx_devclass; static int tdfx_count = 0; /* Set up the boot probe/attach routines */ static device_method_t tdfx_methods[] = { DEVMETHOD(device_probe, tdfx_probe), DEVMETHOD(device_attach, tdfx_attach), DEVMETHOD(device_detach, tdfx_detach), DEVMETHOD(device_shutdown, tdfx_shutdown), { 0, 0 } }; MALLOC_DEFINE(M_TDFX,"TDFX Driver","3DFX Graphics[/2D]/3D Accelerator(s)"); #ifdef TDFX_LINUX MODULE_DEPEND(tdfx, linux, 1, 1, 1); LINUX_IOCTL_SET(tdfx, LINUX_IOCTL_TDFX_MIN, LINUX_IOCTL_TDFX_MAX); #endif /* Char. Dev. file operations structure */ static struct cdevsw tdfx_cdev = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = tdfx_open, .d_close = tdfx_close, .d_ioctl = tdfx_ioctl, .d_mmap = tdfx_mmap, .d_name = "tdfx", }; static int tdfx_probe(device_t dev) { /* * probe routine called on kernel boot to register supported devices. We get * a device structure to work with, and we can test the VENDOR/DEVICE IDs to * see if this PCI device is one that we support. Return 0 if yes, ENXIO if * not. 
*/ switch(pci_get_devid(dev)) { case PCI_DEVICE_ALLIANCE_AT3D: device_set_desc(dev, "ProMotion At3D 3D Accelerator"); return 0; case PCI_DEVICE_3DFX_VOODOO2: device_set_desc(dev, "3DFX Voodoo II 3D Accelerator"); return 0; /*case PCI_DEVICE_3DFX_BANSHEE: device_set_desc(dev, "3DFX Voodoo Banshee 2D/3D Graphics Accelerator"); return 0; case PCI_DEVICE_3DFX_VOODOO3: device_set_desc(dev, "3DFX Voodoo3 2D/3D Graphics Accelerator"); return 0;*/ case PCI_DEVICE_3DFX_VOODOO1: device_set_desc(dev, "3DFX Voodoo Graphics 3D Accelerator"); return 0;; }; return ENXIO; } static int tdfx_attach(device_t dev) { /* * The attach routine is called after the probe routine successfully says it * supports a given card. We now proceed to initialize this card for use with * the system. I want to map the device memory for userland allocation and * fill an information structure with information on this card. I'd also like * to set Write Combining with the MTRR code so that we can hopefully speed * up memory writes. The last thing is to register the character device * interface to the card, so we can open it from /dev/3dfxN, where N is a * small, whole number. 
*/ struct tdfx_softc *tdfx_info; u_long val; /* rid value tells bus_alloc_resource where to find the addresses of ports or * of memory ranges in the PCI config space*/ int rid = PCIR_BAR(0); /* Increment the card counter (for the ioctl code) */ tdfx_count++; /* Enable MemMap on Voodoo */ val = pci_read_config(dev, PCIR_COMMAND, 2); val |= (PCIM_CMD_MEMEN); pci_write_config(dev, PCIR_COMMAND, val, 2); val = pci_read_config(dev, PCIR_COMMAND, 2); /* Fill the soft config struct with info about this device*/ tdfx_info = device_get_softc(dev); tdfx_info->dev = dev; tdfx_info->vendor = pci_get_vendor(dev); tdfx_info->type = pci_get_devid(dev) >> 16; tdfx_info->bus = pci_get_bus(dev); tdfx_info->dv = pci_get_slot(dev); tdfx_info->curFile = NULL; /* * Get the Memory Location from the PCI Config, mask out lower word, since * the config space register is only one word long (this is nicer than a * bitshift). */ tdfx_info->addr0 = (pci_read_config(dev, 0x10, 4) & 0xffff0000); #ifdef DEBUG device_printf(dev, "Base0 @ 0x%x\n", tdfx_info->addr0); #endif /* Notify the VM that we will be mapping some memory later */ tdfx_info->memrange = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if(tdfx_info->memrange == NULL) { #ifdef DEBUG device_printf(dev, "Error mapping mem, won't be able to use mmap()\n"); #endif tdfx_info->memrid = 0; } else { tdfx_info->memrid = rid; #ifdef DEBUG device_printf(dev, "Mapped to: 0x%x\n", (unsigned int)rman_get_start(tdfx_info->memrange)); #endif } /* Setup for Voodoo3 and Banshee, PIO and an extram Memrange */ if(pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO3 || pci_get_devid(dev) == PCI_DEVICE_3DFX_BANSHEE) { rid = 0x14; /* 2nd mem map */ tdfx_info->addr1 = (pci_read_config(dev, 0x14, 4) & 0xffff0000); #ifdef DEBUG device_printf(dev, "Base1 @ 0x%x\n", tdfx_info->addr1); #endif tdfx_info->memrange2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); if(tdfx_info->memrange2 == NULL) { #ifdef DEBUG 
device_printf(dev, "Mem1 couldn't be allocated, glide may not work."); #endif tdfx_info->memrid2 = 0; } else { tdfx_info->memrid2 = rid; } /* Now to map the PIO stuff */ rid = PCIR_IOBASE0_2; tdfx_info->pio0 = pci_read_config(dev, 0x2c, 2); tdfx_info->pio0max = pci_read_config(dev, 0x30, 2) + tdfx_info->pio0; tdfx_info->piorange = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE | RF_SHAREABLE); if(tdfx_info->piorange == NULL) { #ifdef DEBUG device_printf(dev, "Couldn't map PIO range."); #endif tdfx_info->piorid = 0; } else { tdfx_info->piorid = rid; } } else { tdfx_info->addr1 = 0; tdfx_info->memrange2 = NULL; tdfx_info->piorange = NULL; } /* * Set Writecombining, or at least Uncacheable for the memory region, if we * are able to */ if(tdfx_setmtrr(dev) != 0) { #ifdef DEBUG device_printf(dev, "Some weird error setting MTRRs"); #endif return -1; } /* * make_dev registers the cdev to access the 3dfx card from /dev * use hex here for the dev num, simply to provide better support if > 10 * voodoo cards, for the mad. The user must set the link, or use MAKEDEV. * Why would we want that many voodoo cards anyhow? 
*/ tdfx_info->devt = make_dev(&tdfx_cdev, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "3dfx%x", device_get_unit(dev)); return 0; } static int tdfx_detach(device_t dev) { struct tdfx_softc* tdfx_info; int retval; tdfx_info = device_get_softc(dev); /* Delete allocated resource, of course */ bus_release_resource(dev, SYS_RES_MEMORY, tdfx_info->memrid, tdfx_info->memrange); /* Release extended Voodoo3/Banshee resources */ if(pci_get_devid(dev) == PCI_DEVICE_3DFX_BANSHEE || pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO3) { if(tdfx_info->memrange2 != NULL) bus_release_resource(dev, SYS_RES_MEMORY, tdfx_info->memrid2, tdfx_info->memrange); /* if(tdfx_info->piorange != NULL) bus_release_resource(dev, SYS_RES_IOPORT, tdfx_info->piorid, tdfx_info->piorange);*/ } /* Though it is safe to leave the WRCOMB support since the mem driver checks for it, we should remove it in order to free an MTRR for another device */ retval = tdfx_clrmtrr(dev); #ifdef DEBUG if(retval != 0) printf("tdfx: For some reason, I couldn't clear the mtrr\n"); #endif /* Remove device entry when it can no longer be accessed */ destroy_dev(tdfx_info->devt); return(0); } static int tdfx_shutdown(device_t dev) { #ifdef DEBUG device_printf(dev, "tdfx: Device Shutdown\n"); #endif return 0; } static int tdfx_clrmtrr(device_t dev) { /* This function removes the MTRR set by the attach call, so it can be used * in the future by other drivers. */ int retval, act; struct tdfx_softc *tdfx_info = device_get_softc(dev); act = MEMRANGE_SET_REMOVE; retval = mem_range_attr_set(&tdfx_info->mrdesc, &act); return retval; } static int tdfx_setmtrr(device_t dev) { /* * This is the MTRR setting function for the 3dfx card. It is called from * tdfx_attach. If we can't set the MTRR properly, it's not the end of the * world. We can still continue, just with slightly (very slightly) degraded * performance. 
*/ int retval = 0, act; struct tdfx_softc *tdfx_info = device_get_softc(dev); /* The older Voodoo cards have a shorter memrange than the newer ones */ if((pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO1) || (pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO2)) { tdfx_info->mrdesc.mr_len = 0x400000; /* The memory descriptor is described as the top 15 bits of the real address */ tdfx_info->mrdesc.mr_base = tdfx_info->addr0 & 0xfffe0000; } else if((pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO3) || (pci_get_devid(dev) == PCI_DEVICE_3DFX_BANSHEE)) { tdfx_info->mrdesc.mr_len = 0x1000000; /* The Voodoo3 and Banshee LFB is the second memory address */ /* The memory descriptor is described as the top 15 bits of the real address */ tdfx_info->mrdesc.mr_base = tdfx_info->addr1 & 0xfffe0000; } else return 0; /* * The Alliance Pro Motion AT3D was not mentioned in the linux * driver as far as MTRR support goes, so I just won't put the * code in here for it. This is where it should go, though. */ /* Firstly, try to set write combining */ tdfx_info->mrdesc.mr_flags = MDF_WRITECOMBINE; bcopy("tdfx", &tdfx_info->mrdesc.mr_owner, 4); act = MEMRANGE_SET_UPDATE; retval = mem_range_attr_set(&tdfx_info->mrdesc, &act); if(retval == 0) { #ifdef DEBUG device_printf(dev, "MTRR Set Correctly for tdfx\n"); #endif } else if((pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO2) || (pci_get_devid(dev) == PCI_DEVICE_3DFX_VOODOO1)) { /* if, for some reason we can't set the WRCOMB range with the V1/V2, we * can still possibly use the UNCACHEABLE region for it instead, and help * out in a small way */ tdfx_info->mrdesc.mr_flags = MDF_UNCACHEABLE; /* This length of 1000h was taken from the linux device driver... */ tdfx_info->mrdesc.mr_len = 0x1000; /* * If, for some reason, we can't set the MTRR (N/A?) 
we may still continue */ #ifdef DEBUG if(retval == 0) { device_printf(dev, "MTRR Set Type Uncacheable %x\n", (u_int32_t)tdfx_info->mrdesc.mr_base); } else { device_printf(dev, "Couldn't Set MTRR\n"); } #endif } #ifdef DEBUG else { device_printf(dev, "Couldn't Set MTRR\n"); return 0; } #endif return 0; } static int tdfx_open(dev_t dev, int flags, int fmt, struct thread *td) { /* * The open cdev method handles open(2) calls to /dev/3dfx[n] * We can pretty much allow any opening of the device. */ struct tdfx_softc *tdfx_info = devclass_get_softc(tdfx_devclass, UNIT(minor(dev))); if(tdfx_info->busy != 0) return EBUSY; #ifdef DEBUG printf("3dfx: Opened by #%d\n", td->td_proc->p_pid); #endif /* Set the driver as busy */ tdfx_info->busy++; return 0; } static int tdfx_close(dev_t dev, int fflag, int devtype, struct thread *td) { /* * The close cdev method handles close(2) calls to /dev/3dfx[n] * We'll always want to close the device when it's called. */ struct tdfx_softc *tdfx_info = devclass_get_softc(tdfx_devclass, UNIT(minor(dev))); if(tdfx_info->busy == 0) return EBADF; tdfx_info->busy = 0; #ifdef DEBUG printf("Closed by #%d\n", td->td_proc->p_pid); #endif return 0; } static int tdfx_mmap(dev_t dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot) { /* * mmap(2) is called by a user process to request that an area of memory * associated with this device be mapped for the process to work with. Nprot * holds the protections requested, PROT_READ, PROT_WRITE, or both. 
*/ /**** OLD GET CONFIG ****/ /* struct tdfx_softc* tdfx_info; */ /* Get the configuration for our card XXX*/ /*tdfx_info = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, UNIT(minor(dev)));*/ /************************/ struct tdfx_softc* tdfx_info[2]; tdfx_info[0] = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, 0); /* If, for some reason, its not configured, we bail out */ if(tdfx_info[0] == NULL) { #ifdef DEBUG printf("tdfx: tdfx_info (softc) is NULL\n"); #endif return -1; } /* We must stay within the bound of our address space */ if((offset & 0xff000000) == tdfx_info[0]->addr0) { offset &= 0xffffff; *paddr = rman_get_start(tdfx_info[0]->memrange) + offset; return 0; } if(tdfx_count > 1) { tdfx_info[1] = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, 1); if((offset & 0xff000000) == tdfx_info[1]->addr0) { offset &= 0xffffff; *paddr = rman_get_start(tdfx_info[1]->memrange) + offset; return 0; } } /* See if the Banshee/V3 LFB is being requested */ /*if(tdfx_info->memrange2 != NULL && (offset & 0xff000000) == tdfx_info->addr1) { offset &= 0xffffff; return atop(rman_get_start(tdfx_info[1]->memrange2) + offset); }*/ /* VoodooNG code */ /* The ret call */ /* atop -> address to page * rman_get_start, get the (struct resource*)->r_start member, * the mapping base address. 
*/ return -1; } static int tdfx_query_boards(void) { /* * This returns the number of installed tdfx cards, we have been keeping * count, look at tdfx_attach */ return tdfx_count; } static int tdfx_query_fetch(u_int cmd, struct tdfx_pio_data *piod) { /* XXX Comment this later, after careful inspection and spring cleaning :) */ /* Various return values 8bit-32bit */ u_int8_t ret_byte; u_int16_t ret_word; u_int32_t ret_dword; struct tdfx_softc* tdfx_info = NULL; /* This one depend on the tdfx_* structs being properly initialized */ /*piod->device &= 0xf;*/ if((piod == NULL) ||(tdfx_count <= piod->device) || (piod->device < 0)) { #ifdef DEBUG printf("tdfx: Bad device or internal struct in tdfx_query_fetch\n"); #endif return -EINVAL; } tdfx_info = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, piod->device); if(tdfx_info == NULL) return -ENXIO; /* We must restrict the size reads from the port, since to high or low of a * size witll result in wrong data being passed, and that's bad */ /* A few of these were pulled during the attach phase */ switch(piod->port) { case PCI_VENDOR_ID_FREEBSD: if(piod->size != 2) return -EINVAL; copyout(&tdfx_info->vendor, piod->value, piod->size); return 0; case PCI_DEVICE_ID_FREEBSD: if(piod->size != 2) return -EINVAL; copyout(&tdfx_info->type, piod->value, piod->size); return 0; case PCI_BASE_ADDRESS_0_FREEBSD: if(piod->size != 4) return -EINVAL; copyout(&tdfx_info->addr0, piod->value, piod->size); return 0; case PCI_BASE_ADDRESS_1_FREEBSD: if(piod->size != 4) return -EINVAL; copyout(&tdfx_info->addr1, piod->value, piod->size); return 0; case PCI_PRIBUS_FREEBSD: if(piod->size != 1) return -EINVAL; break; case PCI_IOBASE_0_FREEBSD: if(piod->size != 2) return -EINVAL; break; case PCI_IOLIMIT_0_FREEBSD: if(piod->size != 2) return -EINVAL; break; case SST1_PCI_SPECIAL1_FREEBSD: if(piod->size != 4) return -EINVAL; break; case PCI_REVISION_ID_FREEBSD: if(piod->size != 1) return -EINVAL; break; case SST1_PCI_SPECIAL4_FREEBSD: if(piod->size 
!= 4) return -EINVAL; break; default: return -EINVAL; } /* Read the value and return */ switch(piod->size) { case 1: ret_byte = pci_read_config(tdfx_info[piod->device].dev, piod->port, 1); copyout(&ret_byte, piod->value, 1); break; case 2: ret_word = pci_read_config(tdfx_info[piod->device].dev, piod->port, 2); copyout(&ret_word, piod->value, 2); break; case 4: ret_dword = pci_read_config(tdfx_info[piod->device].dev, piod->port, 4); copyout(&ret_dword, piod->value, 4); break; default: return -EINVAL; } return 0; } static int tdfx_query_update(u_int cmd, struct tdfx_pio_data *piod) { /* XXX Comment this later, after careful inspection and spring cleaning :) */ /* Return vals */ u_int8_t ret_byte; u_int16_t ret_word; u_int32_t ret_dword; /* Port vals, mask */ u_int32_t retval, preval, mask; struct tdfx_softc* tdfx_info = NULL; if((piod == NULL) || (piod->device >= (tdfx_count & 0xf))) { #ifdef DEBUG printf("tdfx: Bad struct or device in tdfx_query_update\n"); #endif return -EINVAL; } tdfx_info = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, piod->device); if(tdfx_info == NULL) return -ENXIO; /* Code below this line in the fuction was taken from the * Linux driver and converted for freebsd. 
*/ /* Check the size for all the ports, to make sure stuff doesn't get messed up * by poorly written clients */ switch(piod->port) { case PCI_COMMAND_FREEBSD: if(piod->size != 2) return -EINVAL; break; case SST1_PCI_SPECIAL1_FREEBSD: if(piod->size != 4) return -EINVAL; break; case SST1_PCI_SPECIAL2_FREEBSD: if(piod->size != 4) return -EINVAL; break; case SST1_PCI_SPECIAL3_FREEBSD: if(piod->size != 4) return -EINVAL; break; case SST1_PCI_SPECIAL4_FREEBSD: if(piod->size != 4) return -EINVAL; break; default: return -EINVAL; } /* Read the current value */ retval = pci_read_config(tdfx_info->dev, piod->port & ~3, 4); /* These set up a mask to use, since apparently they wanted to write 4 bytes * at once to the ports */ switch (piod->size) { case 1: copyin(piod->value, &ret_byte, 1); preval = ret_byte << (8 * (piod->port & 0x3)); mask = 0xff << (8 * (piod->port & 0x3)); break; case 2: copyin(piod->value, &ret_word, 2); preval = ret_word << (8 * (piod->port & 0x3)); mask = 0xffff << (8 * (piod->port & 0x3)); break; case 4: copyin(piod->value, &ret_dword, 4); preval = ret_dword; mask = ~0; break; default: return -EINVAL; } /* Finally, combine the values and write it to the port */ retval = (retval & ~mask) | preval; pci_write_config(tdfx_info->dev, piod->port & ~3, retval, 4); return 0; } /* For both of these, I added a variable named workport of type u_int so * that I could eliminate the warning about my data type size. 
The * applications expect the port to be of type short, so I needed to change * this within the function */ static int tdfx_do_pio_rd(struct tdfx_pio_data *piod) { /* Return val */ u_int8_t ret_byte; u_int workport; struct tdfx_softc *tdfx_info = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, piod->device); /* Restricts the access of ports other than those we use */ if(((piod->port != VGA_INPUT_STATUS_1C) || (piod->port != SC_INDEX) || (piod->port != SC_DATA) || (piod->port != VGA_MISC_OUTPUT_READ)) && (piod->port < tdfx_info->pio0) && (piod->port > tdfx_info->pio0max)) return -EPERM; /* All VGA STATUS REGS are byte registers, size should never be > 1 */ if(piod->size != 1) { return -EINVAL; } /* Write the data to the intended port */ workport = piod->port; ret_byte = inb(workport); copyout(&ret_byte, piod->value, sizeof(u_int8_t)); return 0; } static int tdfx_do_pio_wt(struct tdfx_pio_data *piod) { /* return val */ u_int8_t ret_byte; u_int workport; struct tdfx_softc *tdfx_info = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, piod->device); /* Replace old switch w/ massive if(...) */ /* Restricts the access of ports other than those we use */ if(((piod->port != SC_INDEX) && (piod->port != SC_DATA) && (piod->port != VGA_MISC_OUTPUT_READ)) /* Can't write VGA_ST_1C */ && (piod->port < tdfx_info->pio0) && (piod->port > tdfx_info->pio0max)) return -EPERM; /* All VGA STATUS REGS are byte registers, size should never be > 1 */ if(piod->size != 1) { return -EINVAL; } /* Write the data to the intended port */ copyin(piod->value, &ret_byte, sizeof(u_int8_t)); workport = piod->port; outb(workport, ret_byte); return 0; } static int tdfx_do_query(u_int cmd, struct tdfx_pio_data *piod) { /* There are three sub-commands to the query 0x33 */ switch(_IOC_NR(cmd)) { case 2: return tdfx_query_boards(); break; case 3: return tdfx_query_fetch(cmd, piod); break; case 4: return tdfx_query_update(cmd, piod); break; default: /* In case we are thrown a bogus sub-command! 
*/ #ifdef DEBUG printf("Bad Sub-cmd: 0x%x\n", _IOC_NR(cmd)); #endif return -EINVAL; } } static int tdfx_do_pio(u_int cmd, struct tdfx_pio_data *piod) { /* Two types of PIO, INPUT and OUTPUT, as the name suggests */ switch(_IOC_DIR(cmd)) { case IOCV_OUT: return tdfx_do_pio_rd(piod); break; case IOCV_IN: return tdfx_do_pio_wt(piod); break; default: return -EINVAL; } } /* Calls to ioctl(2) eventually end up here. Unhandled ioctls return an ENXIO, * normally, you would read in the data pointed to by data, then write your * output to it. The ioctl *should* normally return zero if everything is * alright, but 3dfx didn't make it that way... * * For all of the ioctl code, in the event of a real error, * we return -Exxxx rather than simply Exxxx. The reason for this * is that the ioctls actually RET information back to the program * sometimes, rather than filling it in the passed structure. We * want to distinguish errors from useful data, and maintain compatibility. * * There is this portion of the proc struct called p_retval[], we can store a * return value in td->td_retval[0] and place the return value if it is positive * in there, then we can return 0 (good). If the return value is negative, we * can return -retval and the error should be properly handled. 
*/ static int tdfx_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { int retval = 0; struct tdfx_pio_data *piod = (struct tdfx_pio_data*)data; #ifdef DEBUG printf("IOCTL'd by #%d, cmd: 0x%x, data: %p\n", td->td_proc->p_pid, (u_int32_t)cmd, piod); #endif switch(_IOC_TYPE(cmd)) { /* Return the real error if negative, or simply stick the valid return * in td->td_retval */ case 0x33: /* The '3'(0x33) type IOCTL is for querying the installed cards */ if((retval = tdfx_do_query(cmd, piod)) > 0) td->td_retval[0] = retval; else return -retval; break; case 0: /* The 0 type IOCTL is for programmed I/O methods */ if((tdfx_do_pio(cmd, piod)) > 0) td->td_retval[0] = retval; else return -retval; break; default: /* Technically, we won't reach this from linux emu, but when glide * finally gets ported, watch out! */ #ifdef DEBUG printf("Bad IOCTL from #%d\n", td->td_proc->p_pid); #endif return ENXIO; } return 0; } #ifdef TDFX_LINUX /* * Linux emulation IOCTL for /dev/tdfx */ static int linux_ioctl_tdfx(struct thread *td, struct linux_ioctl_args* args) { int error = 0; u_long cmd = args->cmd & 0xffff; /* The structure passed to ioctl has two shorts, one int and one void*. */ char d_pio[2*sizeof(short) + sizeof(int) + sizeof(void*)]; struct file *fp; if ((error = fget(td, args->fd, &fp)) != 0) return (error); /* We simply copy the data and send it right to ioctl */ copyin((caddr_t)args->arg, &d_pio, sizeof(d_pio)); error = fo_ioctl(fp, cmd, (caddr_t)&d_pio, td->td_ucred, td); fdrop(fp, td); return error; } #endif /* TDFX_LINUX */ /* This is the device driver struct. This is sent to the driver subsystem to * register the method structure and the info strcut space for this particular * instance of the driver. */ static driver_t tdfx_driver = { "tdfx", tdfx_methods, sizeof(struct tdfx_softc), }; /* Tell Mr. Kernel about us! 
*/ DRIVER_MODULE(tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0); Index: head/sys/dev/trm/trm.c =================================================================== --- head/sys/dev/trm/trm.c (revision 129878) +++ head/sys/dev/trm/trm.c (revision 129879) @@ -1,3747 +1,3748 @@ /* * O.S : FreeBSD CAM * FILE NAME : trm.c * BY : C.L. Huang (ching@tekram.com.tw) * Erich Chen (erich@tekram.com.tw) * Description: Device Driver for Tekram SCSI adapters * DC395U/UW/F ,DC315/U(TRM-S1040) * DC395U2D/U2W(TRM-S2080) * PCI SCSI Bus Master Host Adapter * (SCSI chip set used Tekram ASIC TRM-S1040,TRM-S2080) *(C)Copyright 1995-2001 Tekram Technology Co.,Ltd. */ #include __FBSDID("$FreeBSD$"); /* * HISTORY: * * REV# DATE NAME DESCRIPTION * 1.05 05/01/1999 ERICH CHEN First released for 3.x.x (CAM) * 1.06 07/29/1999 ERICH CHEN Modify for NEW PCI * 1.07 12/12/1999 ERICH CHEN Modify for 3.3.x ,DCB no free * 1.08 06/12/2000 ERICH CHEN Modify for 4.x.x * 1.09 11/03/2000 ERICH CHEN Modify for 4.1.R ,new sim * 1.10 10/10/2001 Oscar Feng Fixed CAM rescan hang up bug. * 1.11 10/13/2001 Oscar Feng Fixed wrong Async speed display bug. */ /* * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * Imported into FreeBSD source repository, and updated to compile under * FreeBSD-3.0-DEVELOPMENT, by Stefan Esser , 1996-12-17 */ /* * Updated to compile under FreeBSD 5.0-CURRENT by Olivier Houchard * , 2002-03-04 */ #include #include #include #include #if __FreeBSD_version >= 500000 #include #endif #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define trm_reg_read8(reg) bus_space_read_1(pACB->tag, pACB->bsh, reg) #define trm_reg_read16(reg) bus_space_read_2(pACB->tag, pACB->bsh, reg) #define trm_reg_read32(reg) bus_space_read_4(pACB->tag, pACB->bsh, reg) #define trm_reg_write8(value,reg) bus_space_write_1(pACB->tag, pACB->bsh,\ reg, value) #define trm_reg_write16(value,reg) bus_space_write_2(pACB->tag, pACB->bsh,\ reg, value) #define trm_reg_write32(value,reg) bus_space_write_4(pACB->tag, pACB->bsh,\ reg, value) #define PCI_Vendor_ID_TEKRAM 0x1DE1 #define PCI_Device_ID_TRM_S1040 0x0391 #define PCI_DEVICEID_TRMS1040 0x03911DE1 #define PCI_DEVICEID_TRMS2080 0x03921DE1 #ifdef trm_DEBUG1 #define TRM_DPRINTF(fmt, arg...) printf("trm: " fmt, ##arg) #else #define TRM_DPRINTF(fmt, arg...) 
{} #endif /* TRM_DEBUG */ static void trm_check_eeprom(PNVRAMTYPE pEEpromBuf,PACB pACB); static void NVRAM_trm_read_all(PNVRAMTYPE pEEpromBuf,PACB pACB); static u_int8_t NVRAM_trm_get_data(PACB pACB, u_int8_t bAddr); static void NVRAM_trm_write_all(PNVRAMTYPE pEEpromBuf,PACB pACB); static void NVRAM_trm_set_data(PACB pACB, u_int8_t bAddr, u_int8_t bData); static void NVRAM_trm_write_cmd(PACB pACB, u_int8_t bCmd, u_int8_t bAddr); static void NVRAM_trm_wait_30us(PACB pACB); static void trm_Interrupt(void *vpACB); static void trm_DataOutPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_DataInPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_CommandPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_StatusPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgOutPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgInPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_DataOutPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_DataInPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_CommandPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_StatusPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgOutPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgInPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_Nop0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_Nop1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_SetXferRate(PACB pACB, PSRB pSRB,PDCB pDCB); static void trm_DataIO_transfer(PACB pACB, PSRB pSRB, u_int16_t ioDir); static void trm_Disconnect(PACB pACB); static void trm_Reselect(PACB pACB); static void trm_SRBdone(PACB pACB, PDCB pDCB, PSRB pSRB); static void trm_DoingSRB_Done(PACB pACB); static void trm_ScsiRstDetect(PACB pACB); static void trm_ResetSCSIBus(PACB pACB); static void 
trm_RequestSense(PACB pACB, PDCB pDCB, PSRB pSRB); static void trm_EnableMsgOutAbort2(PACB pACB, PSRB pSRB); static void trm_EnableMsgOutAbort1(PACB pACB, PSRB pSRB); static void trm_SendSRB(PACB pACB, PSRB pSRB); static int trm_probe(device_t tag); static int trm_attach(device_t tag); static void trm_reset(PACB pACB); static u_int16_t trm_StartSCSI(PACB pACB, PDCB pDCB, PSRB pSRB); static int trm_initAdapter(PACB pACB, u_int16_t unit); static void trm_initDCB(PACB pACB, PDCB pDCB, u_int16_t unit, u_int32_t i, u_int32_t j); static int trm_initSRB(PACB pACB); static void trm_initACB(PACB pACB, u_int8_t adaptType, u_int16_t unit); /* CAM SIM entry points */ #define ccb_trmsrb_ptr spriv_ptr0 #define ccb_trmacb_ptr spriv_ptr1 static void trm_action(struct cam_sim *psim, union ccb *pccb); static void trm_poll(struct cam_sim *psim); static void * trm_SCSI_phase0[] = { trm_DataOutPhase0, /* phase:0 */ trm_DataInPhase0, /* phase:1 */ trm_CommandPhase0, /* phase:2 */ trm_StatusPhase0, /* phase:3 */ trm_Nop0, /* phase:4 */ trm_Nop1, /* phase:5 */ trm_MsgOutPhase0, /* phase:6 */ trm_MsgInPhase0, /* phase:7 */ }; /* * * stateV = (void *) trm_SCSI_phase1[phase] * */ static void * trm_SCSI_phase1[] = { trm_DataOutPhase1, /* phase:0 */ trm_DataInPhase1, /* phase:1 */ trm_CommandPhase1, /* phase:2 */ trm_StatusPhase1, /* phase:3 */ trm_Nop0, /* phase:4 */ trm_Nop1, /* phase:5 */ trm_MsgOutPhase1, /* phase:6 */ trm_MsgInPhase1, /* phase:7 */ }; NVRAMTYPE trm_eepromBuf[TRM_MAX_ADAPTER_NUM]; /* *Fast20: 000 50ns, 20.0 Mbytes/s * 001 75ns, 13.3 Mbytes/s * 010 100ns, 10.0 Mbytes/s * 011 125ns, 8.0 Mbytes/s * 100 150ns, 6.6 Mbytes/s * 101 175ns, 5.7 Mbytes/s * 110 200ns, 5.0 Mbytes/s * 111 250ns, 4.0 Mbytes/s * *Fast40: 000 25ns, 40.0 Mbytes/s * 001 50ns, 20.0 Mbytes/s * 010 75ns, 13.3 Mbytes/s * 011 100ns, 10.0 Mbytes/s * 100 125ns, 8.0 Mbytes/s * 101 150ns, 6.6 Mbytes/s * 110 175ns, 5.7 Mbytes/s * 111 200ns, 5.0 Mbytes/s */ /* real period: */ u_int8_t dc395x_clock_period[] = { 12,/* 
48 ns 20 MB/sec */ 18,/* 72 ns 13.3 MB/sec */ 25,/* 100 ns 10.0 MB/sec */ 31,/* 124 ns 8.0 MB/sec */ 37,/* 148 ns 6.6 MB/sec */ 43,/* 172 ns 5.7 MB/sec */ 50,/* 200 ns 5.0 MB/sec */ 62 /* 248 ns 4.0 MB/sec */ }; u_int8_t dc395u2x_clock_period[]={ 10,/* 25 ns 40.0 MB/sec */ 12,/* 48 ns 20.0 MB/sec */ 18,/* 72 ns 13.3 MB/sec */ 25,/* 100 ns 10.0 MB/sec */ 31,/* 124 ns 8.0 MB/sec */ 37,/* 148 ns 6.6 MB/sec */ 43,/* 172 ns 5.7 MB/sec */ 50,/* 200 ns 5.0 MB/sec */ }; #define dc395x_tinfo_period dc395x_clock_period #define dc395u2x_tinfo_period dc395u2x_clock_period static PSRB trm_GetSRB(PACB pACB) { int intflag; PSRB pSRB; intflag = splcam(); pSRB = pACB->pFreeSRB; if (pSRB) { pACB->pFreeSRB = pSRB->pNextSRB; pSRB->pNextSRB = NULL; } splx(intflag); return (pSRB); } static void trm_RewaitSRB0(PDCB pDCB, PSRB pSRB) { PSRB psrb1; int intflag; intflag = splcam(); if ((psrb1 = pDCB->pWaitingSRB)) { pSRB->pNextSRB = psrb1; pDCB->pWaitingSRB = pSRB; } else { pSRB->pNextSRB = NULL; pDCB->pWaitingSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; } splx(intflag); } static void trm_RewaitSRB(PDCB pDCB, PSRB pSRB) { PSRB psrb1; int intflag; intflag = splcam(); pDCB->GoingSRBCnt--; psrb1 = pDCB->pGoingSRB; if (pSRB == psrb1) /* * if this SRB is GoingSRB * remove this SRB from GoingSRB Q */ pDCB->pGoingSRB = psrb1->pNextSRB; else { /* * if this SRB is not current GoingSRB * remove this SRB from GoingSRB Q */ while (pSRB != psrb1->pNextSRB) psrb1 = psrb1->pNextSRB; psrb1->pNextSRB = pSRB->pNextSRB; if (pSRB == pDCB->pGoingLastSRB) pDCB->pGoingLastSRB = psrb1; } if ((psrb1 = pDCB->pWaitingSRB)) { /* * if WaitingSRB Q is not NULL * Q back this SRB into WaitingSRB */ pSRB->pNextSRB = psrb1; pDCB->pWaitingSRB = pSRB; } else { pSRB->pNextSRB = NULL; pDCB->pWaitingSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; } splx(intflag); } static void trm_DoWaitingSRB(PACB pACB) { int intflag; PDCB ptr, ptr1; PSRB pSRB; intflag = splcam(); if (!(pACB->pActiveDCB) && !(pACB->ACBFlag & 
(RESET_DETECT+RESET_DONE+RESET_DEV))) { ptr = pACB->pDCBRunRobin; if (!ptr) { ptr = pACB->pLinkDCB; pACB->pDCBRunRobin = ptr; } ptr1 = ptr; for (;ptr1 ;) { pACB->pDCBRunRobin = ptr1->pNextDCB; if (!(ptr1->MaxActiveCommandCnt > ptr1->GoingSRBCnt) || !(pSRB = ptr1->pWaitingSRB)) { if (pACB->pDCBRunRobin == ptr) break; ptr1 = ptr1->pNextDCB; } else { if (!trm_StartSCSI(pACB, ptr1, pSRB)) { /* * If trm_StartSCSI return 0 : * current interrupt status is interrupt enable * It's said that SCSI processor is unoccupied */ ptr1->GoingSRBCnt++; if (ptr1->pWaitingLastSRB == pSRB) { ptr1->pWaitingSRB = NULL; ptr1->pWaitingLastSRB = NULL; } else ptr1->pWaitingSRB = pSRB->pNextSRB; pSRB->pNextSRB = NULL; if (ptr1->pGoingSRB) ptr1->pGoingLastSRB->pNextSRB = pSRB; else ptr1->pGoingSRB = pSRB; ptr1->pGoingLastSRB = pSRB; } break; } } } splx(intflag); return; } static void trm_SRBwaiting(PDCB pDCB, PSRB pSRB) { if (pDCB->pWaitingSRB) { pDCB->pWaitingLastSRB->pNextSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; pSRB->pNextSRB = NULL; } else { pDCB->pWaitingSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; } } static u_int32_t trm_get_sense_bufaddr(PACB pACB, PSRB pSRB) { int offset; offset = pSRB->TagNumber; return (pACB->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } static struct scsi_sense_data * trm_get_sense_buf(PACB pACB, PSRB pSRB) { int offset; offset = pSRB->TagNumber; return (&pACB->sense_buffers[offset]); } static void trm_ExecuteSRB(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { int flags; PACB pACB; PSRB pSRB; union ccb *ccb; u_long totalxferlen=0; flags = splcam(); pSRB = (PSRB)arg; ccb = pSRB->pccb; pACB = (PACB)ccb->ccb_h.ccb_trmacb_ptr; TRM_DPRINTF("trm_ExecuteSRB..........\n"); if (nseg != 0) { PSEG psg; bus_dma_segment_t *end_seg; int op; /* Copy the segments into our SG list */ end_seg = dm_segs + nseg; psg = pSRB->pSRBSGL; while (dm_segs < end_seg) { psg->address = dm_segs->ds_addr; psg->length = (u_long)dm_segs->ds_len; totalxferlen += 
dm_segs->ds_len; psg++; dm_segs++; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } bus_dmamap_sync(pACB->buffer_dmat, pSRB->dmamap, op); } pSRB->RetryCnt = 0; pSRB->SRBTotalXferLength = totalxferlen; pSRB->SRBSGCount = nseg; pSRB->SRBSGIndex = 0; pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; pSRB->MsgCnt = 0; pSRB->SRBStatus = 0; pSRB->SRBFlag = 0; pSRB->SRBState = 0; pSRB->ScsiPhase = PH_BUS_FREE; /* SCSI bus free Phase */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(pACB->buffer_dmat, pSRB->dmamap); pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB = pSRB; xpt_done(ccb); splx(flags); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; #if 0 /* XXX Need a timeout handler */ ccb->ccb_h.timeout_ch = timeout(trmtimeout, (caddr_t)srb, (ccb->ccb_h.timeout * hz) / 1000); #endif trm_SendSRB(pACB, pSRB); splx(flags); return; } static void trm_SendSRB(PACB pACB, PSRB pSRB) { PDCB pDCB; pDCB = pSRB->pSRBDCB; if (!(pDCB->MaxActiveCommandCnt > pDCB->GoingSRBCnt) || (pACB->pActiveDCB) || (pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV))) { TRM_DPRINTF("pDCB->MaxCommand=%d \n",pDCB->MaxActiveCommandCnt); TRM_DPRINTF("pDCB->GoingSRBCnt=%d \n",pDCB->GoingSRBCnt); TRM_DPRINTF("pACB->pActiveDCB=%8x \n",(u_int)pACB->pActiveDCB); TRM_DPRINTF("pACB->ACBFlag=%x \n",pACB->ACBFlag); trm_SRBwaiting(pDCB, pSRB); goto SND_EXIT; } if (pDCB->pWaitingSRB) { trm_SRBwaiting(pDCB, pSRB); pSRB = pDCB->pWaitingSRB; pDCB->pWaitingSRB = pSRB->pNextSRB; pSRB->pNextSRB = NULL; } if (!trm_StartSCSI(pACB, pDCB, pSRB)) { /* * If trm_StartSCSI return 0 : * current interrupt status is interrupt enable * It's said that SCSI processor is unoccupied */ pDCB->GoingSRBCnt++; /* stack waiting SRB*/ if (pDCB->pGoingSRB) { pDCB->pGoingLastSRB->pNextSRB = pSRB; pDCB->pGoingLastSRB = pSRB; } else { pDCB->pGoingSRB = pSRB; pDCB->pGoingLastSRB = pSRB; } } else { /* * If trm_StartSCSI return 1 : * current interrupt 
status is interrupt disreenable * It's said that SCSI processor has more one SRB need to do */ trm_RewaitSRB0(pDCB, pSRB); } SND_EXIT: return; } static void trm_action(struct cam_sim *psim, union ccb *pccb) { PACB pACB; int actionflags; u_int target_id,target_lun; CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("trm_action\n")); actionflags = splcam(); pACB = (PACB) cam_sim_softc(psim); target_id = pccb->ccb_h.target_id; target_lun = pccb->ccb_h.target_lun; switch (pccb->ccb_h.func_code) { case XPT_NOOP: TRM_DPRINTF(" XPT_NOOP \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Execute the requested I/O operation */ case XPT_SCSI_IO: { PDCB pDCB = NULL; PSRB pSRB; struct ccb_scsiio *pcsio; pcsio = &pccb->csio; TRM_DPRINTF(" XPT_SCSI_IO \n"); TRM_DPRINTF("trm: target_id= %d target_lun= %d \n" ,target_id, target_lun); TRM_DPRINTF( "pACB->scan_devices[target_id][target_lun]= %d \n" ,pACB->scan_devices[target_id][target_lun]); if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(pccb); splx(actionflags); return; } pDCB = &pACB->DCBarray[target_id][target_lun]; if (!(pDCB->DCBstatus & DS_IN_QUEUE)) { pACB->scan_devices[target_id][target_lun] = 1; trm_initDCB(pACB, pDCB, pACB->AdapterUnit, target_id, target_lun); } /* * Assign an SRB and connect it with this ccb. 
*/ pSRB = trm_GetSRB(pACB); if (!pSRB) { /* Freeze SIMQ */ pccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(pccb); splx(actionflags); return; } pSRB->pSRBDCB = pDCB; pccb->ccb_h.ccb_trmsrb_ptr = pSRB; pccb->ccb_h.ccb_trmacb_ptr = pACB; pSRB->pccb = pccb; pSRB->ScsiCmdLen = pcsio->cdb_len; /* * move layer of CAM command block to layer of SCSI * Request Block for SCSI processor command doing */ if ((pccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((pccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(pcsio->cdb_io.cdb_ptr,pSRB->CmdBlock ,pcsio->cdb_len); } else { pccb->ccb_h.status = CAM_REQ_INVALID; pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB= pSRB; xpt_done(pccb); splx(actionflags); return; } } else bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len); if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { int vmflags; int error; vmflags = splsoftvm(); error = bus_dmamap_load( pACB->buffer_dmat, pSRB->dmamap, pcsio->data_ptr, pcsio->dxfer_len, trm_ExecuteSRB, pSRB, 0); if (error == EINPROGRESS) { xpt_freeze_simq( pACB->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(vmflags); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ seg.ds_addr = (bus_addr_t)pcsio->data_ptr; seg.ds_len = pcsio->dxfer_len; trm_ExecuteSRB(pSRB, &seg, 1, 0); } } else { /* CAM_SCATTER_VALID */ struct bus_dma_segment *segs; if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB = pSRB; pccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(pccb); splx(actionflags); return; } /* cam SG list is physical, * cam data is virtual */ segs = (struct bus_dma_segment *) pcsio->data_ptr; trm_ExecuteSRB(pSRB, segs, pcsio->sglist_cnt, 1); } /* CAM_SCATTER_VALID */ } else trm_ExecuteSRB(pSRB, NULL, 0, 0); } break; case XPT_GDEV_TYPE: TRM_DPRINTF(" XPT_GDEV_TYPE \n"); pccb->ccb_h.status = 
CAM_REQ_INVALID; xpt_done(pccb); break; case XPT_GDEVLIST: TRM_DPRINTF(" XPT_GDEVLIST \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Path routing inquiry * Path Inquiry CCB */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; TRM_DPRINTF(" XPT_PATH_INQ \n"); cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 15 ; cpi->max_lun = pACB->max_lun; /* 7 or 0 */ cpi->initiator_id = pACB->AdaptSCSIID; cpi->bus_id = cam_sim_bus(psim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Tekram_TRM", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); } break; /* * Release a frozen SIM queue * Release SIM Queue */ case XPT_REL_SIMQ: TRM_DPRINTF(" XPT_REL_SIMQ \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Set Asynchronous Callback Parameters * Set Asynchronous Callback CCB */ case XPT_SASYNC_CB: TRM_DPRINTF(" XPT_SASYNC_CB \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Set device type information * Set Device Type CCB */ case XPT_SDEV_TYPE: TRM_DPRINTF(" XPT_SDEV_TYPE \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * (Re)Scan the SCSI Bus * Rescan the given bus, or bus/target/lun */ case XPT_SCAN_BUS: TRM_DPRINTF(" XPT_SCAN_BUS \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Get EDT entries matching the given pattern */ case XPT_DEV_MATCH: TRM_DPRINTF(" XPT_DEV_MATCH \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Turn on debugging for a bus, target or lun */ case XPT_DEBUG: TRM_DPRINTF(" XPT_DEBUG \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * XPT_ABORT = 0x10, Abort the specified CCB * Abort XPT request CCB */ case XPT_ABORT: TRM_DPRINTF(" 
XPT_ABORT \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Reset the specified SCSI bus
	 * Reset SCSI Bus CCB
	 */
	case XPT_RESET_BUS: {
		int i;

		TRM_DPRINTF(" XPT_RESET_BUS \n");
		trm_reset(pACB);
		pACB->ACBFlag=0;
		/* Give devices ~500ms to settle after the bus reset. */
		for (i=0; i<500; i++)
			DELAY(1000);
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		}
		break;
	/*
	 * Bus Device Reset the specified SCSI device
	 * Reset SCSI Device CCB
	 */
	case XPT_RESET_DEV:
		/*
		 * Don't (yet?) support vendor
		 * specific commands.
		 */
		TRM_DPRINTF(" XPT_RESET_DEV \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Terminate the I/O process
	 * Terminate I/O Process Request CCB
	 */
	case XPT_TERM_IO:
		TRM_DPRINTF(" XPT_TERM_IO \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Scan Logical Unit
	 */
	case XPT_SCAN_LUN:
		TRM_DPRINTF(" XPT_SCAN_LUN \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Get/Set transfer rate/width/disconnection/tag queueing
	 * settings
	 * (GET) default/user transfer settings for the target
	 */
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts;
		int intflag;
		struct trm_transinfo *tinfo;
		PDCB pDCB;

		TRM_DPRINTF(" XPT_GET_TRAN_SETTINGS \n");
		cts = &pccb->cts;
		pDCB = &pACB->DCBarray[target_id][target_lun];
		intflag = splcam();
		/*
		 * disable interrupt
		 */
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			/* current transfer settings */
			if (pDCB->tinfo.disc_tag & TRM_CUR_DISCENB)
				cts->flags = CCB_TRANS_DISC_ENB;
			else
				cts->flags = 0;/* no tag & disconnect */
			if (pDCB->tinfo.disc_tag & TRM_CUR_TAGENB)
				cts->flags |= CCB_TRANS_TAG_ENB;
			tinfo = &pDCB->tinfo.current;
			TRM_DPRINTF("CURRENT: cts->flags= %2x \n",
			    cts->flags);
		} else {
			/* default(user) transfer settings */
			if (pDCB->tinfo.disc_tag & TRM_USR_DISCENB)
				cts->flags = CCB_TRANS_DISC_ENB;
			else
				cts->flags = 0;
			if (pDCB->tinfo.disc_tag & TRM_USR_TAGENB)
				cts->flags |= CCB_TRANS_TAG_ENB;
			tinfo = &pDCB->tinfo.user;
			TRM_DPRINTF("USER: cts->flags= %2x \n",
			    cts->flags);
		}
		cts->sync_period = tinfo->period;
		cts->sync_offset = tinfo->offset;
		cts->bus_width = tinfo->width;
		TRM_DPRINTF("pDCB->SyncPeriod: %d \n", pDCB->SyncPeriod);
		TRM_DPRINTF("period: %d \n", tinfo->period);
		TRM_DPRINTF("offset: %d \n", tinfo->offset);
		TRM_DPRINTF("width: %d \n", tinfo->width);
		splx(intflag);
		cts->valid = CCB_TRANS_SYNC_RATE_VALID |
		    CCB_TRANS_SYNC_OFFSET_VALID |
		    CCB_TRANS_BUS_WIDTH_VALID |
		    CCB_TRANS_DISC_VALID |
		    CCB_TRANS_TQ_VALID;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		}
		break;
	/*
	 * Get/Set transfer rate/width/disconnection/tag queueing
	 * settings
	 * (Set) transfer rate/width negotiation settings
	 */
	case XPT_SET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts;
		u_int update_type;
		int intflag;
		PDCB pDCB;

		TRM_DPRINTF(" XPT_SET_TRAN_SETTINGS \n");
		cts = &pccb->cts;
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
			update_type |= TRM_TRANS_GOAL;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			update_type |= TRM_TRANS_USER;
		intflag = splcam();
		pDCB = &pACB->DCBarray[target_id][target_lun];
		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			/*ccb disc enables */
			if (update_type & TRM_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					pDCB->tinfo.disc_tag |=
					    TRM_CUR_DISCENB;
				else
					pDCB->tinfo.disc_tag &=
					    ~TRM_CUR_DISCENB;
			}
			if (update_type & TRM_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					pDCB->tinfo.disc_tag |=
					    TRM_USR_DISCENB;
				else
					pDCB->tinfo.disc_tag &=
					    ~TRM_USR_DISCENB;
			}
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			/* if ccb tag q active */
			if (update_type & TRM_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					pDCB->tinfo.disc_tag |=
					    TRM_CUR_TAGENB;
				else
					pDCB->tinfo.disc_tag &=
					    ~TRM_CUR_TAGENB;
			}
			if (update_type & TRM_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					pDCB->tinfo.disc_tag |=
					    TRM_USR_TAGENB;
				else
					pDCB->tinfo.disc_tag &=
					    ~TRM_USR_TAGENB;
			}
		}
		/* Minimum sync period factor */
		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) {
			/* if ccb sync active */
			/* TRM-S1040 MinSyncPeriod = 4 clocks/byte */
			if ((cts->sync_period != 0) &&
			    (cts->sync_period < 125))
				cts->sync_period = 125;
				/* 1/(125*4) minsync 2 MByte/sec */
			if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)
			    != 0) {
				if (cts->sync_offset == 0)
					cts->sync_period = 0;
				/* TRM-S1040 MaxSyncOffset = 15 bytes*/
				if (cts->sync_offset > 15)
					cts->sync_offset = 15;
			}
		}
		if ((update_type & TRM_TRANS_USER) != 0) {
			pDCB->tinfo.user.period = cts->sync_period;
			pDCB->tinfo.user.offset = cts->sync_offset;
			pDCB->tinfo.user.width = cts->bus_width;
		}
		if ((update_type & TRM_TRANS_GOAL) != 0) {
			pDCB->tinfo.goal.period = cts->sync_period;
			pDCB->tinfo.goal.offset = cts->sync_offset;
			pDCB->tinfo.goal.width = cts->bus_width;
		}
		splx(intflag);
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	/*
	 * Calculate the geometry parameters for a device give
	 * the sector size and volume size.
	 */
	case XPT_CALC_GEOMETRY:
		TRM_DPRINTF(" XPT_CALC_GEOMETRY \n");
		cam_calc_geometry(&pccb->ccg, /*extended*/1);
		xpt_done(pccb);
		break;
	case XPT_ENG_INQ:
		TRM_DPRINTF(" XPT_ENG_INQ \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * HBA execute engine request
	 * This structure must match SCSIIO size
	 */
	case XPT_ENG_EXEC:
		TRM_DPRINTF(" XPT_ENG_EXEC \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * XPT_EN_LUN = 0x30, Enable LUN as a target
	 * Target mode structures.
	 */
	case XPT_EN_LUN:
		/*
		 * Don't (yet?) support vendor
		 * specific commands.
		 */
		TRM_DPRINTF(" XPT_EN_LUN \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Execute target I/O request
	 */
	case XPT_TARGET_IO:
		/*
		 * Don't (yet?) support vendor
		 * specific commands.
		 */
		TRM_DPRINTF(" XPT_TARGET_IO \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Accept Host Target Mode CDB
	 */
	case XPT_ACCEPT_TARGET_IO:
		/*
		 * Don't (yet?) support vendor
		 * specific commands.
		 */
		TRM_DPRINTF(" XPT_ACCEPT_TARGET_IO \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Continue Host Target I/O Connection
	 */
	case XPT_CONT_TARGET_IO:
		/*
		 * Don't (yet?) support vendor
		 * specific commands.
		 */
		TRM_DPRINTF(" XPT_CONT_TARGET_IO \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Notify Host Target driver of event
	 */
	case XPT_IMMED_NOTIFY:
		TRM_DPRINTF(" XPT_IMMED_NOTIFY \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * Acknowledgement of event
	 */
	case XPT_NOTIFY_ACK:
		TRM_DPRINTF(" XPT_NOTIFY_ACK \n");
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	/*
	 * XPT_VUNIQUE = 0x80
	 */
	case XPT_VUNIQUE:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
	splx(actionflags);
}

/*
 * trm_poll: CAM SIM poll entry -- run the interrupt handler by hand
 * so queued commands make progress while interrupts are unavailable.
 */
static void
trm_poll(struct cam_sim *psim)
{
	trm_Interrupt(cam_sim_softc(psim));
}

/*
 * trm_ResetDevParam: after a bus reset, walk the circular DCB list and
 * restore each device's negotiation state from the adapter's EEPROM
 * settings (clears sync/wide "done" flags so negotiation restarts).
 */
static void
trm_ResetDevParam(PACB pACB)
{
	PDCB pDCB, pdcb;
	PNVRAMTYPE pEEpromBuf;
	u_int8_t PeriodIndex;

	pDCB = pACB->pLinkDCB;
	if (pDCB == NULL)
		return;
	pdcb = pDCB;
	do {
		/* Force sync and wide to be renegotiated from scratch. */
		pDCB->SyncMode &= ~(SYNC_NEGO_DONE+ WIDE_NEGO_DONE);
		pDCB->SyncPeriod = 0;
		pDCB->SyncOffset = 0;
		pEEpromBuf = &trm_eepromBuf[pACB->AdapterUnit];
		pDCB->DevMode =
		    pEEpromBuf->NvramTarget[pDCB->TargetID].NvmTarCfg0;
		pDCB->AdpMode = pEEpromBuf->NvramChannelCfg;
		PeriodIndex =
		    pEEpromBuf->NvramTarget[pDCB->TargetID].NvmTarPeriod
		    & 0x07;
		if (pACB->AdaptType == 1) /* is U2?
*/
			pDCB->MaxNegoPeriod =
			    dc395u2x_clock_period[PeriodIndex];
		else
			pDCB->MaxNegoPeriod =
			    dc395x_clock_period[PeriodIndex];
		if ((pDCB->DevMode & NTC_DO_WIDE_NEGO) &&
		    (pACB->Config & HCC_WIDE_CARD))
			pDCB->SyncMode |= WIDE_NEGO_ENABLE;
		pDCB = pDCB->pNextDCB;
	} while (pdcb != pDCB);
}

/*
 * trm_RecoverSRB: move every "going" (active) SRB of every DCB back to
 * the front of that DCB's waiting queue, so the commands are retried
 * after a reset.
 */
static void
trm_RecoverSRB(PACB pACB)
{
	PDCB pDCB, pdcb;
	PSRB psrb, psrb2;
	u_int16_t cnt, i;

	pDCB = pACB->pLinkDCB;
	if (pDCB == NULL)
		return;
	pdcb = pDCB;
	do {
		cnt = pdcb->GoingSRBCnt;
		psrb = pdcb->pGoingSRB;
		for (i = 0; i < cnt; i++) {
			psrb2 = psrb;
			psrb = psrb->pNextSRB;
			if (pdcb->pWaitingSRB) {
				/* Prepend to the existing waiting list. */
				psrb2->pNextSRB = pdcb->pWaitingSRB;
				pdcb->pWaitingSRB = psrb2;
			} else {
				/* Waiting list was empty: start it. */
				pdcb->pWaitingSRB = psrb2;
				pdcb->pWaitingLastSRB = psrb2;
				psrb2->pNextSRB = NULL;
			}
		}
		pdcb->GoingSRBCnt = 0;
		pdcb->pGoingSRB = NULL;
		pdcb = pdcb->pNextDCB;
	} while (pdcb != pDCB);
}

/*
 * trm_reset: full adapter + SCSI bus reset.  Masks interrupts, resets
 * the bus, waits ~500ms, re-enables interrupts, clears both FIFOs,
 * restores per-device negotiation parameters, completes all in-flight
 * SRBs and restarts the waiting queue.  Notifies CAM via AC_BUS_RESET.
 */
static void
trm_reset(PACB pACB)
{
	int intflag;
	u_int16_t i;

	TRM_DPRINTF("trm: RESET");
	intflag = splcam();
	/* Mask DMA and SCSI interrupts while resetting. */
	trm_reg_write8(0x00, TRMREG_DMA_INTEN);
	trm_reg_write8(0x00, TRMREG_SCSI_INTEN);
	trm_ResetSCSIBus(pACB);
	for (i = 0; i < 500; i++)
		DELAY(1000);
	trm_reg_write8(0x7F, TRMREG_SCSI_INTEN);
	/* Enable DMA interrupt */
	trm_reg_write8(EN_SCSIINTR, TRMREG_DMA_INTEN);
	/* Clear DMA FIFO */
	trm_reg_write8(CLRXFIFO, TRMREG_DMA_CONTROL);
	/* Clear SCSI FIFO */
	trm_reg_write16(DO_CLRFIFO,TRMREG_SCSI_CONTROL);
	trm_ResetDevParam(pACB);
	trm_DoingSRB_Done(pACB);
	pACB->pActiveDCB = NULL;
	pACB->ACBFlag = 0;/* RESET_DETECT, RESET_DONE ,RESET_DEV */
	trm_DoWaitingSRB(pACB);
	/* Tell the XPT layer that a bus reset occurred */
	if (pACB->ppath != NULL)
		xpt_async(AC_BUS_RESET, pACB->ppath, NULL);
	splx(intflag);
	return;
}

/*
 * trm_StartSCSI: try to start an SRB on the chip.  Loads host/target
 * IDs and sync parameters, queues identify/tag messages and the CDB
 * into the SCSI FIFO, then issues the selection command.  Returns 1 if
 * the chip was busy (SRB stays SRB_READY, caller must retry), 0 if the
 * selection was started and this SRB became the active one.
 */
static u_int16_t
trm_StartSCSI(PACB pACB, PDCB pDCB, PSRB pSRB)
{
	u_int16_t return_code;
	u_int8_t scsicommand, i,command,identify_message;
	u_int8_t * ptr;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	trm_reg_write8(pACB->AdaptSCSIID, TRMREG_SCSI_HOSTID);
	trm_reg_write8(pDCB->TargetID, TRMREG_SCSI_TARGETID);
	trm_reg_write8(pDCB->SyncPeriod, TRMREG_SCSI_SYNC);
	trm_reg_write8(pDCB->SyncOffset, TRMREG_SCSI_OFFSET);
	pSRB->ScsiPhase = PH_BUS_FREE;/* initial phase */
	/* Flush FIFO */
	trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
	identify_message = pDCB->IdentifyMsg;
	if ((pSRB->CmdBlock[0] == INQUIRY) ||
	    (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
	    (pSRB->SRBFlag & AUTO_REQSENSE)) {
		/* Negotiation still pending: select with ATN and stop so
		 * the negotiation messages can be sent first. */
		if (((pDCB->SyncMode & WIDE_NEGO_ENABLE) &&
		     !(pDCB->SyncMode & WIDE_NEGO_DONE)) ||
		    ((pDCB->SyncMode & SYNC_NEGO_ENABLE) &&
		     !(pDCB->SyncMode & SYNC_NEGO_DONE))) {
			if (!(pDCB->IdentifyMsg & 7) ||
			    (pSRB->CmdBlock[0] != INQUIRY)) {
				scsicommand = SCMD_SEL_ATNSTOP;
				pSRB->SRBState = SRB_MSGOUT;
				goto polling;
			}
		}
		/*
		 * Send identify message
		 */
		trm_reg_write8((identify_message & 0xBF),
		    TRMREG_SCSI_FIFO);
		scsicommand = SCMD_SEL_ATN;
		pSRB->SRBState = SRB_START_;
	} else {
		/* not inquiry,request sense,auto request sense */
		/*
		 * Send identify message
		 */
		trm_reg_write8(identify_message,TRMREG_SCSI_FIFO);
		scsicommand = SCMD_SEL_ATN;
		pSRB->SRBState = SRB_START_;
		if (pDCB->SyncMode & EN_TAG_QUEUING) {
			/* Send Tag message */
			trm_reg_write8(MSG_SIMPLE_QTAG, TRMREG_SCSI_FIFO);
			trm_reg_write8(pSRB->TagNumber, TRMREG_SCSI_FIFO);
			scsicommand = SCMD_SEL_ATN3;
		}
	}
polling:
	/*
	 * Send CDB ..command block .........
	 */
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		/* Build a REQUEST SENSE CDB by hand. */
		trm_reg_write8(REQUEST_SENSE, TRMREG_SCSI_FIFO);
		trm_reg_write8((pDCB->IdentifyMsg << 5), TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
		trm_reg_write8(pcsio->sense_len, TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
	} else {
		ptr = (u_int8_t *) pSRB->CmdBlock;
		for (i = 0; i < pSRB->ScsiCmdLen ; i++) {
			command = *ptr++;
			trm_reg_write8(command,TRMREG_SCSI_FIFO);
		}
	}
	if (trm_reg_read16(TRMREG_SCSI_STATUS) & SCSIINTERRUPT) {
		/*
		 * If trm_StartSCSI return 1 :
		 * current interrupt status is interrupt disreenable
		 * It's said that SCSI processor has more one SRB need to do,
		 * SCSI processor has been occupied by one SRB.
		 */
		pSRB->SRBState = SRB_READY;
		return_code = 1;
	} else {
		/*
		 * If trm_StartSCSI return 0 :
		 * current interrupt status is interrupt enable
		 * It's said that SCSI processor is unoccupied
		 */
		pSRB->ScsiPhase = SCSI_NOP1; /* SCSI bus free Phase */
		pACB->pActiveDCB = pDCB;
		pDCB->pActiveSRB = pSRB;
		return_code = 0;
		trm_reg_write16(DO_DATALATCH | DO_HWRESELECT,
		    TRMREG_SCSI_CONTROL);/* it's important for atn stop*/
		/*
		 * SCSI command
		 */
		trm_reg_write8(scsicommand,TRMREG_SCSI_COMMAND);
	}
	return (return_code);
}

/*
 * trm_Interrupt: main interrupt handler.  Reads SCSI status and
 * interrupt status, dispatches bus events (timeout/disconnect,
 * reselection, bus reset), then runs the phase0 handler for the phase
 * just left and the phase1 handler for the new phase from the
 * trm_SCSI_phase0/phase1 dispatch tables.
 */
static void
trm_Interrupt(vpACB)
	void *vpACB;
{
	PACB pACB;
	PDCB pDCB;
	PSRB pSRB;
	u_int16_t phase;
	void (*stateV)(PACB, PSRB, u_int16_t *);
	u_int16_t scsi_status=0;
	u_int8_t scsi_intstatus;

	pACB = vpACB;
	scsi_status = trm_reg_read16(TRMREG_SCSI_STATUS);
	if (!(scsi_status & SCSIINTERRUPT)) {
		TRM_DPRINTF("trm_Interrupt: TRMREG_SCSI_STATUS scsi_status = NULL ,return......");
		return;
	}
	TRM_DPRINTF("scsi_status=%2x,",scsi_status);
	scsi_intstatus = trm_reg_read8(TRMREG_SCSI_INTSTATUS);
	TRM_DPRINTF("scsi_intstatus=%2x,",scsi_intstatus);
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		trm_Disconnect(pACB);
		return;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		trm_Reselect(pACB);
		return;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		trm_ScsiRstDetect(pACB);
		return;
	}
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		pDCB = pACB->pActiveDCB;
		KASSERT(pDCB != NULL, ("no active DCB"));
		pSRB = pDCB->pActiveSRB;
		if (pDCB->DCBFlag & ABORT_DEV_)
			trm_EnableMsgOutAbort1(pACB, pSRB);
		phase = (u_int16_t) pSRB->ScsiPhase;  /* phase: */
		stateV = (void *) trm_SCSI_phase0[phase];
		stateV(pACB, pSRB, &scsi_status);
		pSRB->ScsiPhase = scsi_status & PHASEMASK;
		/* phase:0,1,2,3,4,5,6,7 */
		phase = (u_int16_t) scsi_status & PHASEMASK;
		stateV = (void *) trm_SCSI_phase1[phase];
		stateV(pACB, pSRB, &scsi_status);
	}
}

/*
 * trm_MsgOutPhase0: leaving MESSAGE OUT.  If this SRB was an
 * unexpected-reselect or abort sentinel, force the bus-free phase.
 */
static void
trm_MsgOutPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT))
		*pscsi_status = PH_BUS_FREE; /*..
initial phase*/
}

/*
 * trm_MsgOutPhase1: entering MESSAGE OUT.  Either flushes the queued
 * MsgOutBuf bytes to the SCSI FIFO, or (in negotiation mode) builds an
 * extended WDTR/SDTR message in the FIFO, then issues SCMD_FIFO_OUT.
 */
static void
trm_MsgOutPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	u_int8_t bval;
	u_int16_t i, cnt;
	u_int8_t * ptr;
	PDCB pDCB;

	trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
	pDCB = pACB->pActiveDCB;
	if (!(pSRB->SRBState & SRB_MSGOUT)) {
		cnt = pSRB->MsgCnt;
		if (cnt) {
			/* Send the pre-built message bytes. */
			ptr = (u_int8_t *) pSRB->MsgOutBuf;
			for (i = 0; i < cnt; i++) {
				trm_reg_write8(*ptr, TRMREG_SCSI_FIFO);
				ptr++;
			}
			pSRB->MsgCnt = 0;
			if ((pDCB->DCBFlag & ABORT_DEV_) &&
			    (pSRB->MsgOutBuf[0] == MSG_ABORT)) {
				pSRB->SRBState = SRB_ABORT_SENT;
			}
		} else {
			/* Nothing queued: fall back to MSG_ABORT unless a
			 * sync negotiation should be sent instead. */
			bval = MSG_ABORT;
			if ((pSRB->CmdBlock[0] == INQUIRY) ||
			    (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
			    (pSRB->SRBFlag & AUTO_REQSENSE)) {
				if (pDCB->SyncMode & SYNC_NEGO_ENABLE) {
					goto mop1;
				}
			}
			trm_reg_write8(bval, TRMREG_SCSI_FIFO);
		}
	} else {
mop1:		/* message out phase */
		if (!(pSRB->SRBState & SRB_DO_WIDE_NEGO) &&
		    (pDCB->SyncMode & WIDE_NEGO_ENABLE)) {
			/*
			 * WIDE DATA TRANSFER REQUEST code (03h)
			 */
			pDCB->SyncMode &=
			    ~(SYNC_NEGO_DONE | EN_ATN_STOP);
			trm_reg_write8((pDCB->IdentifyMsg & 0xBF),
			    TRMREG_SCSI_FIFO);
			trm_reg_write8(MSG_EXTENDED,TRMREG_SCSI_FIFO);
			/* (01h) */
			trm_reg_write8(2,TRMREG_SCSI_FIFO);
			/* Message length (02h) */
			trm_reg_write8(3,TRMREG_SCSI_FIFO);
			/* wide data xfer (03h) */
			trm_reg_write8(1,TRMREG_SCSI_FIFO);
			/* width:0(8bit),1(16bit),2(32bit) */
			pSRB->SRBState |= SRB_DO_WIDE_NEGO;
		} else if (!(pSRB->SRBState & SRB_DO_SYNC_NEGO) &&
		    (pDCB->SyncMode & SYNC_NEGO_ENABLE)) {
			/*
			 * SYNCHRONOUS DATA TRANSFER REQUEST code (01h)
			 */
			if (!(pDCB->SyncMode & WIDE_NEGO_DONE))
				trm_reg_write8((pDCB->IdentifyMsg & 0xBF),
				    TRMREG_SCSI_FIFO);
			trm_reg_write8(MSG_EXTENDED,TRMREG_SCSI_FIFO);
			/* (01h) */
			trm_reg_write8(3,TRMREG_SCSI_FIFO);
			/* Message length (03h) */
			trm_reg_write8(1,TRMREG_SCSI_FIFO);
			/* SYNCHRONOUS DATA TRANSFER REQUEST code (01h) */
			trm_reg_write8(pDCB->MaxNegoPeriod,TRMREG_SCSI_FIFO);
			/* Transfer period factor */
			trm_reg_write8((pACB->AdaptType == 1) ? 31 : 15,
			    TRMREG_SCSI_FIFO);
			/* REQ/ACK offset */
			pSRB->SRBState |= SRB_DO_SYNC_NEGO;
		}
	}
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
	/* it's important for atn stop */
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_FIFO_OUT, TRMREG_SCSI_COMMAND);
}

/*
 * trm_CommandPhase0: leaving COMMAND phase -- nothing to do.
 */
static void
trm_CommandPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
}

/*
 * trm_CommandPhase1: entering COMMAND phase.  Loads the CDB (or a
 * hand-built REQUEST SENSE for auto-sense) into the SCSI FIFO and
 * issues SCMD_FIFO_OUT.
 */
static void
trm_CommandPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	PDCB pDCB;
	u_int8_t * ptr;
	u_int16_t i, cnt;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	trm_reg_write16(DO_CLRATN | DO_CLRFIFO , TRMREG_SCSI_CONTROL);
	if (!(pSRB->SRBFlag & AUTO_REQSENSE)) {
		cnt = (u_int16_t) pSRB->ScsiCmdLen;
		ptr = (u_int8_t *) pSRB->CmdBlock;
		for (i = 0; i < cnt; i++) {
			trm_reg_write8(*ptr, TRMREG_SCSI_FIFO);
			ptr++;
		}
	} else {
		trm_reg_write8(REQUEST_SENSE, TRMREG_SCSI_FIFO);
		pDCB = pACB->pActiveDCB;
		/* target id */
		trm_reg_write8((pDCB->IdentifyMsg << 5), TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
		/* sizeof(struct scsi_sense_data) */
		trm_reg_write8(pcsio->sense_len, TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
	}
	pSRB->SRBState = SRB_COMMAND;
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
	/* it's important for atn stop*/
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_FIFO_OUT, TRMREG_SCSI_COMMAND);
}

/*
 * trm_DataOutPhase0: leaving DATA OUT.  Computes how much data was NOT
 * transferred (SCSI counter + residue left in the SCSI FIFO, doubled
 * for wide transfers) and either marks the transfer complete or updates
 * the scatter/gather list to resume after a disconnect.
 */
static void
trm_DataOutPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	PDCB pDCB;
	u_int8_t TempDMAstatus,SGIndexTemp;
	u_int16_t scsi_status;
	PSEG pseg;
	u_long TempSRBXferredLength,dLeftCounter=0;

	pDCB = pSRB->pSRBDCB;
	scsi_status = *pscsi_status;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			pSRB->SRBStatus |= PARITY_ERROR;
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			dLeftCounter = (u_long)
			    (trm_reg_read8(TRMREG_SCSI_FIFOCNT) & 0x3F);
			if (pDCB->SyncPeriod & WIDE_SYNC) {
				/*
				 * if WIDE scsi SCSI FIFOCNT unit is word
				 * so need to * 2
				 */
				dLeftCounter <<= 1;
			}
		}
		/*
		 * calculate all the residue data that not yet transferred
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte
		 * transfer.
		 * .....TRM_SCSI_FIFOCNT (5bits)
		 * The counter is SCSI FIFO offset counter
		 */
		dLeftCounter += trm_reg_read32(TRMREG_SCSI_COUNTER);
		if (dLeftCounter == 1) {
			dLeftCounter = 0;
			trm_reg_write16(DO_CLRFIFO,TRMREG_SCSI_CONTROL);
		}
		if ((dLeftCounter == 0) ||
		    (scsi_status & SCSIXFERCNT_2_ZERO)) {
			/* Wait for the DMA engine to drain completely. */
			TempDMAstatus = trm_reg_read8(TRMREG_DMA_STATUS);
			while (!(TempDMAstatus & DMAXFERCOMP)) {
				TempDMAstatus =
				    trm_reg_read8(TRMREG_DMA_STATUS);
			}
			pSRB->SRBTotalXferLength = 0;
		} else {
			/* Update SG list */
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			if (pSRB->SRBTotalXferLength != dLeftCounter) {
				/*
				 * data that had transferred length
				 */
				TempSRBXferredLength =
				    pSRB->SRBTotalXferLength - dLeftCounter;
				/*
				 * next time to be transferred length
				 */
				pSRB->SRBTotalXferLength = dLeftCounter;
				/*
				 * parsing from last time disconnect SRBSGIndex
				 */
				pseg = pSRB->pSRBSGL + pSRB->SRBSGIndex;
				for (SGIndexTemp = pSRB->SRBSGIndex;
				    SGIndexTemp < pSRB->SRBSGCount;
				    SGIndexTemp++) {
					/*
					 * find last time which SG transfer be
					 * disconnect
					 */
					if (TempSRBXferredLength >=
					    pseg->length)
						TempSRBXferredLength -=
						    pseg->length;
					else {
						/*
						 * update last time disconnected
						 * SG list
						 */
						pseg->length -=
						    TempSRBXferredLength;
						/* residue data length */
						pseg->address +=
						    TempSRBXferredLength;
						/* residue data pointer */
						pSRB->SRBSGIndex = SGIndexTemp;
						break;
					}
					pseg++;
				}
			}
		}
	}
	trm_reg_write8(STOPDMAXFER ,TRMREG_DMA_CONTROL);
}

/*
 * trm_DataOutPhase1: entering DATA OUT -- start the DMA transfer in
 * the out direction.
 */
static void
trm_DataOutPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	u_int16_t ioDir;
	/*
	 * do prepare before transfer when data out phase
	 */
	ioDir = XFERDATAOUT;
	trm_DataIO_transfer(pACB, pSRB, ioDir);
}

/*
 * trm_DataInPhase0: leaving DATA IN.  Mirrors trm_DataOutPhase0 --
 * computes residue from the SCSI counter and either finishes the
 * transfer or rewinds the SG list to the disconnect point.
 */
static void
trm_DataInPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	u_int8_t
TempDMAstatus, SGIndexTemp;
	u_int16_t scsi_status;
	PSEG pseg;
	u_long TempSRBXferredLength,dLeftCounter = 0;

	scsi_status = *pscsi_status;
	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			pSRB->SRBStatus |= PARITY_ERROR;
		dLeftCounter += trm_reg_read32(TRMREG_SCSI_COUNTER);
		if ((dLeftCounter == 0) ||
		    (scsi_status & SCSIXFERCNT_2_ZERO)) {
			/* Wait until the DMA engine reports completion. */
			TempDMAstatus = trm_reg_read8(TRMREG_DMA_STATUS);
			while (!(TempDMAstatus & DMAXFERCOMP))
				TempDMAstatus =
				    trm_reg_read8(TRMREG_DMA_STATUS);
			pSRB->SRBTotalXferLength = 0;
		} else {
			/*
			 * parsing the case:
			 * when a transfer not yet complete
			 * but be disconnected by upper layer
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			if (pSRB->SRBTotalXferLength != dLeftCounter) {
				/*
				 * data that had transferred length
				 */
				TempSRBXferredLength =
				    pSRB->SRBTotalXferLength - dLeftCounter;
				/*
				 * next time to be transferred length
				 */
				pSRB->SRBTotalXferLength = dLeftCounter;
				/*
				 * parsing from last time disconnect SRBSGIndex
				 */
				pseg = pSRB->pSRBSGL + pSRB->SRBSGIndex;
				for (SGIndexTemp = pSRB->SRBSGIndex;
				    SGIndexTemp < pSRB->SRBSGCount;
				    SGIndexTemp++) {
					/*
					 * find last time which SG transfer
					 * be disconnect
					 */
					if (TempSRBXferredLength >=
					    pseg->length)
						TempSRBXferredLength -=
						    pseg->length;
					else {
						/*
						 * update last time disconnected
						 * SG list
						 */
						pseg->length -=
						    TempSRBXferredLength;
						/* residue data length */
						pseg->address +=
						    TempSRBXferredLength;
						/* residue data pointer */
						pSRB->SRBSGIndex = SGIndexTemp;
						break;
					}
					pseg++;
				}
			}
		}
	}
}

/*
 * trm_DataInPhase1: entering DATA IN -- start the DMA transfer in the
 * in direction.
 */
static void
trm_DataInPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	u_int16_t ioDir;
	/*
	 * do prepare before transfer when data in phase
	 */
	ioDir = XFERDATAIN;
	trm_DataIO_transfer(pACB, pSRB, ioDir);
}

/*
 * trm_DataIO_transfer: common DATA IN/OUT setup.  If data remains,
 * programs the DMA engine with the SG list address/count and the SCSI
 * counter with the total length, then starts the DMA and SCSI transfer.
 * If the SG list is exhausted, performs a 1- or 2-byte pad transfer
 * (XFERPAD) and records an over/under-run.
 */
static void
trm_DataIO_transfer(PACB pACB, PSRB pSRB, u_int16_t ioDir)
{
	u_int8_t bval;
	PDCB pDCB;

	pDCB = pSRB->pSRBDCB;
	if (pSRB->SRBSGIndex < pSRB->SRBSGCount) {
		if (pSRB->SRBTotalXferLength != 0) {
			/*
			 * load what physical address of Scatter/Gather
			 * list table want to be transfer
			 */
			TRM_DPRINTF(" SG->address=%8x \n",pSRB->pSRBSGL->address);
			TRM_DPRINTF(" SG->length=%8x \n",pSRB->pSRBSGL->length);
			TRM_DPRINTF(" pDCB->SyncPeriod=%x \n",pDCB->SyncPeriod);
			TRM_DPRINTF(" pSRB->pSRBSGL=%8x \n",(unsigned int)pSRB->pSRBSGL);
			TRM_DPRINTF(" pSRB->SRBSGPhyAddr=%8x \n",pSRB->SRBSGPhyAddr);
			TRM_DPRINTF(" pSRB->SRBSGIndex=%d \n",pSRB->SRBSGIndex);
			TRM_DPRINTF(" pSRB->SRBSGCount=%d \n",pSRB->SRBSGCount);
			TRM_DPRINTF(" pSRB->SRBTotalXferLength=%d \n",pSRB->SRBTotalXferLength);
			pSRB->SRBState = SRB_DATA_XFER;
			trm_reg_write32(0, TRMREG_DMA_XHIGHADDR);
			trm_reg_write32(
			    (pSRB->SRBSGPhyAddr +
			     ((u_long)pSRB->SRBSGIndex << 3)),
			    TRMREG_DMA_XLOWADDR);
			/*
			 * load how many bytes in the Scatter/Gather
			 * list table
			 */
			trm_reg_write32(
			    ((u_long)(pSRB->SRBSGCount - pSRB->SRBSGIndex)
			     << 3),
			    TRMREG_DMA_XCNT);
			/*
			 * load total transfer length (24bits) max value
			 * 16Mbyte
			 */
			trm_reg_write32(pSRB->SRBTotalXferLength,
			    TRMREG_SCSI_COUNTER);
			/* Start DMA transfer */
			trm_reg_write16(ioDir, TRMREG_DMA_COMMAND);
			/* Start SCSI transfer */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/* it's important for atn stop */
			/*
			 * SCSI command
			 */
			bval = (ioDir == XFERDATAOUT) ?
			    SCMD_DMA_OUT : SCMD_DMA_IN;
			trm_reg_write8(bval, TRMREG_SCSI_COMMAND);
		} else {
			/* xfer pad */
			if (pSRB->SRBSGCount) {
				pSRB->AdaptStatus = H_OVER_UNDER_RUN;
				pSRB->SRBStatus |= OVER_RUN;
			}
			if (pDCB->SyncPeriod & WIDE_SYNC)
				trm_reg_write32(2,TRMREG_SCSI_COUNTER);
			else
				trm_reg_write32(1,TRMREG_SCSI_COUNTER);
			if (ioDir == XFERDATAOUT)
				trm_reg_write16(0, TRMREG_SCSI_FIFO);
			else
				trm_reg_read16(TRMREG_SCSI_FIFO);
			pSRB->SRBState |= SRB_XFERPAD;
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/* it's important for atn stop */
			/*
			 * SCSI command
			 */
			bval = (ioDir == XFERDATAOUT) ?
			    SCMD_FIFO_OUT : SCMD_FIFO_IN;
			trm_reg_write8(bval, TRMREG_SCSI_COMMAND);
		}
	}
}

/*
 * trm_StatusPhase0: leaving STATUS.  Reads the target status byte from
 * the SCSI FIFO, marks the SRB completed and accepts the message.
 */
static void
trm_StatusPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	pSRB->TargetStatus = trm_reg_read8(TRMREG_SCSI_FIFO);
	pSRB->SRBState = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;  /*.. initial phase*/
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
	/* it's important for atn stop */
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
}

/*
 * trm_StatusPhase1: entering STATUS.  Drains whichever FIFO is not yet
 * empty (order depends on whether a DMA command is still pending) and
 * issues SCMD_COMP.
 */
static void
trm_StatusPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	if (trm_reg_read16(TRMREG_DMA_COMMAND) & 0x0001) {
		if (!(trm_reg_read8(TRMREG_SCSI_FIFOCNT) & 0x40))
			trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
		if (!(trm_reg_read16(TRMREG_DMA_FIFOCNT) & 0x8000))
			trm_reg_write8(CLRXFIFO, TRMREG_DMA_CONTROL);
	} else {
		if (!(trm_reg_read16(TRMREG_DMA_FIFOCNT) & 0x8000))
			trm_reg_write8(CLRXFIFO, TRMREG_DMA_CONTROL);
		if (!(trm_reg_read8(TRMREG_SCSI_FIFOCNT) & 0x40))
			trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
	}
	pSRB->SRBState = SRB_STATUS;
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
	/* it's important for atn stop */
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_COMP, TRMREG_SCSI_COMMAND);
}

/*
 *scsiiom
 * trm_MsgInPhase0: one of trm_SCSI_phase0[] vectors
 *  stateV = (void *) trm_SCSI_phase0[phase]
 *	if phase =7
 * extended message codes:
 *
 *   code        description
 *
 *    02h        Reserved
 *    00h        MODIFY DATA POINTER
 *    01h        SYNCHRONOUS DATA TRANSFER REQUEST
 *    03h        WIDE DATA TRANSFER REQUEST
 * 04h - 7Fh     Reserved
 * 80h - FFh     Vendor specific
 *
 */
static void
trm_MsgInPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	u_int8_t message_in_code,bIndex,message_in_tag_id;
	PDCB pDCB;
	PSRB pSRBTemp;

	pDCB = pACB->pActiveDCB;

	message_in_code = trm_reg_read8(TRMREG_SCSI_FIFO);
	if (!(pSRB->SRBState & SRB_EXTEND_MSGIN)) {
		if (message_in_code == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
			*pscsi_status = PH_BUS_FREE; /* ..
initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else if (message_in_code == MSG_SAVE_PTR) {
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else if ((message_in_code == MSG_EXTENDED) ||
		    ((message_in_code >= MSG_SIMPLE_QTAG) &&
		     (message_in_code <= MSG_ORDER_QTAG))) {
			/* Start collecting a multi-byte message. */
			pSRB->SRBState |= SRB_EXTEND_MSGIN;
			pSRB->MsgInBuf[0] = message_in_code;
			/* extended message (01h) */
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
			/* extended message length (n) */
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else if (message_in_code == MSG_REJECT_) {
			/* Reject message */
			if (pDCB->SyncMode & WIDE_NEGO_ENABLE) {
				/* do wide nego reject */
				pDCB = pSRB->pSRBDCB;
				pDCB->SyncMode |= WIDE_NEGO_DONE;
				pDCB->SyncMode &= ~(SYNC_NEGO_DONE |
				    EN_ATN_STOP | WIDE_NEGO_ENABLE);
				pSRB->SRBState &=
				    ~(SRB_DO_WIDE_NEGO+SRB_MSGIN);
				if ((pDCB->SyncMode & SYNC_NEGO_ENABLE) &&
				    !(pDCB->SyncMode & SYNC_NEGO_DONE)) {
					/* Set ATN, in case ATN was clear */
					pSRB->SRBState |= SRB_MSGOUT;
					trm_reg_write16(
					    DO_SETATN,
					    TRMREG_SCSI_CONTROL);
				} else {
					/* Clear ATN */
					trm_reg_write16(
					    DO_CLRATN,
					    TRMREG_SCSI_CONTROL);
				}
			} else if (pDCB->SyncMode & SYNC_NEGO_ENABLE) {
				/* do sync nego reject */
				trm_reg_write16(DO_CLRATN,TRMREG_SCSI_CONTROL);
				if (pSRB->SRBState & SRB_DO_SYNC_NEGO) {
					pDCB = pSRB->pSRBDCB;
					pDCB->SyncMode &=
					    ~(SYNC_NEGO_ENABLE+SYNC_NEGO_DONE);
					pDCB->SyncPeriod = 0;
					pDCB->SyncOffset = 0;
					/*
					 *
					 *  program SCSI control register
					 *
					 */
					trm_reg_write8(pDCB->SyncPeriod,
					    TRMREG_SCSI_SYNC);
					trm_reg_write8(pDCB->SyncOffset,
					    TRMREG_SCSI_OFFSET);
					trm_SetXferRate(pACB,pSRB,pDCB);
				}
			}
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else if (message_in_code == MSG_IGNOREWIDE) {
			trm_reg_write32(1, TRMREG_SCSI_COUNTER);
			trm_reg_read8(TRMREG_SCSI_FIFO);
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else {
			/* Restore data pointer message */
			/* Save data pointer message */
			/* Completion message */
			/* NOP message */
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		}
	} else {
		/*
		 * Parsing incoming extended messages
		 */
		*pSRB->pMsgPtr = message_in_code;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		TRM_DPRINTF("pSRB->MsgInBuf[0]=%2x \n ",pSRB->MsgInBuf[0]);
		TRM_DPRINTF("pSRB->MsgInBuf[1]=%2x \n ",pSRB->MsgInBuf[1]);
		TRM_DPRINTF("pSRB->MsgInBuf[2]=%2x \n ",pSRB->MsgInBuf[2]);
		TRM_DPRINTF("pSRB->MsgInBuf[3]=%2x \n ",pSRB->MsgInBuf[3]);
		TRM_DPRINTF("pSRB->MsgInBuf[4]=%2x \n ",pSRB->MsgInBuf[4]);
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_QTAG) &&
		    (pSRB->MsgInBuf[0] <= MSG_ORDER_QTAG)) {
			/*
			 * is QUEUE tag message :
			 *
			 * byte 0:
			 * HEAD    QUEUE TAG (20h)
			 * ORDERED QUEUE TAG (21h)
			 * SIMPLE  QUEUE TAG (22h)
			 * byte 1:
			 * Queue tag (00h - FFh)
			 */
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				message_in_tag_id = pSRB->MsgInBuf[1];
				pSRB = pDCB->pGoingSRB;
				pSRBTemp = pDCB->pGoingLastSRB;
				if (pSRB) {
					/* Find the going SRB with this tag. */
					for (;;) {
						if (pSRB->TagNumber !=
						    message_in_tag_id) {
							if (pSRB == pSRBTemp) {
								goto mingx0;
							}
							pSRB = pSRB->pNextSRB;
						} else
							break;
					}
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						trm_EnableMsgOutAbort1(
						    pACB, pSRB);
					}
					if (!(pSRB->SRBState &
					    SRB_DISCONNECT)) {
						TRM_DPRINTF("SRB not yet disconnect........ \n ");
						goto mingx0;
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				} else {
mingx0:
					/* No matching SRB: abort the tag. */
					pSRB = &pACB->TmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					trm_EnableMsgOutAbort2(
					    pACB, pSRB);
				}
			}
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/* it's important for atn stop */
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) &&
		    (pSRB->MsgInBuf[2] == 3) && (pSRB->MsgCnt == 4)) {
			/*
			 * is Wide data xfer Extended message :
			 * ======================================
			 * WIDE DATA TRANSFER REQUEST
			 * ======================================
			 * byte 0 :  Extended message (01h)
			 * byte 1 :  Extended message length (02h)
			 * byte 2 :  WIDE DATA TRANSFER code (03h)
			 * byte 3 :  Transfer width exponent
			 */
			pDCB = pSRB->pSRBDCB;
			pSRB->SRBState &=
			    ~(SRB_EXTEND_MSGIN+SRB_DO_WIDE_NEGO);
			if ((pSRB->MsgInBuf[1] != 2)) {
				/* Length is wrong, reject it */
				pDCB->SyncMode &=
				    ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE);
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_REJECT_;
				trm_reg_write16(DO_SETATN,
				    TRMREG_SCSI_CONTROL);
				*pscsi_status = PH_BUS_FREE;
				/* .. initial phase */
				/* it's important for atn stop */
				trm_reg_write16(DO_DATALATCH,
				    TRMREG_SCSI_CONTROL);
				/*
				 * SCSI command
				 */
				trm_reg_write8(SCMD_MSGACCEPT,
				    TRMREG_SCSI_COMMAND);
				return;
			}
			if (pDCB->SyncMode & WIDE_NEGO_ENABLE) {
				/* Do wide negotiation */
				if (pSRB->MsgInBuf[3] > 2) {
					/* > 32 bit */
					/* reject_msg: */
					pDCB->SyncMode &=
					    ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE);
					pSRB->MsgCnt = 1;
					pSRB->MsgInBuf[0] = MSG_REJECT_;
					trm_reg_write16(DO_SETATN,
					    TRMREG_SCSI_CONTROL);
					*pscsi_status = PH_BUS_FREE;
					/* .. initial phase */
					/* it's important for atn stop */
					trm_reg_write16(DO_DATALATCH,
					    TRMREG_SCSI_CONTROL);
					/*
					 * SCSI command
					 */
					trm_reg_write8(SCMD_MSGACCEPT,
					    TRMREG_SCSI_COMMAND);
					return;
				}
				if (pSRB->MsgInBuf[3] == 2) {
					pSRB->MsgInBuf[3] = 1;
					/* do 16 bits */
				} else {
					if (!(pDCB->SyncMode &
					      WIDE_NEGO_DONE)) {
						pSRB->SRBState &=
						    ~(SRB_DO_WIDE_NEGO+SRB_MSGIN);
						pDCB->SyncMode |=
						    WIDE_NEGO_DONE;
						pDCB->SyncMode &=
						    ~(SYNC_NEGO_DONE |
						      EN_ATN_STOP |
						      WIDE_NEGO_ENABLE);
						if (pSRB->MsgInBuf[3] != 0) {
							/* is Wide data xfer */
							pDCB->SyncPeriod |=
							    WIDE_SYNC;
							pDCB->tinfo.current.width =
							    MSG_EXT_WDTR_BUS_16_BIT;
							pDCB->tinfo.goal.width =
							    MSG_EXT_WDTR_BUS_16_BIT;
						}
					}
				}
			} else
				pSRB->MsgInBuf[3] = 0;
			pSRB->SRBState |= SRB_MSGOUT;
			trm_reg_write16(DO_SETATN,TRMREG_SCSI_CONTROL);
			*pscsi_status = PH_BUS_FREE; /* .. initial phase */
			/* it's important for atn stop */
			trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
			/*
			 * SCSI command
			 */
			trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
			return;
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) &&
		    (pSRB->MsgInBuf[2] == 1) && (pSRB->MsgCnt == 5)) {
			/*
			 * is 8bit transfer Extended message :
			 * =================================
			 * SYNCHRONOUS DATA TRANSFER REQUEST
			 * =================================
			 * byte 0 :  Extended message (01h)
			 * byte 1 :  Extended message length (03)
			 * byte 2 :  SYNCHRONOUS DATA TRANSFER code (01h)
			 * byte 3 :  Transfer period factor
			 * byte 4 :  REQ/ACK offset
			 */
			pSRB->SRBState &=
			    ~(SRB_EXTEND_MSGIN+SRB_DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3) ||
			    (pSRB->MsgInBuf[2] != 1)) {
				/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_REJECT_;
				trm_reg_write16(DO_SETATN,
				    TRMREG_SCSI_CONTROL);
				*pscsi_status = PH_BUS_FREE;
				/* .. initial phase */
				trm_reg_write16(DO_DATALATCH,
				    TRMREG_SCSI_CONTROL);
				/* it's important for atn stop */
				/*
				 * SCSI command
				 */
				trm_reg_write8(SCMD_MSGACCEPT,
				    TRMREG_SCSI_COMMAND);
				return;
			} else if (!(pSRB->MsgInBuf[3]) ||
			    !(pSRB->MsgInBuf[4])) {
				/* set async */
				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &=
				    ~(SYNC_NEGO_ENABLE+SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;
				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;
				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;
				/*
				 *
				 *  program SCSI control register
				 *
				 */
				trm_reg_write8(pDCB->SyncPeriod,TRMREG_SCSI_SYNC);
				trm_reg_write8(pDCB->SyncOffset,TRMREG_SCSI_OFFSET);
				trm_SetXferRate(pACB,pSRB,pDCB);
				*pscsi_status = PH_BUS_FREE;
				/* .. initial phase */
				trm_reg_write16(DO_DATALATCH,
				    TRMREG_SCSI_CONTROL);
				/* it's important for atn stop */
				/*
				 * SCSI command
				 */
				trm_reg_write8(SCMD_MSGACCEPT,
				    TRMREG_SCSI_COMMAND);
				return;
			} else {
				/* set sync */
				pDCB = pSRB->pSRBDCB;
				pDCB->SyncMode |=
				    SYNC_NEGO_ENABLE+SYNC_NEGO_DONE;
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
				/* Transfer period factor */
				pDCB->SyncOffset = pSRB->MsgInBuf[4];
				/* REQ/ACK offset */
				if (pACB->AdaptType == 1) {
					for(bIndex = 0; bIndex < 7; bIndex++) {
						if (pSRB->MsgInBuf[3] <=
						    dc395u2x_clock_period[bIndex]) {
							pDCB->tinfo.goal.period =
							    dc395u2x_tinfo_period[bIndex];
							pDCB->tinfo.current.period =
							    dc395u2x_tinfo_period[bIndex];
							pDCB->tinfo.goal.offset =
							    pDCB->SyncOffset;
							pDCB->tinfo.current.offset =
							    pDCB->SyncOffset;
							pDCB->SyncPeriod |=
							    (bIndex|LVDS_SYNC);
							break;
						}
					}
				} else {
					for(bIndex = 0; bIndex < 7; bIndex++) {
						if (pSRB->MsgInBuf[3] <=
						    dc395x_clock_period[bIndex]) {
							pDCB->tinfo.goal.period =
							    dc395x_tinfo_period[bIndex];
							pDCB->tinfo.current.period =
							    dc395x_tinfo_period[bIndex];
							pDCB->tinfo.goal.offset =
							    pDCB->SyncOffset;
							pDCB->tinfo.current.offset =
							    pDCB->SyncOffset;
							pDCB->SyncPeriod |=
							    (bIndex|ALT_SYNC);
							break;
						}
					}
				}
				/*
				 *
				 *  program SCSI control register
				 *
				 */
				trm_reg_write8(pDCB->SyncPeriod,
				    TRMREG_SCSI_SYNC);
				trm_reg_write8(pDCB->SyncOffset,
				    TRMREG_SCSI_OFFSET);
				trm_SetXferRate(pACB,pSRB,pDCB);
				*pscsi_status=PH_BUS_FREE;/*.. initial phase*/
				trm_reg_write16(DO_DATALATCH,TRMREG_SCSI_CONTROL);/* it's important for atn stop*/
				/*
				** SCSI command
				*/
				trm_reg_write8(SCMD_MSGACCEPT,TRMREG_SCSI_COMMAND);
				return;
			}
		}
		*pscsi_status = PH_BUS_FREE; /* .. initial phase */
		trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
		/* it's important for atn stop */
		/*
		 * SCSI command
		 */
		trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
	}
}

/*
 * trm_MsgInPhase1: entering MESSAGE IN.  Arms the chip to read one
 * message byte from the bus into the SCSI FIFO.
 */
static void
trm_MsgInPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
	trm_reg_write32(1,TRMREG_SCSI_COUNTER);
	if (!(pSRB->SRBState & SRB_MSGIN)) {
		pSRB->SRBState &= SRB_DISCONNECT;
		pSRB->SRBState |= SRB_MSGIN;
	}
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
	/* it's important for atn stop*/
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_FIFO_IN, TRMREG_SCSI_COMMAND);
}

/* trm_Nop0: no-op phase handler. */
static void
trm_Nop0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
}

/* trm_Nop1: no-op phase handler. */
static void
trm_Nop1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
}

/*
 * trm_SetXferRate: publish the negotiated sync period/offset to CAM
 * via an AC_TRANSFER_NEG async, and copy the settings to every DCB
 * sharing the same target ID (all LUNs of the target).
 */
static void
trm_SetXferRate(PACB pACB,PSRB pSRB, PDCB pDCB)
{
	union ccb *pccb;
	struct ccb_trans_settings neg;
	u_int16_t cnt, i;
	u_int8_t bval;
	PDCB pDCBTemp;

	/*
	 * set all lun device's  period , offset
	 */
	TRM_DPRINTF("trm_SetXferRate\n");
	pccb = pSRB->pccb;
	neg.sync_period = pDCB->tinfo.goal.period;
	neg.sync_offset = pDCB->tinfo.goal.offset;
	neg.valid = CCB_TRANS_SYNC_RATE_VALID |
	    CCB_TRANS_SYNC_OFFSET_VALID;
	xpt_setup_ccb(&neg.ccb_h, pccb->ccb_h.path, /* priority */1);
	xpt_async(AC_TRANSFER_NEG, pccb->ccb_h.path, &neg);
	if (!(pDCB->IdentifyMsg & 0x07)) {
		pDCBTemp = pACB->pLinkDCB;
		cnt = pACB->DeviceCnt;
		bval = pDCB->TargetID;
		for (i = 0; i < cnt; i++) {
			if (pDCBTemp->TargetID == bval) {
				pDCBTemp->SyncPeriod = pDCB->SyncPeriod;
				pDCBTemp->SyncOffset = pDCB->SyncOffset;
				pDCBTemp->SyncMode = pDCB->SyncMode;
			}
			pDCBTemp = pDCBTemp->pNextDCB;
		}
	}
	return;
}

/*
 * scsiiom
 * trm_Interrupt
 *
 * ---SCSI bus phase
 *
 * PH_DATA_OUT	0x00	Data out phase
 * PH_DATA_IN	0x01	Data in phase
 * PH_COMMAND	0x02	Command phase
 * PH_STATUS	0x03	Status phase
 * PH_BUS_FREE	0x04	Invalid phase used as bus free
 * PH_BUS_FREE	0x05	Invalid phase used as bus free
 * PH_MSG_OUT	0x06	Message out phase
 * PH_MSG_IN	0x07	Message in phase
 */

/*
 * trm_Disconnect - handle a SCSI bus disconnect: clear the active DCB,
 * then dispatch on why the disconnect happened (unexpected reselection,
 * abort sent, selection timeout, legitimate disconnect, or completion).
 */
static void
trm_Disconnect(PACB pACB)
{
	PDCB		pDCB;
	PSRB		pSRB, psrb;
	u_int16_t	i,j, cnt;
	u_int		target_id,target_lun;

	TRM_DPRINTF("trm_Disconnect...............\n ");

	pDCB = pACB->pActiveDCB;
	if (!pDCB) {
		/* No active DCB: settle the bus and resume. */
		TRM_DPRINTF(" Exception Disconnect DCB=NULL..............\n ");
		j = 400;
		while (--j)
			DELAY(1);	/* 1 msec */
		trm_reg_write16((DO_CLRFIFO | DO_HWRESELECT),
		    TRMREG_SCSI_CONTROL);
		return;
	}
	pSRB = pDCB->pActiveSRB;
	/* bug pSRB=0 */
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;
	TRM_DPRINTF(":pDCB->pActiveSRB= %8x \n ",(u_int) pDCB->pActiveSRB);
	pACB->pActiveDCB = 0;
	pSRB->ScsiPhase = PH_BUS_FREE;	/* SCSI bus free Phase */
	trm_reg_write16((DO_CLRFIFO | DO_HWRESELECT), TRMREG_SCSI_CONTROL);
	if (pSRB->SRBState & SRB_UNEXPECT_RESEL) {
		pSRB->SRBState = 0;
		trm_DoWaitingSRB(pACB);
	} else if (pSRB->SRBState & SRB_ABORT_SENT) {
		/* Abort completed: return every going SRB to the free list. */
		pDCB->DCBFlag = 0;
		cnt = pDCB->GoingSRBCnt;
		pDCB->GoingSRBCnt = 0;
		pSRB = pDCB->pGoingSRB;
		for (i = 0; i < cnt; i++) {
			psrb = pSRB->pNextSRB;
			pSRB->pNextSRB = pACB->pFreeSRB;
			pACB->pFreeSRB = pSRB;
			pSRB = psrb;
		}
		pDCB->pGoingSRB = 0;
		trm_DoWaitingSRB(pACB);
	} else {
		if ((pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
		    !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED))) {
			/* Selection time out */
			if (!(pACB->scan_devices[target_id][target_lun]) &&
			    pSRB->CmdBlock[0] != 0x00 && /* TEST UNIT READY */
			    pSRB->CmdBlock[0] != INQUIRY) {
				/* Not a probe command: queue it for retry. */
				pSRB->SRBState = SRB_READY;
				trm_RewaitSRB(pDCB, pSRB);
			} else {
				pSRB->TargetStatus = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			}
		} else if (pSRB->SRBState & SRB_DISCONNECT) {
			/*
			 * SRB_DISCONNECT
			 */
			trm_DoWaitingSRB(pACB);
		} else if (pSRB->SRBState & SRB_COMPLETED) {
disc1:
			/*
			 * SRB_COMPLETED
			 */
			pDCB->pActiveSRB = 0;
			pSRB->SRBState = SRB_FREE;
			trm_SRBdone(pACB, pDCB, pSRB);
		}
	}
	return;
}

/*
 * trm_Reselect - handle reselection by a target: find the matching DCB
 * from the hardware-reported target/LUN id, pick or fabricate the SRB to
 * resume (TmpSRB with an abort message if no disconnected SRB matches),
 * then reprogram target id, host id, sync period and offset.
 */
static void
trm_Reselect(PACB pACB)
{
	PDCB		pDCB;
	PSRB		pSRB;
	u_int16_t	RselTarLunId;

	TRM_DPRINTF("trm_Reselect................. \n");

	pDCB = pACB->pActiveDCB;
	if (pDCB) {
		/* Arbitration lost but Reselection win */
		pSRB = pDCB->pActiveSRB;
		pSRB->SRBState = SRB_READY;
		trm_RewaitSRB(pDCB, pSRB);
	}
	/* Read Reselected Target Id and LUN */
	RselTarLunId = trm_reg_read16(TRMREG_SCSI_TARGETID) & 0x1FFF;
	pDCB = pACB->pLinkDCB;
	while (RselTarLunId != *((u_int16_t *) &pDCB->TargetID)) {
		/* get pDCB of the reselect id */
		pDCB = pDCB->pNextDCB;
	}
	pACB->pActiveDCB = pDCB;
	if (pDCB->SyncMode & EN_TAG_QUEUING) {
		pSRB = &pACB->TmpSRB;
		pDCB->pActiveSRB = pSRB;
	} else {
		pSRB = pDCB->pActiveSRB;
		if (!pSRB || !(pSRB->SRBState & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			pSRB = &pACB->TmpSRB;
			pSRB->SRBState = SRB_UNEXPECT_RESEL;
			pDCB->pActiveSRB = pSRB;
			trm_EnableMsgOutAbort1(pACB, pSRB);
		} else {
			if (pDCB->DCBFlag & ABORT_DEV_) {
				pSRB->SRBState = SRB_ABORT_SENT;
				trm_EnableMsgOutAbort1(pACB, pSRB);
			} else
				pSRB->SRBState = SRB_DATA_XFER;
		}
	}
	pSRB->ScsiPhase = PH_BUS_FREE;	/* SCSI bus free Phase */
	/*
	 * Program HA ID, target ID, period and offset
	 */
	trm_reg_write8((u_int8_t) RselTarLunId, TRMREG_SCSI_TARGETID);
	/* target ID */
	trm_reg_write8(pACB->AdaptSCSIID, TRMREG_SCSI_HOSTID);
	/* host ID */
	trm_reg_write8(pDCB->SyncPeriod, TRMREG_SCSI_SYNC);
	/* period */
	trm_reg_write8(pDCB->SyncOffset, TRMREG_SCSI_OFFSET);
	/* offset */
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
	/* it's important for atn stop */
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND);
	/* to rls the /ACK signal */
}

/*
 * trm_SRBdone - complete an SRB: sync/unload its DMA map, translate the
 * target/adapter status into CAM status codes, handle auto request sense,
 * update scan-devices bookkeeping, unlink the SRB from the going queue
 * and hand the CCB back to CAM with xpt_done().
 */
static void
trm_SRBdone(PACB pACB, PDCB pDCB, PSRB pSRB)
{
	PSRB			psrb;
	u_int8_t		bval, bval1,status;
	union ccb		*pccb;
	struct ccb_scsiio	*pcsio;
	PSCSI_INQDATA		ptr;
	int			intflag;
	u_int			target_id,target_lun;
	PDCB			pTempDCB;

	pccb = pSRB->pccb;
	if (pccb == NULL)
		return;
	pcsio = &pccb->csio;
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;
	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		/* Finish the data transfer: sync then unload the DMA map. */
		bus_dmasync_op_t op;
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(pACB->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(pACB->buffer_dmat, pSRB->dmamap);
	}
	/*
	 * target status
	 */
	status = pSRB->TargetStatus;
	pcsio->scsi_status = SCSI_STAT_GOOD;
	pccb->ccb_h.status = CAM_REQ_CMP;
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		/*
		 * status of auto request sense: restore the original CDB and
		 * SG entry saved by trm_RequestSense(), copy the sense data
		 * into the CCB and mark the sense valid.
		 */
		pSRB->SRBFlag &= ~AUTO_REQSENSE;
		pSRB->AdaptStatus = 0;
		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
		if (status == SCSI_STATUS_CHECK_COND) {
			/* The sense command itself failed. */
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
			goto ckc_e;
		}
		*((u_long *) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
		*((u_long *) &(pSRB->CmdBlock[4])) = pSRB->Segment0[1];
		pSRB->SRBTotalXferLength = pSRB->Segment1[1];
		pSRB->pSRBSGL->address = pSRB->SgSenseTemp.address;
		pSRB->pSRBSGL->length = pSRB->SgSenseTemp.length;
		pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
		bcopy(trm_get_sense_buf(pACB, pSRB), &pcsio->sense_data,
		    pcsio->sense_len);
		pcsio->ccb_h.status = CAM_SCSI_STATUS_ERROR |
		    CAM_AUTOSNS_VALID;
		goto ckc_e;
	}
	/*
	 * target status
	 */
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
				/* Kick off auto request sense; SRB finishes
				 * on its second pass through here. */
				TRM_DPRINTF("trm_RequestSense..................\n");
				trm_RequestSense(pACB, pDCB, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STAT_QUEUEFULL) {
			/* Lower the tag depth to the current count - 1 and
			 * requeue the SRB. */
			bval = (u_int8_t) pDCB->GoingSRBCnt;
			bval--;
			pDCB->MaxActiveCommandCnt = bval;
			trm_RewaitSRB(pDCB, pSRB);
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			return;
		} else if (status == SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STAT_BUSY) {
			TRM_DPRINTF("trm: target busy at %s %d\n",
			    __FILE__, __LINE__);
			pcsio->scsi_status = SCSI_STAT_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
			return;
			/* The device busy, try again later? */
		} else if (status == SCSI_STAT_RESCONFLICT) {
			TRM_DPRINTF("trm: target reserved at %s %d\n",
			    __FILE__, __LINE__);
			pcsio->scsi_status = SCSI_STAT_RESCONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;	/*XXX*/
			return;
		} else {
			pSRB->AdaptStatus = 0;
			if (pSRB->RetryCnt) {
				/* Retry the command from the first SG entry. */
				pSRB->RetryCnt--;
				pSRB->TargetStatus = 0;
				pSRB->SRBSGIndex = 0;
				if (trm_StartSCSI(pACB, pDCB, pSRB)) {
					/*
					 * If trm_StartSCSI return 1:
					 * current interrupt status is
					 * interrupt disreenable
					 * It's said that SCSI processor has
					 * more than one SRB need to do
					 */
					trm_RewaitSRB(pDCB, pSRB);
				}
				return;
			} else {
				TRM_DPRINTF("trm: driver stuffup at %s %d\n",
				    __FILE__, __LINE__);
				pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			}
		}
	} else {
		/*
		 * process initiator status..........................
		 * Adapter (initiator) status
		 */
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;
			/* Illegal length (over/under run) */
			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
			TRM_DPRINTF("trm: driver stuffup %s %d\n",
			    __FILE__, __LINE__);
			pDCB->tinfo.goal.period = 0;
			pDCB->tinfo.goal.offset = 0;
			/* Driver failed to perform operation */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {
			/* no error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pccb->ccb_h.status = CAM_REQ_CMP;
			/* there is no error, (sense is invalid) */
		}
	}
ckc_e:
	if (pACB->scan_devices[target_id][target_lun]) {
		/*
		 * if SCSI command in "scan devices" duty
		 */
		if (pSRB->CmdBlock[0] == TEST_UNIT_READY)
			pACB->scan_devices[target_id][target_lun] = 0;
			/* SCSI command phase: test unit ready */
		else if (pSRB->CmdBlock[0] == INQUIRY) {
			/*
			 * SCSI command phase: inquiry scsi device data
			 * (type, capacity, manufacture....)
			 */
			if (pccb->ccb_h.status == CAM_SEL_TIMEOUT)
				goto NO_DEV;
			ptr = (PSCSI_INQDATA) pcsio->data_ptr;
			/* page fault */
			TRM_DPRINTF("trm_SRBdone..PSCSI_INQDATA:%2x \n",
			    ptr->DevType);
			bval1 = ptr->DevType & SCSI_DEVTYPE;
			if (bval1 == SCSI_NODEV) {
NO_DEV:
				/* No device at this nexus: unlink its DCB
				 * from the circular DCB list. */
				TRM_DPRINTF("trm_SRBdone NO Device:target_id= %d ,target_lun= %d \n",
				    target_id, target_lun);
				intflag = splcam();
				pACB->scan_devices[target_id][target_lun] = 0;
				/* no device set scan device flag =0 */
				/* pDCB Q link */
				/* move the head of DCB to tempDCB */
				pTempDCB = pACB->pLinkDCB;
				/* search current DCB for pass link */
				while (pTempDCB->pNextDCB != pDCB) {
					pTempDCB = pTempDCB->pNextDCB;
				}
				/*
				 * when the current DCB found then connect
				 * current DCB tail
				 */
				/* to the DCB tail that before current DCB */
				pTempDCB->pNextDCB = pDCB->pNextDCB;
				/*
				 * if there was only one DCB, connect his tail
				 * to his head
				 */
				if (pACB->pLinkDCB == pDCB)
					pACB->pLinkDCB = pTempDCB->pNextDCB;
				if (pACB->pDCBRunRobin == pDCB)
					pACB->pDCBRunRobin = pTempDCB->pNextDCB;
				pDCB->DCBstatus &= ~DS_IN_QUEUE;
				pACB->DeviceCnt--;
				if (pACB->DeviceCnt == 0) {
					pACB->pLinkDCB = NULL;
					pACB->pDCBRunRobin = NULL;
				}
				splx(intflag);
			} else {
#ifdef trm_DEBUG1
				int j;
				for (j = 0; j < 28; j++) {
					TRM_DPRINTF("ptr=%2x ",
					    ((u_int8_t *)ptr)[j]);
				}
#endif
				pDCB->DevType = bval1;
				if (bval1 == SCSI_DASD ||
				    bval1 == SCSI_OPTICAL) {
					/* Enable tag queuing only if both the
					 * device (INQUIRY CmdQue + SCSI-2) and
					 * the configured DevMode allow it. */
					if ((((ptr->Vers & 0x07) >= 2) ||
					     ((ptr->RDF & 0x0F) == 2)) &&
					    (ptr->Flags & SCSI_INQ_CMDQUEUE) &&
					    (pDCB->DevMode & TAG_QUEUING_) &&
					    (pDCB->DevMode & EN_DISCONNECT_)) {
						if (pDCB->DevMode &
						    TAG_QUEUING_) {
							pDCB->MaxActiveCommandCnt =
							    pACB->TagMaxNum;
							pDCB->SyncMode |=
							    EN_TAG_QUEUING;
							pDCB->tinfo.disc_tag |=
							    TRM_CUR_TAGENB;
						} else {
							pDCB->SyncMode |=
							    EN_ATN_STOP;
							pDCB->tinfo.disc_tag &=
							    ~TRM_CUR_TAGENB;
						}
					}
				}
			}
		}	/* pSRB->CmdBlock[0] == INQUIRY */
	}	/* pACB->scan_devices[target_id][target_lun] */
	intflag = splcam();
	/* ReleaseSRB(pDCB, pSRB); */
	/* Unlink the SRB from the going queue and put it on the free list. */
	if (pSRB == pDCB->pGoingSRB)
		pDCB->pGoingSRB = pSRB->pNextSRB;
	else {
		psrb = pDCB->pGoingSRB;
		while (psrb->pNextSRB != pSRB) {
			psrb = psrb->pNextSRB;
		}
		psrb->pNextSRB = pSRB->pNextSRB;
		if (pSRB == pDCB->pGoingLastSRB) {
			pDCB->pGoingLastSRB = psrb;
		}
	}
	pSRB->pNextSRB = pACB->pFreeSRB;
	pACB->pFreeSRB = pSRB;
	pDCB->GoingSRBCnt--;
	trm_DoWaitingSRB(pACB);
	splx(intflag);
	/* Notify cmd done */
	xpt_done (pccb);
}

/*
 * trm_DoingSRB_Done - fail every in-flight SRB on every DCB with
 * CAM_SEL_TIMEOUT, returning the SRBs to the free list.
 */
static void
trm_DoingSRB_Done(PACB pACB)
{
	PDCB		pDCB, pdcb;
	PSRB		psrb, psrb2;
	u_int16_t	cnt, i;
	union ccb	*pccb;

	pDCB = pACB->pLinkDCB;
	if (pDCB == NULL)
		return;
	pdcb = pDCB;
	do {
		cnt = pdcb->GoingSRBCnt;
		psrb = pdcb->pGoingSRB;
		for (i = 0; i < cnt; i++) {
			psrb2 = psrb->pNextSRB;
			pccb = psrb->pccb;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
			/* ReleaseSRB(pDCB, pSRB); */
			psrb->pNextSRB = pACB->pFreeSRB;
			pACB->pFreeSRB = psrb;
			xpt_done(pccb);
			psrb = psrb2;
		}
		pdcb->GoingSRBCnt = 0;;
		pdcb->pGoingSRB = NULL;
		pdcb = pdcb->pNextDCB;
	} while (pdcb != pDCB);
}

/*
 * trm_ResetSCSIBus - assert SCSI bus reset and busy-wait until the chip
 * reports INT_SCSIRESET; flags RESET_DEV for the reset-detect handler.
 */
static void
trm_ResetSCSIBus(PACB pACB)
{
	int	intflag;

	intflag = splcam();
	pACB->ACBFlag |= RESET_DEV;

	trm_reg_write16(DO_RSTSCSI,TRMREG_SCSI_CONTROL);
	while (!(trm_reg_read16(TRMREG_SCSI_INTSTATUS) & INT_SCSIRESET));
	splx(intflag);
	return;
}

/*
 * trm_ScsiRstDetect - react to a detected SCSI bus reset: wait ~1s,
 * stop DMA, clear the FIFO, and either acknowledge our own reset
 * (RESET_DEV -> RESET_DONE) or recover all SRBs after a foreign reset.
 */
static void
trm_ScsiRstDetect(PACB pACB)
{
	int	intflag;
	u_long	wlval;

	TRM_DPRINTF("trm_ScsiRstDetect \n");
	wlval = 1000;
	while (--wlval)
		DELAY(1000);	/* delay 1 sec */
	intflag = splcam();
	trm_reg_write8(STOPDMAXFER,TRMREG_DMA_CONTROL);
	trm_reg_write16(DO_CLRFIFO,TRMREG_SCSI_CONTROL);
	if (pACB->ACBFlag & RESET_DEV)
		pACB->ACBFlag |= RESET_DONE;
	else {
		pACB->ACBFlag |= RESET_DETECT;
		trm_ResetDevParam(pACB);
		/* trm_DoingSRB_Done(pACB); ???? */
		trm_RecoverSRB(pACB);
		pACB->pActiveDCB = NULL;
		pACB->ACBFlag = 0;
		trm_DoWaitingSRB(pACB);
	}
	splx(intflag);
	return;
}

/*
 * trm_RequestSense - convert the SRB into a 6-byte REQUEST SENSE command:
 * save the original CDB and SG entry in Segment0/SgSenseTemp, point the
 * SG list at the per-SRB sense buffer, and restart the SRB.
 */
static void
trm_RequestSense(PACB pACB, PDCB pDCB, PSRB pSRB)
{
	union ccb		*pccb;
	struct ccb_scsiio	*pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	/* Save the original CDB and transfer bookkeeping for restore in
	 * trm_SRBdone(). */
	pSRB->Segment0[0] = *((u_long *) &(pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_long *) &(pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (u_long) ((pSRB->ScsiCmdLen << 8) +
	    pSRB->SRBSGCount);
	pSRB->Segment1[1] = pSRB->SRBTotalXferLength; /* ?????????? */

	/* $$$$$$ Status of initiator/target $$$$$$$$ */
	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;
	/* $$$$$$ Status of initiator/target $$$$$$$$ */

	pSRB->SRBTotalXferLength = sizeof(pcsio->sense_data);
	pSRB->SgSenseTemp.address = pSRB->pSRBSGL->address;
	pSRB->SgSenseTemp.length = pSRB->pSRBSGL->length;
	pSRB->pSRBSGL->address = trm_get_sense_bufaddr(pACB, pSRB);
	pSRB->pSRBSGL->length = (u_long) sizeof(struct scsi_sense_data);
	pSRB->SRBSGCount = 1;
	pSRB->SRBSGIndex = 0;

	/* Build the REQUEST SENSE CDB (opcode 0x03). */
	*((u_long *) &(pSRB->CmdBlock[0])) = 0x00000003;
	pSRB->CmdBlock[1] = pDCB->IdentifyMsg << 5;
	*((u_int16_t *) &(pSRB->CmdBlock[4])) = pcsio->sense_len;
	pSRB->ScsiCmdLen = 6;

	if (trm_StartSCSI(pACB, pDCB, pSRB))
		/*
		 * If trm_StartSCSI return 1:
		 * current interrupt status is interrupt disreenable
		 * It's said that SCSI processor has more one SRB need to do
		 */
		trm_RewaitSRB(pDCB, pSRB);
}

/* trm_EnableMsgOutAbort2 - arm a one-byte message-out by raising ATN. */
static void
trm_EnableMsgOutAbort2(PACB pACB, PSRB pSRB)
{
	pSRB->MsgCnt = 1;
	trm_reg_write16(DO_SETATN, TRMREG_SCSI_CONTROL);
}

/* trm_EnableMsgOutAbort1 - queue MSG_ABORT as the outgoing message. */
static void
trm_EnableMsgOutAbort1(PACB pACB, PSRB pSRB)
{
	pSRB->MsgOutBuf[0] = MSG_ABORT;
	trm_EnableMsgOutAbort2(pACB, pSRB);
}

/*
 * trm_initDCB - initialize one device control block: link it into the
 * circular DCB list, seed identify/disconnect/tag settings from the
 * per-target NVRAM configuration, and (for LUN 0 only) compute the sync
 * negotiation parameters; non-zero LUNs copy LUN 0's settings.
 */
static void
trm_initDCB(PACB pACB, PDCB pDCB, u_int16_t unit,u_int32_t i,u_int32_t j)
{
	PNVRAMTYPE	pEEpromBuf;
	u_int8_t	bval,PeriodIndex;
	u_int		target_id,target_lun;
	PDCB		pTempDCB;
	int		intflag;

	target_id = i;
	target_lun = j;

	/*
	 * Using the lun 0 device to init other DCB first, if the device
	 * has been initialized.
	 * I don't want init sync arguments one by one, it is the same.
	 */
	if (target_lun != 0 &&
	    (pACB->DCBarray[target_id][0].DCBstatus & DS_IN_QUEUE))
		bcopy(&pACB->DCBarray[target_id][0], pDCB,
		    sizeof(TRM_DCB));
	intflag = splcam();
	if (pACB->pLinkDCB == 0) {
		pACB->pLinkDCB = pDCB;
		/*
		 * RunRobin impersonate the role
		 * that let each device had good proportion
		 * about SCSI command proceeding
		 */
		pACB->pDCBRunRobin = pDCB;
		pDCB->pNextDCB = pDCB;
	} else {
		pTempDCB = pACB->pLinkDCB;
		/* search the last node of DCB link */
		while (pTempDCB->pNextDCB != pACB->pLinkDCB)
			pTempDCB = pTempDCB->pNextDCB;
		/* connect current DCB with last DCB tail */
		pTempDCB->pNextDCB = pDCB;
		/* connect current DCB tail to this DCB Q head */
		pDCB->pNextDCB = pACB->pLinkDCB;
	}
	splx(intflag);

	pACB->DeviceCnt++;
	pDCB->TargetID = target_id;
	pDCB->TargetLUN = target_lun;
	pDCB->pWaitingSRB = NULL;
	pDCB->pGoingSRB = NULL;
	pDCB->GoingSRBCnt = 0;
	pDCB->pActiveSRB = NULL;
	pDCB->MaxActiveCommandCnt = 1;
	pDCB->DCBFlag = 0;
	pDCB->DCBstatus |= DS_IN_QUEUE;
	/* $$$$$$$ */
	pEEpromBuf = &trm_eepromBuf[unit];
	pDCB->DevMode = pEEpromBuf->NvramTarget[target_id].NvmTarCfg0;
	pDCB->AdpMode = pEEpromBuf->NvramChannelCfg;
	/* $$$$$$$ */
	/*
	 * disconnect enable ?
	 */
	if (pDCB->DevMode & NTC_DO_DISCONNECT) {
		bval = 0xC0;
		pDCB->tinfo.disc_tag |= TRM_USR_DISCENB ;
	} else {
		bval = 0x80;
		pDCB->tinfo.disc_tag &= ~(TRM_USR_DISCENB);
	}
	bval |= target_lun;
	pDCB->IdentifyMsg = bval;
	/* Non-zero LUNs inherited the rest from LUN 0 above. */
	if (target_lun != 0 &&
	    (pACB->DCBarray[target_id][0].DCBstatus & DS_IN_QUEUE))
		return;
	/* $$$$$$$ */
	/*
	 * tag Qing enable ?
	 */
	if (pDCB->DevMode & TAG_QUEUING_) {
		pDCB->tinfo.disc_tag |= TRM_USR_TAGENB ;
	} else
		pDCB->tinfo.disc_tag &= ~(TRM_USR_TAGENB);
	/* $$$$$$$ */
	/*
	 * wide nego, sync nego enable ?
	 */
	pDCB->SyncPeriod = 0;
	pDCB->SyncOffset = 0;
	PeriodIndex = pEEpromBuf->NvramTarget[target_id].NvmTarPeriod & 0x07;
	if (pACB->AdaptType==1) {	/* is U2? */
		pDCB->MaxNegoPeriod = dc395u2x_clock_period[ PeriodIndex ];
		pDCB->tinfo.user.period = pDCB->MaxNegoPeriod;
		pDCB->tinfo.user.offset =
		    (pDCB->SyncMode & SYNC_NEGO_ENABLE) ? 31 : 0;
	} else {
		pDCB->MaxNegoPeriod = dc395x_clock_period[ PeriodIndex ];
		pDCB->tinfo.user.period = pDCB->MaxNegoPeriod;
		pDCB->tinfo.user.offset =
		    (pDCB->SyncMode & SYNC_NEGO_ENABLE) ? 15 : 0;
	}
	pDCB->SyncMode = 0;
	if ((pDCB->DevMode & NTC_DO_WIDE_NEGO) &&
	    (pACB->Config & HCC_WIDE_CARD))
		pDCB->SyncMode |= WIDE_NEGO_ENABLE;	/* enable wide nego */
	if (pDCB->DevMode & NTC_DO_SYNC_NEGO)
		pDCB->SyncMode |= SYNC_NEGO_ENABLE;	/* enable sync nego */
	/* $$$$$$$ */
	/*
	 * Fill in tinfo structure.
	 */
	pDCB->tinfo.user.width = (pDCB->SyncMode & WIDE_NEGO_ENABLE) ?
	    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
	pDCB->tinfo.current.period = 0;
	pDCB->tinfo.current.offset = 0;
	pDCB->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT;
}

/*
 * trm_srbmapSG - busdma callback: record the bus address of an SRB's
 * scatter/gather list.
 */
static void
trm_srbmapSG(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	PSRB pSRB;

	pSRB = (PSRB) arg;
	pSRB->SRBSGPhyAddr = segs->ds_addr;
	return;
}

/*
 * trm_destroySRB - tear down per-SRB DMA resources (SG list memory/map
 * and the data buffer map) for every SRB on the free list.
 */
static void
trm_destroySRB(PACB pACB)
{
	PSRB pSRB;

	pSRB = pACB->pFreeSRB;
	while (pSRB) {
		if (pSRB->sg_dmamap) {
			bus_dmamap_unload(pACB->sg_dmat, pSRB->sg_dmamap);
			bus_dmamem_free(pACB->sg_dmat, pSRB->pSRBSGL,
			    pSRB->sg_dmamap);
			bus_dmamap_destroy(pACB->sg_dmat, pSRB->sg_dmamap);
		}
		if (pSRB->dmamap)
			bus_dmamap_destroy(pACB->buffer_dmat, pSRB->dmamap);
		pSRB = pSRB->pNextSRB;
	}
}

/*
 * trm_initSRB - allocate and link all TRM_MAX_SRB_CNT SRBs: DMA memory
 * for each SG list, a data-buffer dmamap per SRB, and the free-list
 * chain.  Returns 0 on success or an errno on allocation failure.
 */
static int
trm_initSRB(PACB pACB)
{
	u_int16_t	i;
	PSRB		pSRB;
	int		error;

	for (i = 0; i < TRM_MAX_SRB_CNT; i++) {
		pSRB = (PSRB)&pACB->pFreeSRB[i];

		if (bus_dmamem_alloc(pACB->sg_dmat,
		    (void **)&pSRB->pSRBSGL, BUS_DMA_NOWAIT,
		    &pSRB->sg_dmamap) !=0 ) {
			return ENXIO;
		}
		bus_dmamap_load(pACB->sg_dmat, pSRB->sg_dmamap, pSRB->pSRBSGL,
		    TRM_MAX_SG_LISTENTRY * sizeof(SGentry),
		    trm_srbmapSG, pSRB, /*flags*/0);
		if (i != TRM_MAX_SRB_CNT - 1) {
			/*
			 * link all SRB
			 */
			pSRB->pNextSRB = &pACB->pFreeSRB[i+1];
		} else {
			/*
			 * load NULL to NextSRB of the last SRB
			 */
			pSRB->pNextSRB = NULL;
		}
		pSRB->TagNumber = i;

		/*
		 * Create the dmamap.  This is no longer optional!
		 */
		if ((error = bus_dmamap_create(pACB->buffer_dmat, 0,
		    &pSRB->dmamap)) != 0)
			return (error);
	}
	return (0);
}

/*
 * trm_initACB - initialize per-adapter soft state from NVRAM: max
 * id/lun scan range, SCSI id, tag depth, and clear the DCB pointers.
 */
static void
trm_initACB(PACB pACB, u_int8_t adaptType, u_int16_t unit)
{
	PNVRAMTYPE	pEEpromBuf;

	pEEpromBuf = &trm_eepromBuf[unit];
	pACB->max_id = 15;

	if (pEEpromBuf->NvramChannelCfg & NAC_SCANLUN)
		pACB->max_lun = 7;
	else
		pACB->max_lun = 0;

	TRM_DPRINTF("trm: pACB->max_id= %d pACB->max_lun= %d \n",
	    pACB->max_id, pACB->max_lun);
	pACB->pLinkDCB = NULL;
	pACB->pDCBRunRobin = NULL;
	pACB->pActiveDCB = NULL;
	pACB->AdapterUnit = (u_int8_t)unit;
	pACB->AdaptSCSIID = pEEpromBuf->NvramScsiId;
	pACB->AdaptSCSILUN = 0;
	pACB->DeviceCnt = 0;
	pACB->AdaptType = adaptType;
	pACB->TagMaxNum = 2 << pEEpromBuf->NvramMaxTag;
	pACB->ACBFlag = 0;
	return;
}

/*
 * NVRAM_trm_write_all - write the whole 128-byte NVRAM image to the
 * serial EEPROM (write-enable, per-byte writes, write-disable).
 */
static void
NVRAM_trm_write_all(PNVRAMTYPE pEEpromBuf,PACB pACB)
{
	u_int8_t	*bpEeprom = (u_int8_t *) pEEpromBuf;
	u_int8_t	bAddr;

	/* Enable SEEPROM */
	trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) | EN_EEPROM),
	    TRMREG_GEN_CONTROL);
	/*
	 * Write enable
	 */
	NVRAM_trm_write_cmd(pACB, 0x04, 0xFF);
	trm_reg_write8(0, TRMREG_GEN_NVRAM);
	NVRAM_trm_wait_30us(pACB);
	for (bAddr = 0; bAddr < 128; bAddr++, bpEeprom++) {
		NVRAM_trm_set_data(pACB, bAddr, *bpEeprom);
	}
	/*
	 * Write disable
	 */
	NVRAM_trm_write_cmd(pACB, 0x04, 0x00);
	trm_reg_write8(0 , TRMREG_GEN_NVRAM);
	NVRAM_trm_wait_30us(pACB);
	/* Disable SEEPROM */
	trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) & ~EN_EEPROM),
	    TRMREG_GEN_CONTROL);
	return;
}

/*
 * NVRAM_trm_set_data - bit-bang one byte into the serial EEPROM at the
 * given address (MSB first), then poll until the part reports ready.
 */
static void
NVRAM_trm_set_data(PACB pACB, u_int8_t bAddr, u_int8_t bData)
{
	int		i;
	u_int8_t	bSendData;

	/*
	 * Send write command & address
	 */
	NVRAM_trm_write_cmd(pACB, 0x05, bAddr);
	/*
	 * Write data
	 */
	for (i = 0; i < 8; i++, bData <<= 1) {
		bSendData = NVR_SELECT;
		if (bData & 0x80)	/* Start from bit 7 */
			bSendData |= NVR_BITOUT;
		trm_reg_write8(bSendData , TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
		trm_reg_write8((bSendData | NVR_CLOCK), TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
	}
	trm_reg_write8(NVR_SELECT , TRMREG_GEN_NVRAM);
	NVRAM_trm_wait_30us(pACB);
	/*
	 * Disable chip select
	 */
	trm_reg_write8(0 , TRMREG_GEN_NVRAM);
	NVRAM_trm_wait_30us(pACB);
	trm_reg_write8(NVR_SELECT ,TRMREG_GEN_NVRAM);
	NVRAM_trm_wait_30us(pACB);
	/*
	 * Wait for write ready
	 */
	while (1) {
		trm_reg_write8((NVR_SELECT | NVR_CLOCK), TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
		trm_reg_write8(NVR_SELECT, TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
		if (trm_reg_read8(TRMREG_GEN_NVRAM) & NVR_BITIN) {
			break;
		}
	}
	/*
	 * Disable chip select
	 */
	trm_reg_write8(0, TRMREG_GEN_NVRAM);
	return;
}

/*
 * NVRAM_trm_read_all - read the whole 128-byte serial EEPROM image into
 * the supplied NVRAM buffer.
 */
static void
NVRAM_trm_read_all(PNVRAMTYPE pEEpromBuf, PACB pACB)
{
	u_int8_t	*bpEeprom = (u_int8_t*) pEEpromBuf;
	u_int8_t	bAddr;

	/*
	 * Enable SEEPROM
	 */
	trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) | EN_EEPROM),
	    TRMREG_GEN_CONTROL);
	for (bAddr = 0; bAddr < 128; bAddr++, bpEeprom++)
		*bpEeprom = NVRAM_trm_get_data(pACB, bAddr);
	/*
	 * Disable SEEPROM
	 */
	trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) & ~EN_EEPROM),
	    TRMREG_GEN_CONTROL);
	return;
}

/*
 * NVRAM_trm_get_data - bit-bang one byte out of the serial EEPROM at
 * the given address (bits sampled on the falling clock edge).
 */
static u_int8_t
NVRAM_trm_get_data(PACB pACB, u_int8_t bAddr)
{
	int		i;
	u_int8_t	bReadData, bData = 0;

	/*
	 * Send read command & address
	 */
	NVRAM_trm_write_cmd(pACB, 0x06, bAddr);

	for (i = 0; i < 8; i++) {
		/*
		 * Read data
		 */
		trm_reg_write8((NVR_SELECT | NVR_CLOCK) , TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
		trm_reg_write8(NVR_SELECT , TRMREG_GEN_NVRAM);
		/*
		 * Get data bit while falling edge
		 */
		bReadData = trm_reg_read8(TRMREG_GEN_NVRAM);
		bData <<= 1;
		if (bReadData & NVR_BITIN) {
			bData |= 1;
		}
		NVRAM_trm_wait_30us(pACB);
	}
	/*
	 * Disable chip select
	 */
	trm_reg_write8(0, TRMREG_GEN_NVRAM);
	return (bData);
}

/*
 * NVRAM_trm_wait_30us - delay by arming the chip's general-purpose
 * timer and polling for its timeout bit.
 */
static void
NVRAM_trm_wait_30us(PACB pACB)
{
	/* ScsiPortStallExecution(30); wait 30 us */
	trm_reg_write8(5, TRMREG_GEN_TIMER);
	while (!(trm_reg_read8(TRMREG_GEN_STATUS) & GTIMEOUT));
	return;
}

/*
 * NVRAM_trm_write_cmd - clock a 3-bit opcode and a 7-bit address into
 * the serial EEPROM, MSB first.
 */
static void
NVRAM_trm_write_cmd(PACB pACB, u_int8_t bCmd, u_int8_t bAddr)
{
	int		i;
	u_int8_t	bSendData;

	for (i = 0; i < 3; i++, bCmd <<= 1)
	{
		/*
		 * Program SB + OP code
		 */
		bSendData = NVR_SELECT;
		if (bCmd & 0x04)	/* start from bit 2 */
			bSendData |= NVR_BITOUT;
		trm_reg_write8(bSendData, TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
		trm_reg_write8((bSendData | NVR_CLOCK), TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
	}
	for (i = 0; i < 7; i++, bAddr <<= 1) {
		/*
		 * Program address
		 */
		bSendData = NVR_SELECT;
		if (bAddr & 0x40)	/* Start from bit 6 */
			bSendData |= NVR_BITOUT;
		trm_reg_write8(bSendData , TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
		trm_reg_write8((bSendData | NVR_CLOCK), TRMREG_GEN_NVRAM);
		NVRAM_trm_wait_30us(pACB);
	}
	trm_reg_write8(NVR_SELECT, TRMREG_GEN_NVRAM);
	NVRAM_trm_wait_30us(pACB);
}

/*
 * trm_check_eeprom - read the NVRAM image and verify its 16-bit
 * checksum (must sum to 0x1234); on mismatch, build and write back a
 * default configuration.
 */
static void
trm_check_eeprom(PNVRAMTYPE pEEpromBuf, PACB pACB)
{
	u_int16_t	*wpEeprom = (u_int16_t *) pEEpromBuf;
	u_int16_t	wAddr, wCheckSum;
	u_long	dAddr, *dpEeprom;

	NVRAM_trm_read_all(pEEpromBuf,pACB);
	wCheckSum = 0;
	for (wAddr = 0, wpEeprom = (u_int16_t *) pEEpromBuf;
	    wAddr < 64; wAddr++, wpEeprom++) {
		wCheckSum += *wpEeprom;
	}
	if (wCheckSum != 0x1234) {
		/*
		 * Checksum error, load default
		 */
		pEEpromBuf->NvramSubVendorID[0] =
		    (u_int8_t) PCI_Vendor_ID_TEKRAM;
		pEEpromBuf->NvramSubVendorID[1] =
		    (u_int8_t) (PCI_Vendor_ID_TEKRAM >> 8);
		pEEpromBuf->NvramSubSysID[0] =
		    (u_int8_t) PCI_Device_ID_TRM_S1040;
		pEEpromBuf->NvramSubSysID[1] =
		    (u_int8_t) (PCI_Device_ID_TRM_S1040 >> 8);
		pEEpromBuf->NvramSubClass = 0x00;
		pEEpromBuf->NvramVendorID[0] =
		    (u_int8_t) PCI_Vendor_ID_TEKRAM;
		pEEpromBuf->NvramVendorID[1] =
		    (u_int8_t) (PCI_Vendor_ID_TEKRAM >> 8);
		pEEpromBuf->NvramDeviceID[0] =
		    (u_int8_t) PCI_Device_ID_TRM_S1040;
		pEEpromBuf->NvramDeviceID[1] =
		    (u_int8_t) (PCI_Device_ID_TRM_S1040 >> 8);
		pEEpromBuf->NvramReserved = 0x00;

		for (dAddr = 0, dpEeprom = (u_long *) pEEpromBuf->NvramTarget;
		    dAddr < 16; dAddr++, dpEeprom++) {
			/* NvmTarCfg3,NvmTarCfg2,NvmTarPeriod,NvmTarCfg0 */
			*dpEeprom = 0x00000077;
		}

		/* NvramMaxTag,NvramDelayTime,NvramChannelCfg,NvramScsiId */
		*dpEeprom++ = 0x04000F07;
		/* NvramReserved1,NvramBootLun,NvramBootTarget,NvramReserved0 */
		*dpEeprom++ = 0x00000015;
		for (dAddr = 0; dAddr < 12; dAddr++, dpEeprom++)
			*dpEeprom = 0x00;

		/* Recompute the checksum over the first 63 words. */
		pEEpromBuf->NvramCheckSum = 0x00;
		for (wAddr = 0, wCheckSum = 0,
		    wpEeprom = (u_int16_t *) pEEpromBuf;
		    wAddr < 63; wAddr++, wpEeprom++)
			wCheckSum += *wpEeprom;
		*wpEeprom = 0x1234 - wCheckSum;
		NVRAM_trm_write_all(pEEpromBuf,pACB);
	}
	return;
}

/*
 * trm_initAdapter - program the controller: selection timeout, masked
 * then re-enabled interrupts, SCSI module reset, config registers,
 * host id, async transfer, LED control and DMA config.
 */
static int
trm_initAdapter(PACB pACB, u_int16_t unit)
{
	PNVRAMTYPE	pEEpromBuf;
	u_int16_t	wval;
	u_int8_t	bval;

	pEEpromBuf = &trm_eepromBuf[unit];

	/* 250ms selection timeout */
	trm_reg_write8(SEL_TIMEOUT, TRMREG_SCSI_TIMEOUT);
	/* Mask all the interrupt */
	trm_reg_write8(0x00, TRMREG_DMA_INTEN);
	trm_reg_write8(0x00, TRMREG_SCSI_INTEN);
	/* Reset SCSI module */
	trm_reg_write16(DO_RSTMODULE, TRMREG_SCSI_CONTROL);
	/* program configuration 0 */
	pACB->Config = HCC_AUTOTERM | HCC_PARITY;
	if (trm_reg_read8(TRMREG_GEN_STATUS) & WIDESCSI)
		pACB->Config |= HCC_WIDE_CARD;
	if (pEEpromBuf->NvramChannelCfg & NAC_POWERON_SCSI_RESET)
		pACB->Config |= HCC_SCSI_RESET;
	if (pACB->Config & HCC_PARITY)
		bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
	else
		bval = PHASELATCH | INITIATOR | BLOCKRST ;
	trm_reg_write8(bval,TRMREG_SCSI_CONFIG0);
	/* program configuration 1 */
	trm_reg_write8(0x13, TRMREG_SCSI_CONFIG1);
	/* program Host ID */
	bval = pEEpromBuf->NvramScsiId;
	trm_reg_write8(bval, TRMREG_SCSI_HOSTID);
	/* set asynchronous transfer */
	trm_reg_write8(0x00, TRMREG_SCSI_OFFSET);
	/* Turn LED control off */
	wval = trm_reg_read16(TRMREG_GEN_CONTROL) & 0x7F;
	trm_reg_write16(wval, TRMREG_GEN_CONTROL);
	/* DMA config */
	wval = trm_reg_read16(TRMREG_DMA_CONFIG) | DMA_ENHANCE;
	trm_reg_write16(wval, TRMREG_DMA_CONFIG);
	/* Clear pending interrupt status */
	trm_reg_read8(TRMREG_SCSI_INTSTATUS);
	/* Enable SCSI interrupt */
	trm_reg_write8(0x7F, TRMREG_SCSI_INTEN);
	trm_reg_write8(EN_SCSIINTR, TRMREG_DMA_INTEN);
	return (0);
}

/*
 * trm_mapSRB - busdma callback: record the bus address of the SRB pool.
 */
static void
trm_mapSRB(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	PACB pACB;

	pACB = (PACB)arg;
	pACB->srb_physbase = segs->ds_addr;
}

/*
 * trm_dmamap_cb - generic busdma callback: store the mapped bus address
 * through the supplied bus_addr_t pointer.
 */
static void
trm_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/*
 * trm_init - allocate and set up all per-adapter resources: I/O port
 * window, adapter-type detection by PCI device id, DMA tags and memory
 * for data buffers, the SRB pool, sense buffers and SG lists, NVRAM
 * checksum verification, ACB/DCB initialization and chip programming.
 * Returns the initialized ACB or NULL on failure.
 */
static PACB
trm_init(u_int16_t unit, device_t dev)
{
	PACB		pACB;
	int		rid = PCIR_BAR(0), i = 0, j = 0;
	u_int16_t	adaptType = 0;

	pACB = (PACB) device_get_softc(dev);
	if (!pACB) {
		printf("trm%d: cannot allocate ACB !\n", unit);
		return (NULL);
	}
	pACB->iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &rid, RF_ACTIVE);
	if (pACB->iores == NULL) {
		printf("trm_init: bus_alloc_resource failed!\n");
		return (NULL);
	}
	switch (pci_get_devid(dev)) {
	case PCI_DEVICEID_TRMS1040:
		adaptType = 0;
		break;
	case PCI_DEVICEID_TRMS2080:
		adaptType = 1;
		break;
	default:
		printf("trm_init %d: unknown adapter type!\n", unit);
		goto bad;
	}
	pACB->dev = dev;
	pACB->tag = rman_get_bustag(pACB->iores);
	pACB->bsh = rman_get_bushandle(pACB->iores);
	/* DMA tag for data buffer transfers */
	if (bus_dma_tag_create(
	    /*parent_dmat*/	pACB->parent_dmat,
	    /*alignment*/	1,
	    /*boundary*/	0,
	    /*lowaddr*/		BUS_SPACE_MAXADDR,
	    /*highaddr*/	BUS_SPACE_MAXADDR,
	    /*filter*/		NULL,
	    /*filterarg*/	NULL,
	    /*maxsize*/		MAXBSIZE,
	    /*nsegments*/	TRM_NSEG,
	    /*maxsegsz*/	TRM_MAXTRANSFER_SIZE,
	    /*flags*/		BUS_DMA_ALLOCNOW,
	    /*lockfunc*/	busdma_lock_mutex,
	    /*lockarg*/		&Giant,
	    /* dmat */		&pACB->buffer_dmat) != 0)
		goto bad;
	/* DMA tag for our ccb structures */
	if (bus_dma_tag_create(
	    /*parent_dmat*/	pACB->parent_dmat,
	    /*alignment*/	1,
	    /*boundary*/	0,
	    /*lowaddr*/		BUS_SPACE_MAXADDR,
	    /*highaddr*/	BUS_SPACE_MAXADDR,
	    /*filter*/		NULL,
	    /*filterarg*/	NULL,
	    /*maxsize*/		TRM_MAX_SRB_CNT * sizeof(TRM_SRB),
	    /*nsegments*/	1,
	    /*maxsegsz*/	TRM_MAXTRANSFER_SIZE,
	    /*flags*/		0,
	    /*lockfunc*/	busdma_lock_mutex,
	    /*lockarg*/		&Giant,
	    /*dmat*/		&pACB->srb_dmat) != 0) {
		printf("trm_init %d: bus_dma_tag_create SRB failure\n",
		    unit);
		goto bad;
	}
	if (bus_dmamem_alloc(pACB->srb_dmat, (void **)&pACB->pFreeSRB,
	    BUS_DMA_NOWAIT, &pACB->srb_dmamap) != 0) {
		printf("trm_init %d: bus_dmamem_alloc SRB failure\n", unit);
		goto bad;
	}
bus_dmamap_load(pACB->srb_dmat, pACB->srb_dmamap, pACB->pFreeSRB, TRM_MAX_SRB_CNT * sizeof(TRM_SRB), trm_mapSRB, pACB, /* flags */0); /* Create, allocate, and map DMA buffers for autosense data */ if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, sizeof(struct scsi_sense_data) * TRM_MAX_SRB_CNT, /*nsegments*/1, /*maxsegsz*/TRM_MAXTRANSFER_SIZE, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &pACB->sense_dmat) != 0) { if (bootverbose) device_printf(dev, "cannot create sense buffer dmat\n"); goto bad; } if (bus_dmamem_alloc(pACB->sense_dmat, (void **)&pACB->sense_buffers, BUS_DMA_NOWAIT, &pACB->sense_dmamap) != 0) goto bad; bus_dmamap_load(pACB->sense_dmat, pACB->sense_dmamap, pACB->sense_buffers, sizeof(struct scsi_sense_data) * TRM_MAX_SRB_CNT, trm_dmamap_cb, &pACB->sense_busaddr, /*flags*/0); trm_check_eeprom(&trm_eepromBuf[unit],pACB); trm_initACB(pACB, adaptType, unit); for (i = 0; i < (pACB->max_id + 1); i++) { if (pACB->AdaptSCSIID == i) continue; for(j = 0; j < (pACB->max_lun + 1); j++) { pACB->scan_devices[i][j] = 1; /* we assume we need to scan all devices */ trm_initDCB(pACB, &pACB->DCBarray[i][j], unit, i, j); } } bzero(pACB->pFreeSRB, TRM_MAX_SRB_CNT * sizeof(TRM_SRB)); if (bus_dma_tag_create( /*parent_dmat*/NULL, /*alignment*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ TRM_MAX_SG_LISTENTRY * sizeof(SGentry), /*nsegments*/ 1, /*maxsegsz*/ TRM_MAXTRANSFER_SIZE, /*flags*/ 0, /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &Giant, /*dmat*/ &pACB->sg_dmat) != 0) goto bad; if (trm_initSRB(pACB)) { printf("trm_initSRB: error\n"); goto bad; } if (trm_initAdapter(pACB, unit)) { printf("trm_initAdapter: initial ERROR\n"); goto bad; } return (pACB); bad: if (pACB->iores) bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), pACB->iores); if 
(pACB->sense_dmamap) {
	bus_dmamap_unload(pACB->sense_dmat, pACB->sense_dmamap);
	bus_dmamem_free(pACB->sense_dmat, pACB->sense_buffers,
	    pACB->sense_dmamap);
	bus_dmamap_destroy(pACB->sense_dmat, pACB->sense_dmamap);
}
if (pACB->sense_dmat)
	bus_dma_tag_destroy(pACB->sense_dmat);
if (pACB->sg_dmat) {
	trm_destroySRB(pACB);
	bus_dma_tag_destroy(pACB->sg_dmat);
}
if (pACB->srb_dmamap) {
	bus_dmamap_unload(pACB->srb_dmat, pACB->srb_dmamap);
	bus_dmamem_free(pACB->srb_dmat, pACB->pFreeSRB, pACB->srb_dmamap);
	bus_dmamap_destroy(pACB->srb_dmat, pACB->srb_dmamap);
}
if (pACB->srb_dmat)
	bus_dma_tag_destroy(pACB->srb_dmat);
if (pACB->buffer_dmat)
	bus_dma_tag_destroy(pACB->buffer_dmat);
return (NULL);
}

/*
 * trm_attach: newbus device_attach entry point.
 *
 * Initializes the adapter via trm_init(), hooks up the interrupt
 * handler, then registers the controller with CAM: allocate a SIM
 * request queue, allocate and register the SIM, and create a wildcard
 * path for async events.  On any failure all resources acquired so
 * far are released at the "bad:" label and ENXIO is returned.
 */
static int
trm_attach(device_t dev)
{
	struct cam_devq *device_Q;
	u_long device_id;
	PACB pACB = 0;
	int rid = 0;
	int unit = device_get_unit(dev);

	/* NOTE(review): device_id is read but never used below — confirm. */
	device_id = pci_get_devid(dev);
	/*
	 * These cards do not allow memory mapped accesses
	 */
	if ((pACB = trm_init((u_int16_t) unit, dev)) == NULL) {
		printf("trm%d: trm_init error!\n",unit);
		return (ENXIO);
	}
	/* After setting up the adapter, map our interrupt */
	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on the bus
	 * start queue to reset to the idle loop.
	 * Create device queue of SIM(s)
	 * (MAX_START_JOB - 1) : max_sim_transactions
	 */
	pACB->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (pACB->irq == NULL ||
	    bus_setup_intr(dev, pACB->irq, INTR_TYPE_CAM, trm_Interrupt,
	    pACB, &pACB->ih)) {
		printf("trm%d: register Interrupt handler error!\n", unit);
		goto bad;
	}
	device_Q = cam_simq_alloc(TRM_MAX_START_JOB);
	if (device_Q == NULL){
		printf("trm%d: device_Q == NULL !\n",unit);
		goto bad;
	}
	/*
	 * Now tell the generic SCSI layer about our bus.
	 * If this is the xpt layer creating a sim, then it's OK
	 * to wait for an allocation.
	 * XXX Should we pass in a flag to indicate that wait is OK?
	 *
	 * SIM allocation — SCSI Interface Modules.
	 * The sim driver creates a sim for each controller.  The sim device
	 * queue is separately created in order to allow resource sharing
	 * between sims.  For instance, a driver may create one sim for each
	 * channel of a multi-channel controller and use the same queue for
	 * each channel.  In this way, the queue resources are shared across
	 * all the channels of the multi-channel controller.
	 *   trm_action : sim_action_func
	 *   trm_poll   : sim_poll_func
	 *   "trm"      : sim_name (if sim_name = "xpt": M_DEVBUF,M_WAITOK)
	 *   pACB       : *softc  (if sim_name <> "xpt": M_DEVBUF,M_NOWAIT)
	 *   pACB->unit : unit
	 *   1          : max_dev_transactions
	 *   MAX_TAGS   : max_tagged_dev_transactions
	 *
	 * Construct our first channel SIM entry
	 */
	pACB->psim = cam_sim_alloc(trm_action, trm_poll, "trm", pACB,
	    unit, 1, TRM_MAX_TAGS_CMD_QUEUE, device_Q);
	if (pACB->psim == NULL) {
		printf("trm%d: SIM allocate fault !\n",unit);
		cam_simq_free(device_Q);  /* SIM allocate fault*/
		goto bad;
	}
	if (xpt_bus_register(pACB->psim, 0) != CAM_SUCCESS) {
		printf("trm%d: xpt_bus_register fault !\n",unit);
		goto bad;
	}
	if (xpt_create_path(&pACB->ppath, NULL, cam_sim_path(pACB->psim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("trm%d: xpt_create_path fault !\n",unit);
		xpt_bus_deregister(cam_sim_path(pACB->psim));
		goto bad;
	}
	return (0);
bad:
	/* Unwind in reverse order of allocation; each step is guarded so
	 * this label is safe to reach from any point above. */
	if (pACB->iores)
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
		    pACB->iores);
	if (pACB->sg_dmat) {
		trm_destroySRB(pACB);
		bus_dma_tag_destroy(pACB->sg_dmat);
	}
	if (pACB->srb_dmamap) {
		bus_dmamap_unload(pACB->srb_dmat, pACB->srb_dmamap);
		bus_dmamem_free(pACB->srb_dmat, pACB->pFreeSRB,
		    pACB->srb_dmamap);
		bus_dmamap_destroy(pACB->srb_dmat, pACB->srb_dmamap);
	}
	if (pACB->srb_dmat)
		bus_dma_tag_destroy(pACB->srb_dmat);
	if (pACB->sense_dmamap) {
		bus_dmamap_unload(pACB->sense_dmat, pACB->sense_dmamap);
		bus_dmamem_free(pACB->sense_dmat, pACB->sense_buffers,
		    pACB->sense_dmamap);
		bus_dmamap_destroy(pACB->sense_dmat, pACB->sense_dmamap);
	}
	if (pACB->sense_dmat)
		bus_dma_tag_destroy(pACB->sense_dmat);
	if (pACB->buffer_dmat)
		bus_dma_tag_destroy(pACB->buffer_dmat);
	if (pACB->ih)
		bus_teardown_intr(dev, pACB->irq, pACB->ih);
	if (pACB->irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, pACB->irq);
	if (pACB->psim)
		cam_sim_free(pACB->psim, TRUE);
	return (ENXIO);
}

/*
 * pci_device
 * trm_probe (device_t tag, pcidi_t type)
 *
 * Match the two supported Tekram PCI device IDs and set the
 * description; return ENXIO for anything else.
 */
static int
trm_probe(device_t dev)
{
	switch (pci_get_devid(dev)) {
	case PCI_DEVICEID_TRMS1040:
		device_set_desc(dev,
		    "Tekram DC395U/UW/F DC315/U Fast20 Wide SCSI Adapter");
		return (0);
	case PCI_DEVICEID_TRMS2080:
		device_set_desc(dev,
		    "Tekram DC395U2D/U2W Fast40 Wide SCSI Adapter");
		/* NOTE(review): style-inconsistent with the (0) above. */
		return 0;
	default:
		return (ENXIO);
	}
}

/*
 * trm_detach: release, in reverse order of attach, everything
 * trm_init()/trm_attach() acquired: I/O window, SRBs and all DMA
 * tags/maps/memory, the interrupt, and finally the CAM registration
 * (async notice, path, bus, SIM).  Unlike the attach error path the
 * calls here are unguarded — the resources are assumed present.
 */
static int
trm_detach(device_t dev)
{
	PACB pACB = device_get_softc(dev);

	bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), pACB->iores);
	trm_destroySRB(pACB);
	bus_dma_tag_destroy(pACB->sg_dmat);
	bus_dmamap_unload(pACB->srb_dmat, pACB->srb_dmamap);
	bus_dmamem_free(pACB->srb_dmat, pACB->pFreeSRB, pACB->srb_dmamap);
	bus_dmamap_destroy(pACB->srb_dmat, pACB->srb_dmamap);
	bus_dma_tag_destroy(pACB->srb_dmat);
	bus_dmamap_unload(pACB->sense_dmat, pACB->sense_dmamap);
	bus_dmamem_free(pACB->sense_dmat, pACB->sense_buffers,
	    pACB->sense_dmamap);
	bus_dmamap_destroy(pACB->sense_dmat, pACB->sense_dmamap);
	bus_dma_tag_destroy(pACB->sense_dmat);
	bus_dma_tag_destroy(pACB->buffer_dmat);
	bus_teardown_intr(dev, pACB->irq, pACB->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, pACB->irq);
	xpt_async(AC_LOST_DEVICE, pACB->ppath, NULL);
	xpt_free_path(pACB->ppath);
	xpt_bus_deregister(cam_sim_path(pACB->psim));
	cam_sim_free(pACB->psim, TRUE);
	return (0);
}

static device_method_t trm_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		trm_probe),
	DEVMETHOD(device_attach,	trm_attach),
	DEVMETHOD(device_detach,	trm_detach),
	{ 0, 0 }
};

static driver_t trm_driver = {
	"trm", trm_methods, sizeof(struct _ACB)
};

static devclass_t trm_devclass;
DRIVER_MODULE(trm, pci, trm_driver, trm_devclass, 0, 0);
MODULE_DEPEND(trm, cam, 1, 1, 1);
Index: head/sys/dev/twa/twa_includes.h
=================================================================== --- head/sys/dev/twa/twa_includes.h (revision 129878) +++ head/sys/dev/twa/twa_includes.h (revision 129879) @@ -1,63 +1,64 @@ /*- * Copyright (c) 2003-04 3ware, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * 3ware driver for 9000 series storage controllers. 
* * Author: Vinod Kashyap */ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include Index: head/sys/dev/twe/twe_compat.h =================================================================== --- head/sys/dev/twe/twe_compat.h (revision 129878) +++ head/sys/dev/twe/twe_compat.h (revision 129879) @@ -1,161 +1,162 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2003 Paul Saab * Copyright (c) 2003 Vinod Kashyap * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Portability and compatibility interfaces. 
*/ #ifdef __FreeBSD__ /****************************************************************************** * FreeBSD */ #define TWE_SUPPORTED_PLATFORM #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define TWE_DRIVER_NAME twe #define TWED_DRIVER_NAME twed #define TWE_MALLOC_CLASS M_TWE /* * Wrappers for bus-space actions */ #define TWE_CONTROL(sc, val) bus_space_write_4((sc)->twe_btag, (sc)->twe_bhandle, 0x0, (u_int32_t)val) #define TWE_STATUS(sc) (u_int32_t)bus_space_read_4((sc)->twe_btag, (sc)->twe_bhandle, 0x4) #define TWE_COMMAND_QUEUE(sc, val) bus_space_write_4((sc)->twe_btag, (sc)->twe_bhandle, 0x8, (u_int32_t)val) #define TWE_RESPONSE_QUEUE(sc) (TWE_Response_Queue)bus_space_read_4((sc)->twe_btag, (sc)->twe_bhandle, 0xc) /* * FreeBSD-specific softc elements */ #define TWE_PLATFORM_SOFTC \ bus_dmamap_t twe_cmdmap; /* DMA map for command */ \ u_int32_t twe_cmdphys; /* address of command in controller space */ \ device_t twe_dev; /* bus device */ \ dev_t twe_dev_t; /* control device */ \ struct resource *twe_io; /* register interface window */ \ bus_space_handle_t twe_bhandle; /* bus space handle */ \ bus_space_tag_t twe_btag; /* bus space tag */ \ bus_dma_tag_t twe_parent_dmat; /* parent DMA tag */ \ bus_dma_tag_t twe_buffer_dmat; /* data buffer DMA tag */ \ bus_dma_tag_t twe_cmd_dmat; /* command buffer DMA tag */ \ bus_dma_tag_t twe_immediate_dmat; /* command buffer DMA tag */ \ struct resource *twe_irq; /* interrupt */ \ void *twe_intr; /* interrupt handle */ \ struct intr_config_hook twe_ich; /* delayed-startup hook */ \ void *twe_cmd; /* command structures */ \ void *twe_immediate; /* immediate commands */ \ bus_dmamap_t twe_immediate_map; \ struct sysctl_ctx_list sysctl_ctx; \ struct sysctl_oid *sysctl_tree; /* * FreeBSD-specific request elements */ #define TWE_PLATFORM_REQUEST \ bus_dmamap_t tr_dmamap; /* DMA map for data */ \ u_int32_t tr_dataphys; /* 
data buffer base address in controller space */ /* * Output identifying the controller/disk */ #define twe_printf(sc, fmt, args...) device_printf(sc->twe_dev, fmt , ##args) #define twed_printf(twed, fmt, args...) device_printf(twed->twed_dev, fmt , ##args) #if __FreeBSD_version < 500003 # include # define INTR_ENTROPY 0 # define FREEBSD_4 # include /* old buf style */ typedef struct buf twe_bio; typedef struct buf_queue_head twe_bioq; # define TWE_BIO_QINIT(bq) bufq_init(&bq); # define TWE_BIO_QINSERT(bq, bp) bufq_insert_tail(&bq, bp) # define TWE_BIO_QFIRST(bq) bufq_first(&bq) # define TWE_BIO_QREMOVE(bq, bp) bufq_remove(&bq, bp) # define TWE_BIO_IS_READ(bp) ((bp)->b_flags & B_READ) # define TWE_BIO_DATA(bp) (bp)->b_data # define TWE_BIO_LENGTH(bp) (bp)->b_bcount # define TWE_BIO_LBA(bp) (bp)->b_pblkno # define TWE_BIO_SOFTC(bp) (bp)->b_dev->si_drv1 # define TWE_BIO_UNIT(bp) *(int *)((bp)->b_dev->si_drv2) # define TWE_BIO_SET_ERROR(bp, err) do { (bp)->b_error = err; (bp)->b_flags |= B_ERROR;} while(0) # define TWE_BIO_HAS_ERROR(bp) ((bp)->b_flags & B_ERROR) # define TWE_BIO_RESID(bp) (bp)->b_resid # define TWE_BIO_DONE(bp) biodone(bp) # define TWE_BIO_STATS_START(bp) devstat_start_transaction(&((struct twed_softc *)TWE_BIO_SOFTC(bp))->twed_stats) # define TWE_BIO_STATS_END(bp) devstat_end_transaction_buf(&((struct twed_softc *)TWE_BIO_SOFTC(bp))->twed_stats, bp) #else # include # include typedef struct bio twe_bio; typedef struct bio_queue_head twe_bioq; # define TWE_BIO_QINIT(bq) bioq_init(&bq); # define TWE_BIO_QINSERT(bq, bp) bioq_insert_tail(&bq, bp) # define TWE_BIO_QFIRST(bq) bioq_first(&bq) # define TWE_BIO_QREMOVE(bq, bp) bioq_remove(&bq, bp) # define TWE_BIO_IS_READ(bp) ((bp)->bio_cmd == BIO_READ) # define TWE_BIO_DATA(bp) (bp)->bio_data # define TWE_BIO_LENGTH(bp) (bp)->bio_bcount # define TWE_BIO_LBA(bp) (bp)->bio_pblkno # define TWE_BIO_SOFTC(bp) (bp)->bio_disk->d_drv1 # define TWE_BIO_UNIT(bp) *(int *)(bp->bio_driver1) # define TWE_BIO_SET_ERROR(bp, 
err) do { (bp)->bio_error = err; (bp)->bio_flags |= BIO_ERROR;} while(0) # define TWE_BIO_HAS_ERROR(bp) ((bp)->bio_flags & BIO_ERROR) # define TWE_BIO_RESID(bp) (bp)->bio_resid # define TWE_BIO_DONE(bp) biodone(bp) # define TWE_BIO_STATS_START(bp) # define TWE_BIO_STATS_END(bp) #endif #endif /* FreeBSD */ #ifndef TWE_SUPPORTED_PLATFORM #error platform not supported #endif Index: head/sys/dev/tx/if_tx.c =================================================================== --- head/sys/dev/tx/if_tx.c (revision 129878) +++ head/sys/dev/tx/if_tx.c (revision 129879) @@ -1,1915 +1,1916 @@ /*- * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * EtherPower II 10/100 Fast Ethernet (SMC 9432 serie) * * These cards are based on SMC83c17x (EPIC) chip and one of the various * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on * card model. All cards support 10baseT/UTP and 100baseTX half- and full- * duplex (SMB9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX also * supports fibre optics. * * Thanks are going to Steve Bauer and Jason Wright. */ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include "miidevs.h" #include #include "miibus_if.h" #include #include MODULE_DEPEND(tx, pci, 1, 1, 1); MODULE_DEPEND(tx, ether, 1, 1, 1); MODULE_DEPEND(tx, miibus, 1, 1, 1); static int epic_ifioctl(struct ifnet *, u_long, caddr_t); static void epic_intr(void *); static void epic_tx_underrun(epic_softc_t *); static void epic_ifstart(struct ifnet *); static void epic_ifwatchdog(struct ifnet *); static void epic_stats_update(epic_softc_t *); static void epic_init(void *); static void epic_stop(epic_softc_t *); static void epic_rx_done(epic_softc_t *); static void epic_tx_done(epic_softc_t *); static int epic_init_rings(epic_softc_t *); static void epic_free_rings(epic_softc_t *); static void epic_stop_activity(epic_softc_t *); static int epic_queue_last_packet(epic_softc_t *); static void epic_start_activity(epic_softc_t *); static void epic_set_rx_mode(epic_softc_t *); static void epic_set_tx_mode(epic_softc_t *); static void epic_set_mc_table(epic_softc_t *); static uint32_t tx_mchash(const uint8_t *); static int epic_read_eeprom(epic_softc_t *,u_int16_t); static void epic_output_eepromw(epic_softc_t *, u_int16_t); static u_int16_t epic_input_eepromw(epic_softc_t *); static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t); static void 
epic_write_eepromreg(epic_softc_t *,u_int8_t);
static u_int8_t epic_read_eepromreg(epic_softc_t *);
static int epic_read_phy_reg(epic_softc_t *, int, int);
static void epic_write_phy_reg(epic_softc_t *, int, int, int);

static int epic_miibus_readreg(device_t, int, int);
static int epic_miibus_writereg(device_t, int, int, int);
static void epic_miibus_statchg(device_t);
static void epic_miibus_mediainit(device_t);

static int epic_ifmedia_upd(struct ifnet *);
static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int epic_probe(device_t);
static int epic_attach(device_t);
static void epic_shutdown(device_t);
static int epic_detach(device_t);
static void epic_release(epic_softc_t *);
static struct epic_type *epic_devtype(device_t);

static device_method_t epic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		epic_probe),
	DEVMETHOD(device_attach,	epic_attach),
	DEVMETHOD(device_detach,	epic_detach),
	DEVMETHOD(device_shutdown,	epic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),

	{ 0, 0 }
};

static driver_t epic_driver = {
	"tx", epic_methods, sizeof(epic_softc_t)
};

static devclass_t epic_devclass;
DRIVER_MODULE(tx, pci, epic_driver, epic_devclass, 0, 0);
DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);

/* Table of PCI vendor/device IDs this driver claims. */
static struct epic_type epic_devs[] = {
	{ SMC_VENDORID, SMC_DEVICEID_83C170, "SMC EtherPower II 10/100" },
	{ 0, 0, NULL }
};

/*
 * epic_probe: newbus probe — claim the device if its PCI IDs appear
 * in epic_devs, setting the human-readable description.
 */
static int
epic_probe(dev)
	device_t dev;
{
	struct epic_type *t;

	t = epic_devtype(dev);
	if (t != NULL) {
		device_set_desc(dev, t->name);
		return (0);
	}
	return (ENXIO);
}

/*
 * epic_devtype: linear search of epic_devs for a vendor/device ID
 * match; returns the matching entry or NULL.
 */
static struct epic_type *
epic_devtype(dev)
	device_t dev;
{
	struct epic_type *t;

	t = epic_devs;
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->ven_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			return (t);
		}
		t++;
	}
	return (NULL);
}
#ifdef EPIC_USEIOSPACE
#define EPIC_RES SYS_RES_IOPORT #define EPIC_RID PCIR_BASEIO #else #define EPIC_RES SYS_RES_MEMORY #define EPIC_RID PCIR_BASEMEM #endif static void epic_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } /* * Attach routine: map registers, allocate softc, rings and descriptors. * Reset to known state. */ static int epic_attach(dev) device_t dev; { struct ifnet *ifp; epic_softc_t *sc; int unit, error; int i, s, rid, tmp; s = splimp(); sc = device_get_softc(dev); unit = device_get_unit(dev); /* Preinitialize softc structure. */ sc->unit = unit; sc->dev = dev; /* Fill ifnet structure. */ ifp = &sc->sc_if; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST; ifp->if_ioctl = epic_ifioctl; ifp->if_start = epic_ifstart; ifp->if_watchdog = epic_ifwatchdog; ifp->if_init = epic_init; ifp->if_timer = 0; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1; /* Enable busmastering. */ pci_enable_busmaster(dev); rid = EPIC_RID; sc->res = bus_alloc_resource_any(dev, EPIC_RES, &rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->sc_st = rman_get_bustag(sc->res); sc->sc_sh = rman_get_bushandle(sc->res); /* Allocate interrupt. */ rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Allocate DMA tags. 
*/ error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * EPIC_MAX_FRAGS, EPIC_MAX_FRAGS, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->mtag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct epic_rx_desc) * RX_RING_SIZE, 1, sizeof(struct epic_rx_desc) * RX_RING_SIZE, 0, busdma_lock_mutex, &Giant, &sc->rtag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct epic_tx_desc) * TX_RING_SIZE, 1, sizeof(struct epic_tx_desc) * TX_RING_SIZE, 0, busdma_lock_mutex, &Giant, &sc->ttag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct epic_frag_list) * TX_RING_SIZE, 1, sizeof(struct epic_frag_list) * TX_RING_SIZE, 0, busdma_lock_mutex, &Giant, &sc->ftag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); goto fail; } /* Allocate DMA safe memory and get the DMA addresses. 
*/ error = bus_dmamem_alloc(sc->ftag, (void **)&sc->tx_flist, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fmap); if (error) { device_printf(dev, "couldn't allocate dma memory\n"); goto fail; } error = bus_dmamap_load(sc->ftag, sc->fmap, sc->tx_flist, sizeof(struct epic_frag_list) * TX_RING_SIZE, epic_dma_map_addr, &sc->frag_addr, 0); if (error) { device_printf(dev, "couldn't map dma memory\n"); goto fail; } error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tmap); if (error) { device_printf(dev, "couldn't allocate dma memory\n"); goto fail; } error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, sizeof(struct epic_tx_desc) * TX_RING_SIZE, epic_dma_map_addr, &sc->tx_addr, 0); if (error) { device_printf(dev, "couldn't map dma memory\n"); goto fail; } error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rmap); if (error) { device_printf(dev, "couldn't allocate dma memory\n"); goto fail; } error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, sizeof(struct epic_rx_desc) * RX_RING_SIZE, epic_dma_map_addr, &sc->rx_addr, 0); if (error) { device_printf(dev, "couldn't map dma memory\n"); goto fail; } /* Bring the chip out of low-power mode. */ CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET); DELAY(500); /* Workaround for Application Note 7-15. */ for (i = 0; i < 16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST); /* Read MAC address from EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++) ((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i); /* Set Non-Volatile Control Register from EEPROM. */ CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F); /* Set defaults. */ sc->tx_threshold = TRANSMIT_THRESHOLD; sc->txcon = TXCON_DEFAULT; sc->miicfg = MIICFG_SMI_ENABLE; sc->phyid = EPIC_UNKN_PHY; sc->serinst = -1; /* Fetch card id. 
*/ sc->cardvend = pci_read_config(dev, PCIR_SUBVEND_0, 2); sc->cardid = pci_read_config(dev, PCIR_SUBDEV_0, 2); if (sc->cardvend != SMC_VENDORID) device_printf(dev, "unknown card vendor %04xh\n", sc->cardvend); /* Do ifmedia setup. */ if (mii_phy_probe(dev, &sc->miibus, epic_ifmedia_upd, epic_ifmedia_sts)) { device_printf(dev, "ERROR! MII without any PHY!?\n"); error = ENXIO; goto fail; } /* board type and ... */ printf(" type "); for(i = 0x2c; i < 0x32; i++) { tmp = epic_read_eeprom(sc, i); if (' ' == (u_int8_t)tmp) break; printf("%c", (u_int8_t)tmp); tmp >>= 8; if (' ' == (u_int8_t)tmp) break; printf("%c", (u_int8_t)tmp); } printf("\n"); /* Initialize rings. */ if (epic_init_rings(sc)) { device_printf(dev, "failed to init rings\n"); error = ENXIO; goto fail; } ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; callout_handle_init(&sc->stat_ch); /* Activate our interrupt handler. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, epic_intr, sc, &sc->sc_ih); if (error) { device_printf(dev, "couldn't set up irq\n"); goto fail; } /* Attach to OS's managers. */ ether_ifattach(ifp, sc->sc_macaddr); splx(s); return (0); fail: epic_release(sc); splx(s); return (error); } /* * Free any resources allocated by the driver. 
 */
/*
 * epic_release: free everything epic_attach() acquired, in any order
 * that is safe after the chip is stopped.  Every teardown is guarded,
 * so this is also usable from epic_attach()'s "fail:" path where only
 * a prefix of the resources exists.
 */
static void
epic_release(epic_softc_t *sc)
{
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, EPIC_RES, EPIC_RID, sc->res);
	epic_free_rings(sc);
	if (sc->tx_flist) {
		bus_dmamap_unload(sc->ftag, sc->fmap);
		bus_dmamem_free(sc->ftag, sc->tx_flist, sc->fmap);
		bus_dmamap_destroy(sc->ftag, sc->fmap);
	}
	if (sc->tx_desc) {
		bus_dmamap_unload(sc->ttag, sc->tmap);
		bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
		bus_dmamap_destroy(sc->ttag, sc->tmap);
	}
	if (sc->rx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
		bus_dmamap_destroy(sc->rtag, sc->rmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ftag)
		bus_dma_tag_destroy(sc->ftag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);
}

/*
 * Detach driver and free resources.
 */
static int
epic_detach(dev)
	device_t dev;
{
	struct ifnet *ifp;
	epic_softc_t *sc;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Detach from the network stack first, then quiesce the chip. */
	ether_ifdetach(ifp);

	epic_stop(sc);
	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	bus_teardown_intr(dev, sc->irq, sc->sc_ih);
	epic_release(sc);
	splx(s);
	return (0);
}

#undef EPIC_RES
#undef EPIC_RID

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
epic_shutdown(dev)
	device_t dev;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	epic_stop(sc);
}

/*
 * This is if_ioctl handler.
*/ static int epic_ifioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { epic_softc_t *sc = ifp->if_softc; struct mii_data *mii; struct ifreq *ifr = (struct ifreq *) data; int x, error = 0; x = splimp(); switch (command) { case SIOCSIFMTU: if (ifp->if_mtu == ifr->ifr_mtu) break; /* XXX Though the datasheet doesn't imply any * limitations on RX and TX sizes beside max 64Kb * DMA transfer, seems we can't send more then 1600 * data bytes per ethernet packet (transmitter hangs * up if more data is sent). */ if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) { ifp->if_mtu = ifr->ifr_mtu; epic_stop(sc); epic_init(sc); } else error = EINVAL; break; case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If it is marked down and running, then stop it. */ if (ifp->if_flags & IFF_UP) { if ((ifp->if_flags & IFF_RUNNING) == 0) { epic_init(sc); break; } } else { if (ifp->if_flags & IFF_RUNNING) { epic_stop(sc); break; } } /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */ epic_stop_activity(sc); epic_set_mc_table(sc); epic_set_rx_mode(sc); epic_start_activity(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: epic_set_mc_table(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } splx(x); return (error); } static void epic_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct epic_frag_list *flist; int i; if (error) return; KASSERT(nseg <= EPIC_MAX_FRAGS, ("too many DMA segments")); flist = arg; /* Fill fragments list. 
*/ for (i = 0; i < nseg; i++) { KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large")); flist->frag[i].fraglen = segs[i].ds_len; flist->frag[i].fragaddr = segs[i].ds_addr; } flist->numfrags = nseg; } static void epic_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct epic_rx_desc *desc; if (error) return; KASSERT(nseg == 1, ("too many DMA segments")); desc = arg; desc->bufaddr = segs->ds_addr; } /* * This is if_start handler. It takes mbufs from if_snd queue * and queue them for transmit, one by one, until TX ring become full * or queue become empty. */ static void epic_ifstart(ifp) struct ifnet * ifp; { epic_softc_t *sc = ifp->if_softc; struct epic_tx_buffer *buf; struct epic_tx_desc *desc; struct epic_frag_list *flist; struct mbuf *m0, *m; int error; while (sc->pending_txs < TX_RING_SIZE) { buf = sc->tx_buffer + sc->cur_tx; desc = sc->tx_desc + sc->cur_tx; flist = sc->tx_flist + sc->cur_tx; /* Get next packet to send. */ IF_DEQUEUE(&ifp->if_snd, m0); /* If nothing to send, return. */ if (m0 == NULL) return; error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, epic_dma_map_txbuf, flist, 0); if (error && error != EFBIG) { m_freem(m0); ifp->if_oerrors++; continue; } /* * If packet was more than EPIC_MAX_FRAGS parts, * recopy packet to a newly allocated mbuf cluster. 
*/ if (error) { m = m_defrag(m0, M_DONTWAIT); if (m == NULL) { m_freem(m0); ifp->if_oerrors++; continue; } m_freem(m0); m0 = m; error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, epic_dma_map_txbuf, flist, 0); if (error) { m_freem(m); ifp->if_oerrors++; continue; } } bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); buf->mbuf = m0; sc->pending_txs++; sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK; desc->control = 0x01; desc->txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN); desc->status = 0x8000; bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED); /* Set watchdog timer. */ ifp->if_timer = 8; BPF_MTAP(ifp, m0); } ifp->if_flags |= IFF_OACTIVE; } /* * Synopsis: Finish all received frames. */ static void epic_rx_done(sc) epic_softc_t *sc; { struct ifnet *ifp = &sc->sc_if; u_int16_t len; struct epic_rx_buffer *buf; struct epic_rx_desc *desc; struct mbuf *m; bus_dmamap_t map; int error; bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_POSTREAD); while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) { buf = sc->rx_buffer + sc->cur_rx; desc = sc->rx_desc + sc->cur_rx; /* Switch to next descriptor. */ sc->cur_rx = (sc->cur_rx + 1) & RX_RING_MASK; /* * Check for RX errors. This should only happen if * SAVE_ERRORED_PACKETS is set. RX errors generate * RXE interrupt usually. */ if ((desc->status & 1) == 0) { ifp->if_ierrors++; desc->status = 0x8000; continue; } /* Save packet length and mbuf contained packet. */ bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); len = desc->rxlength - ETHER_CRC_LEN; m = buf->mbuf; /* Try to get an mbuf cluster. 
*/ buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (buf->mbuf == NULL) { buf->mbuf = m; desc->status = 0x8000; ifp->if_ierrors++; continue; } buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; m_adj(buf->mbuf, ETHER_ALIGN); /* Point to new mbuf, and give descriptor to chip. */ error = bus_dmamap_load_mbuf(sc->mtag, sc->sparemap, buf->mbuf, epic_dma_map_rxbuf, desc, 0); if (error) { buf->mbuf = m; desc->status = 0x8000; ifp->if_ierrors++; continue; } desc->status = 0x8000; bus_dmamap_unload(sc->mtag, buf->map); map = buf->map; buf->map = sc->sparemap; sc->sparemap = map; bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); /* First mbuf in packet holds the ethernet and packet headers */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* Give mbuf to OS. */ (*ifp->if_input)(ifp, m); /* Successfuly received frame */ ifp->if_ipackets++; } bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* * Synopsis: Do last phase of transmission. I.e. if desc is * transmitted, decrease pending_txs counter, free mbuf contained * packet, switch to next descriptor and repeat until no packets * are pending or descriptor is not transmitted yet. */ static void epic_tx_done(sc) epic_softc_t *sc; { struct epic_tx_buffer *buf; struct epic_tx_desc *desc; u_int16_t status; bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_POSTREAD); while (sc->pending_txs > 0) { buf = sc->tx_buffer + sc->dirty_tx; desc = sc->tx_desc + sc->dirty_tx; status = desc->status; /* * If packet is not transmitted, thou followed * packets are not transmitted too. */ if (status & 0x8000) break; /* Packet is transmitted. Switch to next and free mbuf. */ sc->pending_txs--; sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK; bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mtag, buf->map); m_freem(buf->mbuf); buf->mbuf = NULL; /* Check for errors and collisions. 
*/ if (status & 0x0001) sc->sc_if.if_opackets++; else sc->sc_if.if_oerrors++; sc->sc_if.if_collisions += (status >> 8) & 0x1F; #ifdef EPIC_DIAG if ((status & 0x1001) == 0x1001) device_printf(sc->dev, "Tx ERROR: excessive coll. number\n"); #endif } if (sc->pending_txs < TX_RING_SIZE) sc->sc_if.if_flags &= ~IFF_OACTIVE; bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* * Interrupt function */ static void epic_intr(arg) void *arg; { epic_softc_t *sc; int status, i; sc = arg; i = 4; while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) { CSR_WRITE_4(sc, INTSTAT, status); if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) { epic_rx_done(sc); if (status & (INTSTAT_RQE|INTSTAT_OVW)) { #ifdef EPIC_DIAG if (status & INTSTAT_OVW) device_printf(sc->dev, "RX buffer overflow\n"); if (status & INTSTAT_RQE) device_printf(sc->dev, "RX FIFO overflow\n"); #endif if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0) CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED); sc->sc_if.if_ierrors++; } } if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) { epic_tx_done(sc); if (sc->sc_if.if_snd.ifq_head != NULL) epic_ifstart(&sc->sc_if); } /* Check for rare errors */ if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA| INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) { if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA| INTSTAT_APE|INTSTAT_DPE)) { device_printf(sc->dev, "PCI fatal errors occured: %s%s%s%s\n", (status & INTSTAT_PMA) ? "PMA " : "", (status & INTSTAT_PTA) ? "PTA " : "", (status & INTSTAT_APE) ? "APE " : "", (status & INTSTAT_DPE) ? "DPE" : ""); epic_stop(sc); epic_init(sc); break; } if (status & INTSTAT_RXE) { #ifdef EPIC_DIAG device_printf(sc->dev, "CRC/Alignment error\n"); #endif sc->sc_if.if_ierrors++; } if (status & INTSTAT_TXU) { epic_tx_underrun(sc); sc->sc_if.if_oerrors++; } } } /* If no packets are pending, then no timeouts. 
	/* (tail of epic_intr) no packets pending -> cancel watchdog */
	if (sc->pending_txs == 0)
		sc->sc_if.if_timer = 0;
}

/*
 * Handle the TX underrun error: increase the TX threshold
 * and restart the transmitter.  Once the threshold would exceed
 * TRANSMIT_THRESHOLD_MAX, give up on early transmit entirely.
 */
static void
epic_tx_underrun(sc)
	epic_softc_t *sc;
{
	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#ifdef EPIC_DIAG
		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
	} else {
		sc->tx_threshold += 0x40;
#ifdef EPIC_DIAG
		device_printf(sc->dev,
		    "Tx UNDERRUN: TX threshold increased to %d\n",
		    sc->tx_threshold);
#endif
	}

	/* We must set TXUGO to reset the stuck transmitter. */
	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

	/* Update the TX threshold */
	epic_stop_activity(sc);
	epic_set_tx_mode(sc);
	epic_start_activity(sc);
}

/*
 * Synopsis: This one is called if packets wasn't transmitted
 * during timeout. Try to deallocate transmitted packets, and
 * if success continue to work.
 */
static void
epic_ifwatchdog(ifp)
	struct ifnet *ifp;
{
	epic_softc_t *sc;
	int x;

	x = splimp();
	sc = ifp->if_softc;

	device_printf(sc->dev, "device timeout %d packets\n",
	    sc->pending_txs);

	/* Try to finish queued packets. */
	epic_tx_done(sc);

	/* If not successful. */
	if (sc->pending_txs > 0) {
		ifp->if_oerrors += sc->pending_txs;

		/* Reinitialize board. */
		device_printf(sc->dev, "reinitialization\n");
		epic_stop(sc);
		epic_init(sc);
	} else
		device_printf(sc->dev, "seems we can continue normaly\n");

	/* Start output. */
	if (ifp->if_snd.ifq_head)
		epic_ifstart(ifp);

	splx(x);
}

/*
 * Despite the name of this function, it doesn't update statistics, it only
 * helps in autonegotiation process.  Re-arms itself to run once per second
 * via timeout(9).
 */
static void
epic_stats_update(epic_softc_t * sc)
{
	struct mii_data * mii;
	int s;

	s = splimp();

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);

	splx(s);
}

/*
 * Set media options.
*/ static int epic_ifmedia_upd(ifp) struct ifnet *ifp; { epic_softc_t *sc; struct mii_data *mii; struct ifmedia *ifm; struct mii_softc *miisc; int cfg, media; sc = ifp->if_softc; mii = device_get_softc(sc->miibus); ifm = &mii->mii_media; media = ifm->ifm_cur->ifm_media; /* Do not do anything if interface is not up. */ if ((ifp->if_flags & IFF_UP) == 0) return (0); /* * Lookup current selected PHY. */ if (IFM_INST(media) == sc->serinst) { sc->phyid = EPIC_SERIAL; sc->physc = NULL; } else { /* If we're not selecting serial interface, select MII mode. */ sc->miicfg &= ~MIICFG_SERIAL_ENABLE; CSR_WRITE_4(sc, MIICFG, sc->miicfg); /* Default to unknown PHY. */ sc->phyid = EPIC_UNKN_PHY; /* Lookup selected PHY. */ for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) { if (IFM_INST(media) == miisc->mii_inst) { sc->physc = miisc; break; } } /* Identify selected PHY. */ if (sc->physc) { int id1, id2, model, oui; id1 = PHY_READ(sc->physc, MII_PHYIDR1); id2 = PHY_READ(sc->physc, MII_PHYIDR2); oui = MII_OUI(id1, id2); model = MII_MODEL(id2); switch (oui) { case MII_OUI_QUALSEMI: if (model == MII_MODEL_QUALSEMI_QS6612) sc->phyid = EPIC_QS6612_PHY; break; case MII_OUI_xxALTIMA: if (model == MII_MODEL_xxALTIMA_AC101) sc->phyid = EPIC_AC101_PHY; break; case MII_OUI_xxLEVEL1: if (model == MII_MODEL_xxLEVEL1_LXT970) sc->phyid = EPIC_LXT970_PHY; break; } } } /* * Do PHY specific card setup. */ /* * Call this, to isolate all not selected PHYs and * set up selected. */ mii_mediachg(mii); /* Do our own setup. */ switch (sc->phyid) { case EPIC_QS6612_PHY: break; case EPIC_AC101_PHY: /* We have to powerup fiber tranceivers. */ if (IFM_SUBTYPE(media) == IFM_100_FX) sc->miicfg |= MIICFG_694_ENABLE; else sc->miicfg &= ~MIICFG_694_ENABLE; CSR_WRITE_4(sc, MIICFG, sc->miicfg); break; case EPIC_LXT970_PHY: /* We have to powerup fiber tranceivers. 
*/ cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG); if (IFM_SUBTYPE(media) == IFM_100_FX) cfg |= CONFIG_LEDC1 | CONFIG_LEDC0; else cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0); PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg); break; case EPIC_SERIAL: /* Select serial PHY (10base2/BNC usually). */ sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE; CSR_WRITE_4(sc, MIICFG, sc->miicfg); /* There is no driver to fill this. */ mii->mii_media_active = media; mii->mii_media_status = 0; /* * We need to call this manually as it wasn't called * in mii_mediachg(). */ epic_miibus_statchg(sc->dev); break; default: device_printf(sc->dev, "ERROR! Unknown PHY selected\n"); return (EINVAL); } return (0); } /* * Report current media status. */ static void epic_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { epic_softc_t *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->miibus); ifm = &mii->mii_media; /* Nothing should be selected if interface is down. */ if ((ifp->if_flags & IFF_UP) == 0) { ifmr->ifm_active = IFM_NONE; ifmr->ifm_status = 0; return; } /* Call underlying pollstat, if not serial PHY. */ if (sc->phyid != EPIC_SERIAL) mii_pollstat(mii); /* Simply copy media info. */ ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } /* * Callback routine, called on media change. */ static void epic_miibus_statchg(dev) device_t dev; { epic_softc_t *sc; struct mii_data *mii; int media; sc = device_get_softc(dev); mii = device_get_softc(sc->miibus); media = mii->mii_media_active; sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX); /* * If we are in full-duplex mode or loopback operation, * we need to decouple receiver and transmitter. */ if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP)) sc->txcon |= TXCON_FULL_DUPLEX; /* On some cards we need manualy set fullduplex led. 
*/ if (sc->cardid == SMC9432FTX || sc->cardid == SMC9432FTX_SC) { if (IFM_OPTIONS(media) & IFM_FDX) sc->miicfg |= MIICFG_694_ENABLE; else sc->miicfg &= ~MIICFG_694_ENABLE; CSR_WRITE_4(sc, MIICFG, sc->miicfg); } /* Update baudrate. */ if (IFM_SUBTYPE(media) == IFM_100_TX || IFM_SUBTYPE(media) == IFM_100_FX) sc->sc_if.if_baudrate = 100000000; else sc->sc_if.if_baudrate = 10000000; epic_stop_activity(sc); epic_set_tx_mode(sc); epic_start_activity(sc); } static void epic_miibus_mediainit(dev) device_t dev; { epic_softc_t *sc; struct mii_data *mii; struct ifmedia *ifm; int media; sc = device_get_softc(dev); mii = device_get_softc(sc->miibus); ifm = &mii->mii_media; /* * Add Serial Media Interface if present, this applies to * SMC9432BTX serie. */ if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) { /* Store its instance. */ sc->serinst = mii->mii_instance++; /* Add as 10base2/BNC media. */ media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst); ifmedia_add(ifm, media, 0, NULL); /* Report to user. */ device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n"); } } /* * Reset chip and update media. */ static void epic_init(xsc) void *xsc; { epic_softc_t *sc = xsc; struct ifnet *ifp = &sc->sc_if; int s, i; s = splimp(); /* If interface is already running, then we need not do anything. */ if (ifp->if_flags & IFF_RUNNING) { splx(s); return; } /* Soft reset the chip (we have to power up card before). */ CSR_WRITE_4(sc, GENCTL, 0); CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET); /* * Reset takes 15 pci ticks which depends on PCI bus speed. * Assuming it >= 33000000 hz, we have wait at least 495e-6 sec. */ DELAY(500); /* Wake up */ CSR_WRITE_4(sc, GENCTL, 0); /* Workaround for Application Note 7-15 */ for (i = 0; i < 16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST); /* Give rings to EPIC */ CSR_WRITE_4(sc, PRCDAR, sc->rx_addr); CSR_WRITE_4(sc, PTCDAR, sc->tx_addr); /* Put node address to EPIC. 
*/ CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]); CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]); CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]); /* Set tx mode, includeing transmit threshold. */ epic_set_tx_mode(sc); /* Compute and set RXCON. */ epic_set_rx_mode(sc); /* Set multicast table. */ epic_set_mc_table(sc); /* Enable interrupts by setting the interrupt mask. */ CSR_WRITE_4(sc, INTMASK, INTSTAT_RCC | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */ /* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU | INTSTAT_FATAL); /* Acknowledge all pending interrupts. */ CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT)); /* Enable interrupts, set for PCI read multiple and etc */ CSR_WRITE_4(sc, GENCTL, GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE | GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64); /* Mark interface running ... */ if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING; else ifp->if_flags &= ~IFF_RUNNING; /* ... and free */ ifp->if_flags &= ~IFF_OACTIVE; /* Start Rx process */ epic_start_activity(sc); /* Set appropriate media */ epic_ifmedia_upd(ifp); sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz); splx(s); } /* * Synopsis: calculate and set Rx mode. Chip must be in idle state to * access RXCON. */ static void epic_set_rx_mode(sc) epic_softc_t *sc; { u_int32_t flags; u_int32_t rxcon; flags = sc->sc_if.if_flags; rxcon = RXCON_DEFAULT; #ifdef EPIC_EARLY_RX rxcon |= RXCON_EARLY_RX; #endif rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0; CSR_WRITE_4(sc, RXCON, rxcon); } /* * Synopsis: Set transmit control register. Chip must be in idle state to * access TXCON. 
*/
/*
 * Synopsis: Set transmit control register.  Chip must be in idle state to
 * access TXCON.
 *
 * Writes the cached txcon value (and the early-TX threshold, when early
 * transmit is enabled) into the chip registers.
 */
static void
epic_set_tx_mode(sc)
	epic_softc_t *sc;
{

	/* ETXTHR is only meaningful while early transmit is enabled. */
	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);
	CSR_WRITE_4(sc, TXCON, sc->txcon);
}

/*
 * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
 * flags (note that setting PROMISC bit in EPIC's RXCON will only touch
 * individual frames, multicast filter must be manually programmed).
 *
 * Note: EPIC must be in idle state.
 */
static void
epic_set_mc_table(sc)
	epic_softc_t *sc;
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int16_t filter[4];	/* 64-bit hash filter, as four 16-bit words */
	u_int8_t h;

	ifp = &sc->sc_if;
	/* Accept-all: open the hash filter completely. */
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		CSR_WRITE_4(sc, MC0, 0xFFFF);
		CSR_WRITE_4(sc, MC1, 0xFFFF);
		CSR_WRITE_4(sc, MC2, 0xFFFF);
		CSR_WRITE_4(sc, MC3, 0xFFFF);
		return;
	}

	filter[0] = 0;
	filter[1] = 0;
	filter[2] = 0;
	filter[3] = 0;

	/* Set a bit per 6-bit hash of each link-layer multicast address. */
#if __FreeBSD_version < 500000
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = tx_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		filter[h >> 4] |= 1 << (h & 0xF);
	}

	CSR_WRITE_4(sc, MC0, filter[0]);
	CSR_WRITE_4(sc, MC1, filter[1]);
	CSR_WRITE_4(sc, MC2, filter[2]);
	CSR_WRITE_4(sc, MC3, filter[3]);
}

/*
 * Synopsis: calculate EPIC's hash of multicast address.
 *
 * Standard Ethernet CRC-32 over the 6-byte MAC address, MSB-first,
 * returning the top 6 bits of the CRC as the filter bit index.
 */
static uint32_t
tx_mchash(addr)
	const uint8_t *addr;
{
	uint32_t crc, carry;
	int idx, bit;
	uint8_t data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
			crc <<= 1;
			/*
			 * After the shift crc's low bit is 0, so
			 * (crc ^ 0x04c11db6) | 1 is exactly
			 * crc ^ 0x04c11db7, the CRC-32 polynomial.
			 */
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	return ((crc >> 26) & 0x3F);
}

/*
 * Synopsis: Start receive process and transmit one, if they need.
 */
static void
epic_start_activity(sc)
	epic_softc_t *sc;
{

	/* Start rx process (and queue TX DMA if packets are pending). */
	CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX |
	    (sc->pending_txs ? COMMAND_TXQUEUED : 0));
}

/*
 * Synopsis: Completely stop Rx and Tx processes. If TQE is set additional
 * packet needs to be queued to stop Tx DMA.
 */
static void
epic_stop_activity(sc)
	epic_softc_t *sc;
{
	int status, i;

	/* Stop Tx and Rx DMA. */
	CSR_WRITE_4(sc, COMMAND,
	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

	/* Wait Rx and Tx DMA to stop (why 1 ms ??? XXX). */
	for (i = 0; i < 0x1000; i++) {
		status = CSR_READ_4(sc, INTSTAT) &
		    (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
			break;
		DELAY(1);
	}

	/* Catch all finished packets. */
	epic_rx_done(sc);
	epic_tx_done(sc);

	status = CSR_READ_4(sc, INTSTAT);

	if ((status & INTSTAT_RXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

	if ((status & INTSTAT_TXIDLE) == 0)
		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

	/*
	 * May need to queue one more packet if TQE, this is rare
	 * but existing case.
	 */
	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
		(void)epic_queue_last_packet(sc);
}

/*
 * The EPIC transmitter may get stuck in the TQE state.  It will not go
 * IDLE until a packet from the current descriptor has been copied to
 * internal RAM.  We compose a dummy (minimum-length, zeroed) packet here
 * and queue it for transmission.
 *
 * XXX the packet will then be actually sent over network...
 */
static int
epic_queue_last_packet(sc)
	epic_softc_t *sc;
{
	struct epic_tx_desc *desc;
	struct epic_frag_list *flist;
	struct epic_tx_buffer *buf;
	struct mbuf *m0;
	int error, i;

	device_printf(sc->dev, "queue last packet\n");

	desc = sc->tx_desc + sc->cur_tx;
	flist = sc->tx_flist + sc->cur_tx;
	buf = sc->tx_buffer + sc->cur_tx;

	/* Current slot must be free (not owned by NIC, no mbuf attached). */
	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
		return (EBUSY);

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	/* Prepare mbuf: minimum Ethernet payload, zero-filled. */
	m0->m_len = min(MHLEN, ETHER_MIN_LEN - ETHER_CRC_LEN);
	m0->m_pkthdr.len = m0->m_len;
	m0->m_pkthdr.rcvif = &sc->sc_if;
	bzero(mtod(m0, caddr_t), m0->m_len);

	/* Fill fragments list.
*/ error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, epic_dma_map_txbuf, flist, 0); if (error) { m_freem(m0); return (error); } bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); /* Fill in descriptor. */ buf->mbuf = m0; sc->pending_txs++; sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK; desc->control = 0x01; desc->txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN); desc->status = 0x8000; bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE); /* Launch transmission. */ CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED); /* Wait Tx DMA to stop (for how long??? XXX) */ for (i = 0; i < 1000; i++) { if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) break; DELAY(1); } if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0) device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n"); else epic_tx_done(sc); return (0); } /* * Synopsis: Shut down board and deallocates rings. */ static void epic_stop(sc) epic_softc_t *sc; { int s; s = splimp(); sc->sc_if.if_timer = 0; untimeout((timeout_t *)epic_stats_update, sc, sc->stat_ch); /* Disable interrupts */ CSR_WRITE_4(sc, INTMASK, 0); CSR_WRITE_4(sc, GENCTL, 0); /* Try to stop Rx and TX processes */ epic_stop_activity(sc); /* Reset chip */ CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET); DELAY(1000); /* Make chip go to bed */ CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN); /* Mark as stoped */ sc->sc_if.if_flags &= ~IFF_RUNNING; splx(s); } /* * Synopsis: This function should free all memory allocated for rings. 
*/
/*
 * Synopsis: This function should free all memory allocated for rings.
 *
 * For each RX and TX slot: clear the hardware descriptor, then unload and
 * destroy the DMA map and free the mbuf if one is attached.  Safe to call
 * on a partially initialized ring (epic_init_rings error paths use it).
 */
static void
epic_free_rings(sc)
	epic_softc_t *sc;
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	if (sc->sparemap != NULL)
		bus_dmamap_destroy(sc->mtag, sc->sparemap);

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}
}

/*
 * Synopsis: Allocates mbufs for Rx ring and point Rx descs to them.
 * Point Tx descs to fragment lists.  Check that all descs and fraglists
 * are bounded and aligned properly.
 *
 * Returns 0 on success, or an errno; on failure everything allocated so
 * far is released via epic_free_rings().
 */
static int
epic_init_rings(sc)
	epic_softc_t *sc;
{
	int error, i;

	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

	/* Initialize the RX descriptor ring.
	 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;		/* Owned by driver */
		desc->next = sc->rx_addr +
		    ((i + 1) & RX_RING_MASK) * sizeof(struct epic_rx_desc);

		/*
		 * Reject descriptors that are not 4-byte aligned or that
		 * would straddle a page boundary.
		 */
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			epic_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		/* Leading pad so the IP header ends up aligned. */
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    epic_dma_map_rxbuf, desc, 0);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len; /* Max RX buffer length */
		desc->status = 0x8000;		/* Set owner bit to NIC */
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create the spare DMA map. */
	error = bus_dmamap_create(sc->mtag, 0, &sc->sparemap);
	if (error) {
		epic_free_rings(sc);
		return (error);
	}

	/* Initialize the TX descriptor ring.
*/ for (i = 0; i < TX_RING_SIZE; i++) { struct epic_tx_buffer *buf = sc->tx_buffer + i; struct epic_tx_desc *desc = sc->tx_desc + i; desc->status = 0; desc->next = sc->tx_addr + ((i + 1) & TX_RING_MASK) * sizeof(struct epic_tx_desc); if ((desc->next & 3) || ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) { epic_free_rings(sc); return (EFAULT); } buf->mbuf = NULL; desc->bufaddr = sc->frag_addr + i * sizeof(struct epic_frag_list); if ((desc->bufaddr & 3) || ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) { epic_free_rings(sc); return (EFAULT); } error = bus_dmamap_create(sc->mtag, 0, &buf->map); if (error) { epic_free_rings(sc); return (error); } } bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE); return (0); } /* * EEPROM operation functions */ static void epic_write_eepromreg(sc, val) epic_softc_t *sc; u_int8_t val; { u_int16_t i; CSR_WRITE_1(sc, EECTL, val); for (i = 0; i < 0xFF; i++) { if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break; } } static u_int8_t epic_read_eepromreg(sc) epic_softc_t *sc; { return (CSR_READ_1(sc, EECTL)); } static u_int8_t epic_eeprom_clock(sc, val) epic_softc_t *sc; u_int8_t val; { epic_write_eepromreg(sc, val); epic_write_eepromreg(sc, (val | 0x4)); epic_write_eepromreg(sc, val); return (epic_read_eepromreg(sc)); } static void epic_output_eepromw(sc, val) epic_softc_t *sc; u_int16_t val; { int i; for (i = 0xF; i >= 0; i--) { if (val & (1 << i)) epic_eeprom_clock(sc, 0x0B); else epic_eeprom_clock(sc, 0x03); } } static u_int16_t epic_input_eepromw(sc) epic_softc_t *sc; { u_int16_t retval = 0; int i; for (i = 0xF; i >= 0; i--) { if (epic_eeprom_clock(sc, 0x3) & 0x10) retval |= (1 << i); } return (retval); } static int epic_read_eeprom(sc, loc) epic_softc_t *sc; u_int16_t loc; { u_int16_t dataval; u_int16_t read_cmd; epic_write_eepromreg(sc, 3); if (epic_read_eepromreg(sc) & 0x40) read_cmd = (loc & 0x3F) | 0x180; else 
read_cmd = (loc & 0xFF) | 0x600; epic_output_eepromw(sc, read_cmd); dataval = epic_input_eepromw(sc); epic_write_eepromreg(sc, 1); return (dataval); } /* * Here goes MII read/write routines. */ static int epic_read_phy_reg(sc, phy, reg) epic_softc_t *sc; int phy, reg; { int i; CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01)); for (i = 0; i < 0x100; i++) { if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break; DELAY(1); } return (CSR_READ_4(sc, MIIDATA)); } static void epic_write_phy_reg(sc, phy, reg, val) epic_softc_t *sc; int phy, reg, val; { int i; CSR_WRITE_4(sc, MIIDATA, val); CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02)); for(i = 0; i < 0x100; i++) { if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break; DELAY(1); } } static int epic_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { epic_softc_t *sc; sc = device_get_softc(dev); return (PHY_READ_2(sc, phy, reg)); } static int epic_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { epic_softc_t *sc; sc = device_get_softc(dev); PHY_WRITE_2(sc, phy, reg, data); return (0); } Index: head/sys/dev/txp/if_txp.c =================================================================== --- head/sys/dev/txp/if_txp.c (revision 129878) +++ head/sys/dev/txp/if_txp.c (revision 129879) @@ -1,1892 +1,1893 @@ /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */ /* * Copyright (c) 2001 * Jason L. Wright , Theo de Raadt, and * Aaron Campbell . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jason L. Wright, * Theo de Raadt and Aaron Campbell. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for 3c990 (Typhoon) Ethernet ASIC */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #define TXP_USEIOSPACE #define __STRICT_ALIGNMENT #include #include #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. 
*/ static struct txp_type txp_devs[] = { { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95, "3Com 3cR990-TX-95 Etherlink with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97, "3Com 3cR990-TX-97 Etherlink with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM, "3Com 3cR990B-TXM Etherlink with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95, "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97, "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" }, { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV, "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" }, { 0, 0, NULL } }; static int txp_probe (device_t); static int txp_attach (device_t); static int txp_detach (device_t); static void txp_intr (void *); static void txp_tick (void *); static int txp_shutdown (device_t); static int txp_ioctl (struct ifnet *, u_long, caddr_t); static void txp_start (struct ifnet *); static void txp_stop (struct txp_softc *); static void txp_init (void *); static void txp_watchdog (struct ifnet *); static void txp_release_resources(struct txp_softc *); static int txp_chip_init(struct txp_softc *); static int txp_reset_adapter(struct txp_softc *); static int txp_download_fw(struct txp_softc *); static int txp_download_fw_wait(struct txp_softc *); static int txp_download_fw_section (struct txp_softc *, struct txp_fw_section_header *, int); static int txp_alloc_rings(struct txp_softc *); static int txp_rxring_fill(struct txp_softc *); static void txp_rxring_empty(struct txp_softc *); static void txp_set_filter(struct txp_softc *); static int txp_cmd_desc_numfree(struct txp_softc *); static int txp_command (struct txp_softc *, u_int16_t, u_int16_t, u_int32_t, u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int); static int txp_command2 (struct txp_softc *, u_int16_t, u_int16_t, u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t, struct txp_rsp_desc **, int); static int txp_response 
(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t, struct txp_rsp_desc **); static void txp_rsp_fixup (struct txp_softc *, struct txp_rsp_desc *, struct txp_rsp_desc *); static void txp_capabilities(struct txp_softc *); static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int txp_ifmedia_upd(struct ifnet *); #ifdef TXP_DEBUG static void txp_show_descriptor(void *); #endif static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *); static void txp_rxbuf_reclaim(struct txp_softc *); static void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *); #ifdef TXP_USEIOSPACE #define TXP_RES SYS_RES_IOPORT #define TXP_RID TXP_PCI_LOIO #else #define TXP_RES SYS_RES_MEMORY #define TXP_RID TXP_PCI_LOMEM #endif static device_method_t txp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, txp_probe), DEVMETHOD(device_attach, txp_attach), DEVMETHOD(device_detach, txp_detach), DEVMETHOD(device_shutdown, txp_shutdown), { 0, 0 } }; static driver_t txp_driver = { "txp", txp_methods, sizeof(struct txp_softc) }; static devclass_t txp_devclass; DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0); MODULE_DEPEND(txp, pci, 1, 1, 1); MODULE_DEPEND(txp, ether, 1, 1, 1); static int txp_probe(dev) device_t dev; { struct txp_type *t; t = txp_devs; while(t->txp_name != NULL) { if ((pci_get_vendor(dev) == t->txp_vid) && (pci_get_device(dev) == t->txp_did)) { device_set_desc(dev, t->txp_name); return(0); } t++; } return(ENXIO); } static int txp_attach(dev) device_t dev; { struct txp_softc *sc; struct ifnet *ifp; u_int16_t p1; u_int32_t p2; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->sc_dev = dev; sc->sc_cold = 1; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #ifndef BURN_BRIDGES /* * Handle power management nonsense. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t iobase, membase, irq; /* Save important PCI config data. 
*/ iobase = pci_read_config(dev, TXP_PCI_LOIO, 4); membase = pci_read_config(dev, TXP_PCI_LOMEM, 4); irq = pci_read_config(dev, TXP_PCI_INTLINE, 4); /* Reset the power state. */ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, TXP_PCI_LOIO, iobase, 4); pci_write_config(dev, TXP_PCI_LOMEM, membase, 4); pci_write_config(dev, TXP_PCI_INTLINE, irq, 4); } #endif /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = TXP_RID; sc->sc_res = bus_alloc_resource_any(dev, TXP_RES, &rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->sc_bt = rman_get_bustag(sc->sc_res); sc->sc_bh = rman_get_bushandle(sc->sc_res); /* Allocate interrupt */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); txp_release_resources(sc); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET, txp_intr, sc, &sc->sc_intrhand); if (error) { txp_release_resources(sc); device_printf(dev, "couldn't set up irq\n"); goto fail; } if (txp_chip_init(sc)) { txp_release_resources(sc); goto fail; } sc->sc_fwbuf = contigmalloc(32768, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); error = txp_download_fw(sc); contigfree(sc->sc_fwbuf, 32768, M_DEVBUF); sc->sc_fwbuf = NULL; if (error) { txp_release_resources(sc); goto fail; } sc->sc_ldata = contigmalloc(sizeof(struct txp_ldata), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); bzero(sc->sc_ldata, sizeof(struct txp_ldata)); if (txp_alloc_rings(sc)) { txp_release_resources(sc); goto fail; } if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, NULL, NULL, NULL, 1)) { txp_release_resources(sc); goto fail; } if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0, &p1, &p2, 
NULL, 1)) { txp_release_resources(sc); goto fail; } txp_set_filter(sc); sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1]; sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0]; sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3]; sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2]; sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1]; sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0]; sc->sc_cold = 0; ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); sc->sc_xcvr = TXP_XCVR_AUTO; txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0, NULL, NULL, NULL, 0); ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); ifp = &sc->sc_arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = txp_ioctl; ifp->if_start = txp_start; ifp->if_watchdog = txp_watchdog; ifp->if_init = txp_init; ifp->if_baudrate = 100000000; ifp->if_snd.ifq_maxlen = TX_ENTRIES; ifp->if_hwassist = 0; txp_capabilities(sc); /* * Attach us everywhere */ ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr); callout_handle_init(&sc->sc_tick); return(0); fail: txp_release_resources(sc); mtx_destroy(&sc->sc_mtx); return(error); } static int txp_detach(dev) device_t dev; { struct txp_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); ifp = &sc->sc_arpcom.ac_if; txp_stop(sc); txp_shutdown(dev); ifmedia_removeall(&sc->sc_ifmedia); ether_ifdetach(ifp); for (i = 0; i < RXBUF_ENTRIES; i++) free(sc->sc_rxbufs[i].rb_sd, 
M_DEVBUF); txp_release_resources(sc); mtx_destroy(&sc->sc_mtx); return(0); } static void txp_release_resources(sc) struct txp_softc *sc; { device_t dev; dev = sc->sc_dev; if (sc->sc_intrhand != NULL) bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); if (sc->sc_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); if (sc->sc_res != NULL) bus_release_resource(dev, TXP_RES, TXP_RID, sc->sc_res); if (sc->sc_ldata != NULL) contigfree(sc->sc_ldata, sizeof(struct txp_ldata), M_DEVBUF); return; } static int txp_chip_init(sc) struct txp_softc *sc; { /* disable interrupts */ WRITE_REG(sc, TXP_IER, 0); WRITE_REG(sc, TXP_IMR, TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_LATCH); /* ack all interrupts */ WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); if (txp_reset_adapter(sc)) return (-1); /* disable interrupts */ WRITE_REG(sc, TXP_IER, 0); WRITE_REG(sc, TXP_IMR, TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_LATCH); /* ack all interrupts */ WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); return (0); } static int txp_reset_adapter(sc) struct txp_softc *sc; { u_int32_t r; int i; r = 0; WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL); DELAY(1000); WRITE_REG(sc, TXP_SRR, 0); /* Should wait max 6 seconds */ for (i = 0; i < 6000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_WAITING_FOR_HOST_REQUEST) break; 
DELAY(1000); } if (r != STAT_WAITING_FOR_HOST_REQUEST) { device_printf(sc->sc_dev, "reset hung\n"); return (-1); } return (0); } static int txp_download_fw(sc) struct txp_softc *sc; { struct txp_fw_file_header *fileheader; struct txp_fw_section_header *secthead; int sect; u_int32_t r, i, ier, imr; r = 0; ier = READ_REG(sc, TXP_IER); WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0); imr = READ_REG(sc, TXP_IMR); WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0); for (i = 0; i < 10000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_WAITING_FOR_HOST_REQUEST) break; DELAY(50); } if (r != STAT_WAITING_FOR_HOST_REQUEST) { device_printf(sc->sc_dev, "not waiting for host request\n"); return (-1); } /* Ack the status */ WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); fileheader = (struct txp_fw_file_header *)tc990image; if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) { device_printf(sc->sc_dev, "fw invalid magic\n"); return (-1); } /* Tell boot firmware to get ready for image */ WRITE_REG(sc, TXP_H2A_1, fileheader->addr); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE); if (txp_download_fw_wait(sc)) { device_printf(sc->sc_dev, "fw wait failed, initial\n"); return (-1); } secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) + sizeof(struct txp_fw_file_header)); for (sect = 0; sect < fileheader->nsections; sect++) { if (txp_download_fw_section(sc, secthead, sect)) return (-1); secthead = (struct txp_fw_section_header *) (((u_int8_t *)secthead) + secthead->nbytes + sizeof(*secthead)); } WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE); for (i = 0; i < 10000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_WAITING_FOR_BOOT) break; DELAY(50); } if (r != STAT_WAITING_FOR_BOOT) { device_printf(sc->sc_dev, "not waiting for boot\n"); return (-1); } WRITE_REG(sc, TXP_IER, ier); WRITE_REG(sc, TXP_IMR, imr); return (0); } static int txp_download_fw_wait(sc) struct txp_softc *sc; { u_int32_t i, r; r = 0; for (i = 0; i < 10000; i++) { r = READ_REG(sc, 
TXP_ISR); if (r & TXP_INT_A2H_0) break; DELAY(50); } if (!(r & TXP_INT_A2H_0)) { device_printf(sc->sc_dev, "fw wait failed comm0\n"); return (-1); } WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); r = READ_REG(sc, TXP_A2H_0); if (r != STAT_WAITING_FOR_SEGMENT) { device_printf(sc->sc_dev, "fw not waiting for segment\n"); return (-1); } return (0); } static int txp_download_fw_section(sc, sect, sectnum) struct txp_softc *sc; struct txp_fw_section_header *sect; int sectnum; { vm_offset_t dma; int rseg, err = 0; struct mbuf m; u_int16_t csum; /* Skip zero length sections */ if (sect->nbytes == 0) return (0); /* Make sure we aren't past the end of the image */ rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image); if (rseg >= sizeof(tc990image)) { device_printf(sc->sc_dev, "fw invalid section address, " "section %d\n", sectnum); return (-1); } /* Make sure this section doesn't go past the end */ rseg += sect->nbytes; if (rseg >= sizeof(tc990image)) { device_printf(sc->sc_dev, "fw truncated section %d\n", sectnum); return (-1); } bcopy(((u_int8_t *)sect) + sizeof(*sect), sc->sc_fwbuf, sect->nbytes); dma = vtophys(sc->sc_fwbuf); /* * dummy up mbuf and verify section checksum */ m.m_type = MT_DATA; m.m_next = m.m_nextpkt = NULL; m.m_len = sect->nbytes; m.m_data = sc->sc_fwbuf; m.m_flags = 0; csum = in_cksum(&m, sect->nbytes); if (csum != sect->cksum) { device_printf(sc->sc_dev, "fw section %d, bad " "cksum (expected 0x%x got 0x%x)\n", sectnum, sect->cksum, csum); err = -1; goto bail; } WRITE_REG(sc, TXP_H2A_1, sect->nbytes); WRITE_REG(sc, TXP_H2A_2, sect->cksum); WRITE_REG(sc, TXP_H2A_3, sect->addr); WRITE_REG(sc, TXP_H2A_4, 0); WRITE_REG(sc, TXP_H2A_5, dma & 0xffffffff); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE); if (txp_download_fw_wait(sc)) { device_printf(sc->sc_dev, "fw wait failed, " "section %d\n", sectnum); err = -1; } bail: return (err); } static void txp_intr(vsc) void *vsc; { struct txp_softc *sc = vsc; struct txp_hostvar *hv = sc->sc_hostvar; u_int32_t isr; 
/* mask all interrupts */ WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); isr = READ_REG(sc, TXP_ISR); while (isr) { WRITE_REG(sc, TXP_ISR, isr); if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff)) txp_rx_reclaim(sc, &sc->sc_rxhir); if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff)) txp_rx_reclaim(sc, &sc->sc_rxlor); if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx) txp_rxbuf_reclaim(sc); if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons != TXP_OFFSET2IDX(*(sc->sc_txhir.r_off)))) txp_tx_reclaim(sc, &sc->sc_txhir); if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons != TXP_OFFSET2IDX(*(sc->sc_txlor.r_off)))) txp_tx_reclaim(sc, &sc->sc_txlor); isr = READ_REG(sc, TXP_ISR); } /* unmask all interrupts */ WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); txp_start(&sc->sc_arpcom.ac_if); return; } static void txp_rx_reclaim(sc, r) struct txp_softc *sc; struct txp_rx_ring *r; { struct ifnet *ifp = &sc->sc_arpcom.ac_if; struct txp_rx_desc *rxd; struct mbuf *m; struct txp_swdesc *sd = NULL; u_int32_t roff, woff; roff = *r->r_roff; woff = *r->r_woff; rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc)); while (roff != woff) { if (rxd->rx_flags & RX_FLAGS_ERROR) { device_printf(sc->sc_dev, "error 0x%x\n", rxd->rx_stat); ifp->if_ierrors++; goto next; } /* retrieve stashed pointer */ sd = rxd->rx_sd; m = sd->sd_mbuf; sd->sd_mbuf = NULL; m->m_pkthdr.len = m->m_len = rxd->rx_len; #ifdef __STRICT_ALIGNMENT { /* * XXX Nice chip, except it won't accept "off by 2" * buffers, so we're force to copy. Supposedly * this will be fixed in a newer firmware rev * and this will be temporary. 
*/ struct mbuf *mnew; MGETHDR(mnew, M_DONTWAIT, MT_DATA); if (mnew == NULL) { m_freem(m); goto next; } if (m->m_len > (MHLEN - 2)) { MCLGET(mnew, M_DONTWAIT); if (!(mnew->m_flags & M_EXT)) { m_freem(mnew); m_freem(m); goto next; } } mnew->m_pkthdr.rcvif = ifp; m_adj(mnew, 2); mnew->m_pkthdr.len = mnew->m_len = m->m_len; m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t)); m_freem(m); m = mnew; } #endif if (rxd->rx_stat & RX_STAT_IPCKSUMBAD) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED|CSUM_IP_VALID; if ((rxd->rx_stat & RX_STAT_TCPCKSUMGOOD) || (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } if (rxd->rx_stat & RX_STAT_VLAN) { VLAN_INPUT_TAG(ifp, m, htons(rxd->rx_vlan >> 16), goto next); } (*ifp->if_input)(ifp, m); next: roff += sizeof(struct txp_rx_desc); if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) { roff = 0; rxd = r->r_desc; } else rxd++; woff = *r->r_woff; } *r->r_roff = woff; return; } static void txp_rxbuf_reclaim(sc) struct txp_softc *sc; { struct ifnet *ifp = &sc->sc_arpcom.ac_if; struct txp_hostvar *hv = sc->sc_hostvar; struct txp_rxbuf_desc *rbd; struct txp_swdesc *sd; u_int32_t i; if (!(ifp->if_flags & IFF_RUNNING)) return; i = sc->sc_rxbufprod; rbd = sc->sc_rxbufs + i; while (1) { sd = rbd->rb_sd; if (sd->sd_mbuf != NULL) break; MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); if (sd->sd_mbuf == NULL) goto err_sd; MCLGET(sd->sd_mbuf, M_DONTWAIT); if ((sd->sd_mbuf->m_flags & M_EXT) == 0) goto err_mbuf; sd->sd_mbuf->m_pkthdr.rcvif = ifp; sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; rbd->rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t)) & 0xffffffff; rbd->rb_paddrhi = 0; hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i); if (++i == RXBUF_ENTRIES) { i = 0; rbd = sc->sc_rxbufs; } else rbd++; } sc->sc_rxbufprod = i; return; err_mbuf: m_freem(sd->sd_mbuf); err_sd: free(sd, 
M_DEVBUF); } /* * Reclaim mbufs and entries from a transmit ring. */ static void txp_tx_reclaim(sc, r) struct txp_softc *sc; struct txp_tx_ring *r; { struct ifnet *ifp = &sc->sc_arpcom.ac_if; u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off)); u_int32_t cons = r->r_cons, cnt = r->r_cnt; struct txp_tx_desc *txd = r->r_desc + cons; struct txp_swdesc *sd = sc->sc_txd + cons; struct mbuf *m; while (cons != idx) { if (cnt == 0) break; if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) { m = sd->sd_mbuf; if (m != NULL) { m_freem(m); txd->tx_addrlo = 0; txd->tx_addrhi = 0; ifp->if_opackets++; } } ifp->if_flags &= ~IFF_OACTIVE; if (++cons == TX_ENTRIES) { txd = r->r_desc; cons = 0; sd = sc->sc_txd; } else { txd++; sd++; } cnt--; } r->r_cons = cons; r->r_cnt = cnt; if (cnt == 0) ifp->if_timer = 0; } static int txp_shutdown(dev) device_t dev; { struct txp_softc *sc; sc = device_get_softc(dev); /* mask all interrupts */ WRITE_REG(sc, TXP_IMR, TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_LATCH); txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0); return(0); } static int txp_alloc_rings(sc) struct txp_softc *sc; { struct txp_boot_record *boot; struct txp_ldata *ld; u_int32_t r; int i; r = 0; ld = sc->sc_ldata; boot = &ld->txp_boot; /* boot record */ sc->sc_boot = boot; /* host variables */ bzero(&ld->txp_hostvar, sizeof(struct txp_hostvar)); boot->br_hostvar_lo = vtophys(&ld->txp_hostvar); boot->br_hostvar_hi = 0; sc->sc_hostvar = (struct txp_hostvar *)&ld->txp_hostvar; /* hi priority tx ring */ boot->br_txhipri_lo = vtophys(&ld->txp_txhiring);; boot->br_txhipri_hi = 0; boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc); sc->sc_txhir.r_reg = TXP_H2A_1; sc->sc_txhir.r_desc = (struct txp_tx_desc *)&ld->txp_txhiring; sc->sc_txhir.r_cons = 
sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0; sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx; /* lo priority tx ring */ boot->br_txlopri_lo = vtophys(&ld->txp_txloring); boot->br_txlopri_hi = 0; boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc); sc->sc_txlor.r_reg = TXP_H2A_3; sc->sc_txlor.r_desc = (struct txp_tx_desc *)&ld->txp_txloring; sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0; sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx; /* high priority rx ring */ boot->br_rxhipri_lo = vtophys(&ld->txp_rxhiring); boot->br_rxhipri_hi = 0; boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc); sc->sc_rxhir.r_desc = (struct txp_rx_desc *)&ld->txp_rxhiring; sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx; sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx; /* low priority rx ring */ boot->br_rxlopri_lo = vtophys(&ld->txp_rxloring); boot->br_rxlopri_hi = 0; boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc); sc->sc_rxlor.r_desc = (struct txp_rx_desc *)&ld->txp_rxloring; sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx; sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx; /* command ring */ bzero(&ld->txp_cmdring, sizeof(struct txp_cmd_desc) * CMD_ENTRIES); boot->br_cmd_lo = vtophys(&ld->txp_cmdring); boot->br_cmd_hi = 0; boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc); sc->sc_cmdring.base = (struct txp_cmd_desc *)&ld->txp_cmdring; sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc); sc->sc_cmdring.lastwrite = 0; /* response ring */ bzero(&ld->txp_rspring, sizeof(struct txp_rsp_desc) * RSP_ENTRIES); boot->br_resp_lo = vtophys(&ld->txp_rspring); boot->br_resp_hi = 0; boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc); sc->sc_rspring.base = (struct txp_rsp_desc *)&ld->txp_rspring; sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc); sc->sc_rspring.lastwrite = 0; /* receive buffer ring */ boot->br_rxbuf_lo = 
vtophys(&ld->txp_rxbufs); boot->br_rxbuf_hi = 0; boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc); sc->sc_rxbufs = (struct txp_rxbuf_desc *)&ld->txp_rxbufs; for (i = 0; i < RXBUF_ENTRIES; i++) { struct txp_swdesc *sd; if (sc->sc_rxbufs[i].rb_sd != NULL) continue; sc->sc_rxbufs[i].rb_sd = malloc(sizeof(struct txp_swdesc), M_DEVBUF, M_NOWAIT); if (sc->sc_rxbufs[i].rb_sd == NULL) return(ENOBUFS); sd = sc->sc_rxbufs[i].rb_sd; sd->sd_mbuf = NULL; } sc->sc_rxbufprod = 0; /* zero dma */ bzero(&ld->txp_zero, sizeof(u_int32_t)); boot->br_zero_lo = vtophys(&ld->txp_zero); boot->br_zero_hi = 0; /* See if it's waiting for boot, and try to boot it */ for (i = 0; i < 10000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_WAITING_FOR_BOOT) break; DELAY(50); } if (r != STAT_WAITING_FOR_BOOT) { device_printf(sc->sc_dev, "not waiting for boot\n"); return(ENXIO); } WRITE_REG(sc, TXP_H2A_2, 0); WRITE_REG(sc, TXP_H2A_1, vtophys(sc->sc_boot)); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD); /* See if it booted */ for (i = 0; i < 10000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_RUNNING) break; DELAY(50); } if (r != STAT_RUNNING) { device_printf(sc->sc_dev, "fw not running\n"); return(ENXIO); } /* Clear TX and CMD ring write registers */ WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL); WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL); WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL); return (0); } static int txp_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct txp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int s, error = 0; s = splnet(); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { txp_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) txp_stop(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware * filter accordingly. 
*/ txp_set_filter(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command); break; default: error = ether_ioctl(ifp, command, data); break; } (void)splx(s); return(error); } static int txp_rxring_fill(sc) struct txp_softc *sc; { int i; struct ifnet *ifp; struct txp_swdesc *sd; ifp = &sc->sc_arpcom.ac_if; for (i = 0; i < RXBUF_ENTRIES; i++) { sd = sc->sc_rxbufs[i].rb_sd; MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); if (sd->sd_mbuf == NULL) return(ENOBUFS); MCLGET(sd->sd_mbuf, M_DONTWAIT); if ((sd->sd_mbuf->m_flags & M_EXT) == 0) { m_freem(sd->sd_mbuf); return(ENOBUFS); } sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; sd->sd_mbuf->m_pkthdr.rcvif = ifp; sc->sc_rxbufs[i].rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t)); sc->sc_rxbufs[i].rb_paddrhi = 0; } sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) * sizeof(struct txp_rxbuf_desc); return(0); } static void txp_rxring_empty(sc) struct txp_softc *sc; { int i; struct txp_swdesc *sd; if (sc->sc_rxbufs == NULL) return; for (i = 0; i < RXBUF_ENTRIES; i++) { if (&sc->sc_rxbufs[i] == NULL) continue; sd = sc->sc_rxbufs[i].rb_sd; if (sd == NULL) continue; if (sd->sd_mbuf != NULL) { m_freem(sd->sd_mbuf); sd->sd_mbuf = NULL; } } return; } static void txp_init(xsc) void *xsc; { struct txp_softc *sc; struct ifnet *ifp; u_int16_t p1; u_int32_t p2; int s; sc = xsc; ifp = &sc->sc_arpcom.ac_if; if (ifp->if_flags & IFF_RUNNING) return; txp_stop(sc); s = splnet(); txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, NULL, NULL, NULL, 1); /* Set station address. 
*/ ((u_int8_t *)&p1)[1] = sc->sc_arpcom.ac_enaddr[0]; ((u_int8_t *)&p1)[0] = sc->sc_arpcom.ac_enaddr[1]; ((u_int8_t *)&p2)[3] = sc->sc_arpcom.ac_enaddr[2]; ((u_int8_t *)&p2)[2] = sc->sc_arpcom.ac_enaddr[3]; ((u_int8_t *)&p2)[1] = sc->sc_arpcom.ac_enaddr[4]; ((u_int8_t *)&p2)[0] = sc->sc_arpcom.ac_enaddr[5]; txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL, NULL, 1); txp_set_filter(sc); txp_rxring_fill(sc); txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_timer = 0; sc->sc_tick = timeout(txp_tick, sc, hz); splx(s); } static void txp_tick(vsc) void *vsc; { struct txp_softc *sc = vsc; struct ifnet *ifp = &sc->sc_arpcom.ac_if; struct txp_rsp_desc *rsp = NULL; struct txp_ext_desc *ext; int s; s = splnet(); txp_rxbuf_reclaim(sc); if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0, &rsp, 1)) goto out; if (rsp->rsp_numdesc != 6) goto out; if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0, NULL, NULL, NULL, 1)) goto out; ext = (struct txp_ext_desc *)(rsp + 1); ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 + ext[4].ext_1 + ext[4].ext_4; ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 + ext[2].ext_1; ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 + ext[1].ext_3; ifp->if_opackets += rsp->rsp_par2; ifp->if_ipackets += ext[2].ext_3; out: if (rsp != NULL) free(rsp, M_DEVBUF); splx(s); sc->sc_tick = timeout(txp_tick, sc, hz); return; } static void txp_start(ifp) struct ifnet *ifp; { struct txp_softc *sc = ifp->if_softc; struct 
txp_tx_ring *r = &sc->sc_txhir; struct txp_tx_desc *txd; struct txp_frag_desc *fxd; struct mbuf *m, *m0; struct txp_swdesc *sd; u_int32_t firstprod, firstcnt, prod, cnt; struct m_tag *mtag; if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) return; prod = r->r_prod; cnt = r->r_cnt; while (1) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; firstprod = prod; firstcnt = cnt; sd = sc->sc_txd + prod; sd->sd_mbuf = m; if ((TX_ENTRIES - cnt) < 4) goto oactive; txd = r->r_desc + prod; txd->tx_flags = TX_FLAGS_TYPE_DATA; txd->tx_numdesc = 0; txd->tx_addrlo = 0; txd->tx_addrhi = 0; txd->tx_totlen = 0; txd->tx_pflags = 0; if (++prod == TX_ENTRIES) prod = 0; if (++cnt >= (TX_ENTRIES - 4)) goto oactive; mtag = VLAN_OUTPUT_TAG(ifp, m); if (mtag != NULL) { txd->tx_pflags = TX_PFLAGS_VLAN | (htons(VLAN_TAG_VALUE(mtag)) << TX_PFLAGS_VLANTAG_S); } if (m->m_pkthdr.csum_flags & CSUM_IP) txd->tx_pflags |= TX_PFLAGS_IPCKSUM; #if 0 if (m->m_pkthdr.csum_flags & CSUM_TCP) txd->tx_pflags |= TX_PFLAGS_TCPCKSUM; if (m->m_pkthdr.csum_flags & CSUM_UDP) txd->tx_pflags |= TX_PFLAGS_UDPCKSUM; #endif fxd = (struct txp_frag_desc *)(r->r_desc + prod); for (m0 = m; m0 != NULL; m0 = m0->m_next) { if (m0->m_len == 0) continue; if (++cnt >= (TX_ENTRIES - 4)) goto oactive; txd->tx_numdesc++; fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG; fxd->frag_rsvd1 = 0; fxd->frag_len = m0->m_len; fxd->frag_addrlo = vtophys(mtod(m0, vm_offset_t)); fxd->frag_addrhi = 0; fxd->frag_rsvd2 = 0; if (++prod == TX_ENTRIES) { fxd = (struct txp_frag_desc *)r->r_desc; prod = 0; } else fxd++; } ifp->if_timer = 5; BPF_MTAP(ifp, m); WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod)); } r->r_prod = prod; r->r_cnt = cnt; return; oactive: ifp->if_flags |= IFF_OACTIVE; r->r_prod = firstprod; r->r_cnt = firstcnt; IF_PREPEND(&ifp->if_snd, m); return; } /* * Handle simple commands sent to the typhoon */ static int txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait) struct txp_softc *sc; u_int16_t id, in1, *out1; u_int32_t 
in2, in3, *out2, *out3; int wait; { struct txp_rsp_desc *rsp = NULL; if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait)) return (-1); if (!wait) return (0); if (out1 != NULL) *out1 = rsp->rsp_par1; if (out2 != NULL) *out2 = rsp->rsp_par2; if (out3 != NULL) *out3 = rsp->rsp_par3; free(rsp, M_DEVBUF); return (0); } static int txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait) struct txp_softc *sc; u_int16_t id, in1; u_int32_t in2, in3; struct txp_ext_desc *in_extp; u_int8_t in_extn; struct txp_rsp_desc **rspp; int wait; { struct txp_hostvar *hv = sc->sc_hostvar; struct txp_cmd_desc *cmd; struct txp_ext_desc *ext; u_int32_t idx, i; u_int16_t seq; if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) { device_printf(sc->sc_dev, "no free cmd descriptors\n"); return (-1); } idx = sc->sc_cmdring.lastwrite; cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); bzero(cmd, sizeof(*cmd)); cmd->cmd_numdesc = in_extn; cmd->cmd_seq = seq = sc->sc_seq++; cmd->cmd_id = id; cmd->cmd_par1 = in1; cmd->cmd_par2 = in2; cmd->cmd_par3 = in3; cmd->cmd_flags = CMD_FLAGS_TYPE_CMD | (wait ? 
CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID; idx += sizeof(struct txp_cmd_desc); if (idx == sc->sc_cmdring.size) idx = 0; for (i = 0; i < in_extn; i++) { ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); bcopy(in_extp, ext, sizeof(struct txp_ext_desc)); in_extp++; idx += sizeof(struct txp_cmd_desc); if (idx == sc->sc_cmdring.size) idx = 0; } sc->sc_cmdring.lastwrite = idx; WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite); if (!wait) return (0); for (i = 0; i < 10000; i++) { idx = hv->hv_resp_read_idx; if (idx != hv->hv_resp_write_idx) { *rspp = NULL; if (txp_response(sc, idx, id, seq, rspp)) return (-1); if (*rspp != NULL) break; } DELAY(50); } if (i == 1000 || (*rspp) == NULL) { device_printf(sc->sc_dev, "0x%x command failed\n", id); return (-1); } return (0); } static int txp_response(sc, ridx, id, seq, rspp) struct txp_softc *sc; u_int32_t ridx; u_int16_t id; u_int16_t seq; struct txp_rsp_desc **rspp; { struct txp_hostvar *hv = sc->sc_hostvar; struct txp_rsp_desc *rsp; while (ridx != hv->hv_resp_write_idx) { rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx); if (id == rsp->rsp_id && rsp->rsp_seq == seq) { *rspp = (struct txp_rsp_desc *)malloc( sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1), M_DEVBUF, M_NOWAIT); if ((*rspp) == NULL) return (-1); txp_rsp_fixup(sc, rsp, *rspp); return (0); } if (rsp->rsp_flags & RSP_FLAGS_ERROR) { device_printf(sc->sc_dev, "response error!\n"); txp_rsp_fixup(sc, rsp, NULL); ridx = hv->hv_resp_read_idx; continue; } switch (rsp->rsp_id) { case TXP_CMD_CYCLE_STATISTICS: case TXP_CMD_MEDIA_STATUS_READ: break; case TXP_CMD_HELLO_RESPONSE: device_printf(sc->sc_dev, "hello\n"); break; default: device_printf(sc->sc_dev, "unknown id(0x%x)\n", rsp->rsp_id); } txp_rsp_fixup(sc, rsp, NULL); ridx = hv->hv_resp_read_idx; hv->hv_resp_read_idx = ridx; } return (0); } static void txp_rsp_fixup(sc, rsp, dst) struct txp_softc *sc; struct txp_rsp_desc *rsp, *dst; { struct txp_rsp_desc *src = rsp; struct 
txp_hostvar *hv = sc->sc_hostvar; u_int32_t i, ridx; ridx = hv->hv_resp_read_idx; for (i = 0; i < rsp->rsp_numdesc + 1; i++) { if (dst != NULL) bcopy(src, dst++, sizeof(struct txp_rsp_desc)); ridx += sizeof(struct txp_rsp_desc); if (ridx == sc->sc_rspring.size) { src = sc->sc_rspring.base; ridx = 0; } else src++; sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx; } hv->hv_resp_read_idx = ridx; } static int txp_cmd_desc_numfree(sc) struct txp_softc *sc; { struct txp_hostvar *hv = sc->sc_hostvar; struct txp_boot_record *br = sc->sc_boot; u_int32_t widx, ridx, nfree; widx = sc->sc_cmdring.lastwrite; ridx = hv->hv_cmd_read_idx; if (widx == ridx) { /* Ring is completely free */ nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc); } else { if (widx > ridx) nfree = br->br_cmd_siz - (widx - ridx + sizeof(struct txp_cmd_desc)); else nfree = ridx - widx - sizeof(struct txp_cmd_desc); } return (nfree / sizeof(struct txp_cmd_desc)); } static void txp_stop(sc) struct txp_softc *sc; { struct ifnet *ifp; ifp = &sc->sc_arpcom.ac_if; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); untimeout(txp_tick, sc, sc->sc_tick); txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); txp_rxring_empty(sc); return; } static void txp_watchdog(ifp) struct ifnet *ifp; { return; } static int txp_ifmedia_upd(ifp) struct ifnet *ifp; { struct txp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_ifmedia; u_int16_t new_xcvr; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) new_xcvr = TXP_XCVR_10_FDX; else new_xcvr = TXP_XCVR_10_HDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) new_xcvr = TXP_XCVR_100_FDX; else new_xcvr = TXP_XCVR_100_HDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { new_xcvr = TXP_XCVR_AUTO; } else return (EINVAL); /* nothing to do 
*/ if (sc->sc_xcvr == new_xcvr) return (0); txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0, NULL, NULL, NULL, 0); sc->sc_xcvr = new_xcvr; return (0); } static void txp_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct txp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_ifmedia; u_int16_t bmsr, bmcr, anlpar; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, &bmsr, NULL, NULL, 1)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, &bmsr, NULL, NULL, 1)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0, &bmcr, NULL, NULL, 1)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0, &anlpar, NULL, NULL, 1)) goto bail; if (bmsr & BMSR_LINK) ifmr->ifm_status |= IFM_ACTIVE; if (bmcr & BMCR_ISO) { ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status = 0; return; } if (bmcr & BMCR_LOOP) ifmr->ifm_active |= IFM_LOOP; if (bmcr & BMCR_AUTOEN) { if ((bmsr & BMSR_ACOMP) == 0) { ifmr->ifm_active |= IFM_NONE; return; } if (anlpar & ANLPAR_T4) ifmr->ifm_active |= IFM_100_T4; else if (anlpar & ANLPAR_TX_FD) ifmr->ifm_active |= IFM_100_TX|IFM_FDX; else if (anlpar & ANLPAR_TX) ifmr->ifm_active |= IFM_100_TX; else if (anlpar & ANLPAR_10_FD) ifmr->ifm_active |= IFM_10_T|IFM_FDX; else if (anlpar & ANLPAR_10) ifmr->ifm_active |= IFM_10_T; else ifmr->ifm_active |= IFM_NONE; } else ifmr->ifm_active = ifm->ifm_cur->ifm_media; return; bail: ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status &= ~IFM_AVALID; } #ifdef TXP_DEBUG static void txp_show_descriptor(d) void *d; { struct txp_cmd_desc *cmd = d; struct txp_rsp_desc *rsp = d; struct txp_tx_desc *txd = d; struct txp_frag_desc *frgd = d; switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) { case CMD_FLAGS_TYPE_CMD: /* command descriptor */ printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq, cmd->cmd_par1, 
cmd->cmd_par2, cmd->cmd_par3); break; case CMD_FLAGS_TYPE_RESP: /* response descriptor */ printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq, rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3); break; case CMD_FLAGS_TYPE_DATA: /* data header (assuming tx for now) */ printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]", txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags); break; case CMD_FLAGS_TYPE_FRAG: /* fragment descriptor */ printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]", frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len, frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2); break; default: printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", cmd->cmd_flags & CMD_FLAGS_TYPE_M, cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq, cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3); break; } } #endif static void txp_set_filter(sc) struct txp_softc *sc; { struct ifnet *ifp = &sc->sc_arpcom.ac_if; u_int32_t crc, carry, hashbit, hash[2]; u_int16_t filter; u_int8_t octet; int i, j, mcnt = 0; struct ifmultiaddr *ifma; char *enm; if (ifp->if_flags & IFF_PROMISC) { filter = TXP_RXFILT_PROMISC; goto setit; } filter = TXP_RXFILT_DIRECT; if (ifp->if_flags & IFF_BROADCAST) filter |= TXP_RXFILT_BROADCAST; if (ifp->if_flags & IFF_ALLMULTI) filter |= TXP_RXFILT_ALLMULTI; else { hash[0] = hash[1] = 0; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; enm = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); mcnt++; crc = 0xffffffff; for (i = 0; i < ETHER_ADDR_LEN; i++) { octet = enm[i]; for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 
1 : 0) ^ (octet & 1); crc <<= 1; octet >>= 1; if (carry) crc = (crc ^ TXP_POLYNOMIAL) | carry; } } hashbit = (u_int16_t)(crc & (64 - 1)); hash[hashbit / 32] |= (1 << hashbit % 32); } if (mcnt > 0) { filter |= TXP_RXFILT_HASHMULTI; txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, hash[0], hash[1], NULL, NULL, NULL, 0); } } setit: txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0, NULL, NULL, NULL, 1); return; } static void txp_capabilities(sc) struct txp_softc *sc; { struct ifnet *ifp = &sc->sc_arpcom.ac_if; struct txp_rsp_desc *rsp = NULL; struct txp_ext_desc *ext; if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1)) goto out; if (rsp->rsp_numdesc != 1) goto out; ext = (struct txp_ext_desc *)(rsp + 1); sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK; sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK; ifp->if_capabilities = 0; if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) { sc->sc_tx_capability |= OFFLOAD_VLAN; sc->sc_rx_capability |= OFFLOAD_VLAN; ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; } #if 0 /* not ready yet */ if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) { sc->sc_tx_capability |= OFFLOAD_IPSEC; sc->sc_rx_capability |= OFFLOAD_IPSEC; ifp->if_capabilities |= IFCAP_IPSEC; } #endif if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) { sc->sc_tx_capability |= OFFLOAD_IPCKSUM; sc->sc_rx_capability |= OFFLOAD_IPCKSUM; ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_hwassist |= CSUM_IP; } if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) { #if 0 sc->sc_tx_capability |= OFFLOAD_TCPCKSUM; #endif sc->sc_rx_capability |= OFFLOAD_TCPCKSUM; ifp->if_capabilities |= IFCAP_HWCSUM; } if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) { #if 0 sc->sc_tx_capability |= OFFLOAD_UDPCKSUM; #endif sc->sc_rx_capability |= OFFLOAD_UDPCKSUM; ifp->if_capabilities |= IFCAP_HWCSUM; } ifp->if_capenable = ifp->if_capabilities; if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1)) goto out; 
out: if (rsp != NULL) free(rsp, M_DEVBUF); return; } Index: head/sys/dev/ubsec/ubsec.c =================================================================== --- head/sys/dev/ubsec/ubsec.c (revision 129878) +++ head/sys/dev/ubsec/ubsec.c (revision 129879) @@ -1,2870 +1,2871 @@ /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ /* * Copyright (c) 2000 Jason L. Wright (jason@thought.net) * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jason L. Wright * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); /* * uBsec 5[56]01, 58xx hardware crypto accelerator */ #include "opt_ubsec.h" #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* grr, #defines for gratuitous incompatibility in queue.h */ #define SIMPLEQ_HEAD STAILQ_HEAD #define SIMPLEQ_ENTRY STAILQ_ENTRY #define SIMPLEQ_INIT STAILQ_INIT #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL #define SIMPLEQ_EMPTY STAILQ_EMPTY #define SIMPLEQ_FIRST STAILQ_FIRST #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD_UNTIL #define SIMPLEQ_FOREACH STAILQ_FOREACH /* ditto for endian.h */ #define letoh16(x) le16toh(x) #define letoh32(x) le32toh(x) #ifdef UBSEC_RNDTEST #include #endif #include #include /* * Prototypes and count for the pci_device structure */ static int ubsec_probe(device_t); static int ubsec_attach(device_t); static int ubsec_detach(device_t); static int ubsec_suspend(device_t); static int ubsec_resume(device_t); static void ubsec_shutdown(device_t); static device_method_t ubsec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ubsec_probe), DEVMETHOD(device_attach, ubsec_attach), 
DEVMETHOD(device_detach, ubsec_detach), DEVMETHOD(device_suspend, ubsec_suspend), DEVMETHOD(device_resume, ubsec_resume), DEVMETHOD(device_shutdown, ubsec_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t ubsec_driver = { "ubsec", ubsec_methods, sizeof (struct ubsec_softc) }; static devclass_t ubsec_devclass; DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0); MODULE_DEPEND(ubsec, crypto, 1, 1, 1); #ifdef UBSEC_RNDTEST MODULE_DEPEND(ubsec, rndtest, 1, 1, 1); #endif static void ubsec_intr(void *); static int ubsec_newsession(void *, u_int32_t *, struct cryptoini *); static int ubsec_freesession(void *, u_int64_t); static int ubsec_process(void *, struct cryptop *, int); static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); static void ubsec_feed(struct ubsec_softc *); static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); static int ubsec_feed2(struct ubsec_softc *); static void ubsec_rng(void *); static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, struct ubsec_dma_alloc *, int); #define ubsec_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); static int ubsec_dmamap_aligned(struct ubsec_operand *op); static void ubsec_reset_board(struct ubsec_softc *sc); static void ubsec_init_board(struct ubsec_softc *sc); static void ubsec_init_pciregs(device_t dev); static void ubsec_totalreset(struct ubsec_softc *sc); static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q); static int ubsec_kprocess(void*, struct cryptkop *, int); static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int); static int 
ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int);
static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static int ubsec_ksigbits(struct crparam *);
static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);

SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters");

#ifdef UBSEC_DEBUG
static void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static void ubsec_dump_mcr(struct ubsec_mcr *);
static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *);

static int ubsec_debug = 0;
SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug,
    0, "control debugging msgs");
#endif

/* 32-bit register accessors over the memory-mapped BAR (bus-space handle). */
#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

/* In-place byte-order conversions applied to keys and IVs below. */
#define	SWAP32(x) (x) = htole32(ntohl((x)))
#define	HTOLE32(x) (x) = htole32(x)

struct ubsec_stats ubsecstats;
SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats,
    ubsec_stats, "driver statistics");

/*
 * Probe: claim any Bluesteel/Broadcom/Sun uBsec part we recognize.
 * Returns 0 (accept) on a known vendor/device pair, ENXIO otherwise.
 */
static int
ubsec_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
	    (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
	    (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 ||
	     pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823
	     ))
		return (0);
	return (ENXIO);
}

/* Map vendor/device ids to a human-readable part name for the attach banner. */
static const char*
ubsec_partname(struct ubsec_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_BROADCOM:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_BROADCOM_5801:	return "Broadcom 5801";
		case PCI_PRODUCT_BROADCOM_5802:	return "Broadcom 5802";
		case PCI_PRODUCT_BROADCOM_5805:	return "Broadcom 5805";
		case PCI_PRODUCT_BROADCOM_5820:	return "Broadcom 5820";
		case PCI_PRODUCT_BROADCOM_5821:	return "Broadcom 5821";
		case PCI_PRODUCT_BROADCOM_5822:	return "Broadcom 5822";
		case PCI_PRODUCT_BROADCOM_5823:	return "Broadcom 5823";
		}
		return "Broadcom unknown-part";
	case PCI_VENDOR_BLUESTEEL:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601";
		}
		return "Bluesteel unknown-part";
	case PCI_VENDOR_SUN:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SUN_5821:	return "Sun Crypto 5821";
		case PCI_PRODUCT_SUN_SCA1K:	return "Sun Crypto 1K";
		}
		return "Sun unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

/* Default entropy-harvest callback used when rndtest(4) is not attached. */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}

/*
 * Attach: initialize the softc, derive per-part capability flags from
 * the PCI ids, map the register BAR, hook the interrupt, allocate the
 * DMA descriptor area and register with the opencrypto framework.
 */
static int
ubsec_attach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_q2free);

	/* XXX handle power management */

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;

	/* Per-part capabilities: which parts have key unit / RNG / etc. */
	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
	    pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX |
	    UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;

	if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823)) ||
	    (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
	    (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K ||
	     pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) {
		/* NB: the 5821/5822 defines some additional status bits */
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
	}

	/* Enable memory-space decoding and bus mastering, then verify. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(cmd & PCIM_CMD_MEMEN)) {
		device_printf(dev, "failed to enable memory mapping\n");
		goto bad;
	}

	if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
		device_printf(dev, "failed to enable bus mastering\n");
		goto bad;
	}

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    ubsec_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       0x3ffff,			/* maxsize */
			       UBS_MAX_SCATTER,		/* nsegments */
			       0xffff,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* lockfunc, lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	/* Pre-allocate the fixed pool of request structures + DMA chunks. */
	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			device_printf(dev, "cannot allocate queue buffers\n");
			break;
		}

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			device_printf(dev, "cannot allocate dma buffers\n");
			free(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}
	mtx_init(&sc->sc_mcr1lock, device_get_nameunit(dev),
	    "mcr1 operations", MTX_DEF);
	mtx_init(&sc->sc_freeqlock, device_get_nameunit(dev),
	    "mcr1 free q", MTX_DEF);

	device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc));

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(dev);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;
#ifdef UBSEC_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif

		/* RNG DMA buffers; failures just disable the RNG path. */
		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		/* Harvest at ~100 Hz, or every tick if hz is lower. */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
skip_rng:
	;
	}
#endif /* UBSEC_NO_RNG */
	mtx_init(&sc->sc_mcr2lock, device_get_nameunit(dev),
	    "mcr2 operations", MTX_DEF);

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
#endif
	}
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
ubsec_detach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	/* disable interrupts */
	WRITE_REG(sc, BS_CTRL, READ_REG(sc, BS_CTRL) &~
	    (BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR));

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef UBSEC_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Release every pre-allocated request and its DMA chunk. */
	while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		struct ubsec_q *q;

		q = SIMPLEQ_FIRST(&sc->sc_freequeue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next);
		ubsec_dma_free(sc, &q->q_dma->d_alloc);
		free(q, M_DEVBUF);
	}
	mtx_destroy(&sc->sc_mcr1lock);
#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
		ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
	}
#endif /* UBSEC_NO_RNG */
	mtx_destroy(&sc->sc_mcr2lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_dmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ubsec_shutdown(device_t dev)
{
#ifdef notyet
	ubsec_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.
 */
static int
ubsec_suspend(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

/* Device resume routine: currently only clears the suspended flag. */
static int
ubsec_resume(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX retore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * UBSEC Interrupt routine
 */
static void
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0)
		return;

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		mtx_lock(&sc->sc_mcr1lock);
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			/* Stop at the first MCR the chip has not finished. */
			if ((dmap->d_dma->d_mcr.mcr_flags
			    & htole16(UBS_MCR_DONE)) == 0)
				break;

			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1+npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too, they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if(q->q_stacked_mcr[i]) {
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				} else {
					break;
				}
			}
			ubsec_callback(sc, q);
		}
		/*
		 * Don't send any more packet to chip if there has been
		 * a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
		mtx_unlock(&sc->sc_mcr1lock);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		mtx_lock(&sc->sc_mcr2lock);
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			ubsec_dma_sync(&q2->q_mcr,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
				/* Not done: restore pre-DMA sync state. */
				ubsec_dma_sync(&q2->q_mcr,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q2, q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packet to chip if there has been
			 * a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
		mtx_unlock(&sc->sc_mcr2lock);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			printf("dmaerr %s@%08x\n",
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		mtx_lock(&sc->sc_mcr1lock);
		ubsec_totalreset(sc);
		ubsec_feed(sc);
		mtx_unlock(&sc->sc_mcr1lock);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			device_printf(sc->sc_dev, "wakeup crypto (%x)\n",
			    sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;

	/*
	 * Decide how many ops to combine in a single MCR.  We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure.  Note that
	 * aggregation only happens if ops are marked batch'able.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
	if (sc->sc_nqueue == 0)
		return;
	if (sc->sc_nqueue > 1) {
		npkts = 0;
		SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) {
			npkts++;
			if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0)
				break;
		}
	} else
		npkts = 1;
	/*
	 * Check device status before going any further.
	 */
	if ((stat = READ_REG(sc, BS_STAT)) &
	    (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else
			ubsecstats.hst_mcr1full++;
		return;
	}
	if (sc->sc_nqueue > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = sc->sc_nqueue;
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts < 2)				/* special case 1 op */
		goto feed1;

	ubsecstats.hst_totbatch += npkts-1;
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("merging %d records\n", npkts);
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	/* Fold the remaining ops' MCR headers into the lead op's chunk. */
	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    BUS_DMASYNC_PREREAD);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q2, q_next);
		--sc->sc_nqueue;

		v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) +
		    sizeof(struct ubsec_mcr) - sizeof(struct ubsec_mcr_add));
		bcopy(v, &q->q_dma->d_dma->d_mcradd[i],
		    sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	ubsec_dma_sync(&q->q_dma->d_alloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Kick the chip: write the physical address of the lead MCR. */
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;
feed1:
	q = SIMPLEQ_FIRST(&sc->sc_queue);

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&q->q_dma->d_alloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("feed1: q->chip %p %08x stat %08x\n",
		    q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr),
		    stat);
#endif /* UBSEC_DEBUG */
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next);
	--sc->sc_nqueue;
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip++;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	return;
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc = arg;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Accept at most one MAC and one cipher descriptor. */
	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	/* Find a free session slot, growing the array if none is free. */
	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct ubsec_session *)malloc((sesn + 1) *
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			/* Scrub old array before freeing: it held key data. */
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}
	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* get an IV, network byte order */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			/* Single DES: replicate the key for the 3DES engine. */
			bcopy(encini->cri_key, &ses->ses_deskey[0], 8);
			bcopy(encini->cri_key, &ses->ses_deskey[2], 8);
			bcopy(encini->cri_key, &ses->ses_deskey[4], 8);
		} else
			bcopy(encini->cri_key, ses->ses_deskey, 24);

		SWAP32(ses->ses_deskey[0]);
		SWAP32(ses->ses_deskey[1]);
		SWAP32(ses->ses_deskey[2]);
		SWAP32(ses->ses_deskey[3]);
		SWAP32(ses->ses_deskey[4]);
		SWAP32(ses->ses_deskey[5]);
	}

	if (macini) {
		/*
		 * Precompute the inner/outer HMAC digest states.  The key
		 * is XOR-toggled in place and restored by the final loop.
		 */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hminner,
			    sizeof(sha1ctx.h.b32));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hmouter,
			    sizeof(sha1ctx.h.b32));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
ubsec_freesession(void *arg, u_int64_t tid)
{
	struct ubsec_softc *sc = arg;
	int session, ret;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sc == NULL)
		return (EINVAL);

	session = UBSEC_SESSION(sid);
	if (session < sc->sc_nsessions) {
		/* Zeroing also clears ses_used, freeing the slot. */
		bzero(&sc->sc_sessions[session],
			sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;

	return (ret);
}

/* bus_dma callback: record the segment list for one mapped operand. */
static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
    int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER,
		("Too many DMA segments returned when mapping operand"));
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
			(u_int) mapsize, nsegs);
#endif
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/*
 * Process one symmetric crypto request: validate it, grab a free
 * request structure, build the packet context and scatter/gather
 * lists, and queue it for the chip (feeding immediately unless the
 * framework hints that more requests are coming).
 */
static int
ubsec_process(void *arg, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
	int err = 0, i, j, nicealign;
	struct ubsec_softc *sc = arg;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_pktctx ctx;
	struct ubsec_dma *dmap = NULL;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		ubsecstats.hst_badsession++;
		return (EINVAL);
	}

	mtx_lock(&sc->sc_freeqlock);
	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		/* Out of request structures: ask crypto layer to back off. */
		ubsecstats.hst_queuefull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_freeqlock);
		return (ERESTART);
	}
	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next);
	mtx_unlock(&sc->sc_freeqlock);

	dmap = q->q_dma; /* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&ctx, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf
		    *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Sort the (at most two) descriptors into MAC and cipher roles.
	 * The chip only supports decrypt-then-verify and sign-then-encrypt
	 * orderings.
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
			crd2->crd_alg == CRYPTO_3DES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
			crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		encoffset = enccrd->crd_skip;
		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else {
				ctx.pc_iv[0] = ses->ses_iv[0];
				ctx.pc_iv[1] = ses->ses_iv[1];
			}

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    8, (caddr_t)ctx.pc_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    8, (caddr_t)ctx.pc_iv);
			}
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    8, (caddr_t)ctx.pc_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (caddr_t)ctx.pc_iv);
		}

		ctx.pc_deskey[0] = ses->ses_deskey[0];
		ctx.pc_deskey[1] = ses->ses_deskey[1];
		ctx.pc_deskey[2] = ses->ses_deskey[2];
		ctx.pc_deskey[3] = ses->ses_deskey[3];
		ctx.pc_deskey[4] = ses->ses_deskey[4];
		ctx.pc_deskey[5] = ses->ses_deskey[5];
		SWAP32(ctx.pc_iv[0]);
		SWAP32(ctx.pc_iv[1]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);

	/* DMA-map the source operand (mbuf chain or uio). */
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) {
		ubsecstats.hst_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(&q->q_src);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
#endif
	/* Build the input scatter list, skipping the first sskip bytes. */
	for (i = j = 0; i < q->q_src_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_segs[i].ds_len;
		bus_addr_t packp = q->q_src_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		/* Hardware limit on a single fragment length. */
		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/* MAC-only: output goes straight to the MAC buffer. */
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("opkt: %x %x %x\n",
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				ubsecstats.hst_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
			    &q->q_dst_map)) {
				ubsecstats.hst_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, ubsec_op_cb, &q->q_dst,
			    BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				ubsecstats.hst_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst = q->q_src;
			} else {
				/*
				 * Misaligned source: build a fresh mbuf
				 * chain as the destination and copy the
				 * non-encrypted portions into it.
				 */
				int totlen, len;
				struct mbuf *m, *top, **mp;

				ubsecstats.hst_unaligned++;
				totlen = q->q_src_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_DONTWAIT, MT_DATA);
					if (m && !m_dup_pkthdr(m, q->q_src_m,
					    M_DONTWAIT)) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					ubsecstats.hst_nombuf++;
					err = sc->sc_nqueue ? ERESTART : ENOMEM;
					goto errout;
				}
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						ubsecstats.hst_nomcl++;
						err = sc->sc_nqueue
						    ? ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							ubsecstats.hst_nombuf++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_DONTWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							ubsecstats.hst_nomcl++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				if (bus_dmamap_create(sc->sc_dmat,
				    BUS_DMA_NOWAIT, &q->q_dst_map) != 0) {
					ubsecstats.hst_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    ubsec_op_cb, &q->q_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					q->q_dst_map);
					q->q_dst_map = NULL;
					ubsecstats.hst_noload++;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			ubsecstats.hst_badflags++;
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("dst skip: %d\n", dskip);
#endif
		/* Build the output scatter list, mirroring the input walk. */
		for (i = j = 0; i < q->q_dst_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_long *ctxl;

		ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform small context into long context */
		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
		ctxl->pc_flags = ctx.pc_flags;
		ctxl->pc_offset = ctx.pc_offset;
		for (i = 0; i < 6; i++)
			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxl->pc_iv[0] = ctx.pc_iv[0];
		ctxl->pc_iv[1] = ctx.pc_iv[1];
	} else
		bcopy(&ctx, dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx),
		    sizeof(struct ubsec_pktctx));

	mtx_lock(&sc->sc_mcr1lock);
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_size;
	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR)
		ubsec_feed(sc);
	mtx_unlock(&sc->sc_mcr1lock);

	return (0);

errout:
	/* Unwind: free any destination chain/maps and return q to the pool. */
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		mtx_lock(&sc->sc_freeqlock);
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		mtx_unlock(&sc->sc_freeqlock);
	}
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) { struct cryptop *crp = (struct cryptop *)q->q_crp; struct cryptodesc *crd; struct ubsec_dma *dmap = q->q_dma; ubsecstats.hst_opackets++; ubsecstats.hst_obytes += dmap->d_alloc.dma_size; ubsec_dma_sync(&dmap->d_alloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { m_freem(q->q_src_m); crp->crp_buf = (caddr_t)q->q_dst_m; } ubsecstats.hst_obytes += ((struct mbuf *)crp->crp_buf)->m_len; /* copy out IV for future use */ if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC) continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copydata((struct mbuf *)crp->crp_buf, crd->crd_skip + crd->crd_len - 8, 8, (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); else if (crp->crp_flags & CRYPTO_F_IOV) { cuio_copydata((struct uio *)crp->crp_buf, crd->crd_skip + crd->crd_len - 8, 8, (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); } break; } } for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_MD5_HMAC && crd->crd_alg != CRYPTO_SHA1_HMAC) continue; if (crp->crp_flags & CRYPTO_F_IMBUF) m_copyback((struct mbuf *)crp->crp_buf, crd->crd_inject, 12, (caddr_t)dmap->d_dma->d_macbuf); else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) bcopy((caddr_t)dmap->d_dma->d_macbuf, crp->crp_mac, 12); break; } mtx_lock(&sc->sc_freeqlock); SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 
	mtx_unlock(&sc->sc_freeqlock);

	crypto_done(crp);
}

/*
 * Copy data between two mbuf chains, skipping the destination byte
 * range [hoffset, toffset).
 *
 * NOTE(review): slen and dlen are decremented even for skipped bytes
 * while only the data pointers stall -- presumably intentional so the
 * skip window is measured in stream offsets; confirm against callers
 * before touching this loop.
 */
static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	caddr_t dptr, sptr;

	j = 0;			/* running stream offset */
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		/* Copy as much as fits in the current src/dst mbufs. */
		for (i = 0; i < min(slen, dlen); i++) {
			/* Copy only outside the [hoffset, toffset) window. */
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			/* Advance to the next source mbuf; done when none. */
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			/* Advance to the next destination mbuf. */
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}

/*
 * feed the key generator, must be called at splimp() or higher.
 *
 * Pushes queued key-op requests (MCR2) at the chip until the chip
 * reports full or the software queue drains.
 */
static int
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		/* Sync descriptors before handing them to the device. */
		ubsec_dma_sync(&q->q_mcr,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q, q_next);
		--sc->sc_nqueue2;
		/* Track the request as on-chip until its completion irq. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
	return (0);
}

/*
 * Callback for handling random numbers
 * (and, despite the name, all other MCR2/key-op completions).
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;

		/* Hand freshly generated bytes to the entropy harvester. */
		ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD);
		(*sc->sc_harvest)(sc->sc_rndtest,
			rng->rng_buf.dma_vaddr,
			UBSEC_RNG_BUFSIZ*sizeof (u_int32_t));
		rng->rng_used = 0;
		/* Re-arm the periodic RNG request. */
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
		break;
	}
#endif
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		rlen = (me->me_modbits + 7) / 8;
		clen =
(krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE); if (clen < rlen) krp->krp_status = E2BIG; else { if (sc->sc_flags & UBS_FLAGS_HWNORM) { bzero(krp->krp_param[krp->krp_iparams].crp_p, (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8); bcopy(me->me_C.dma_vaddr, krp->krp_param[krp->krp_iparams].crp_p, (me->me_modbits + 7) / 8); } else ubsec_kshift_l(me->me_shiftbits, me->me_C.dma_vaddr, me->me_normbits, krp->krp_param[krp->krp_iparams].crp_p, krp->krp_param[krp->krp_iparams].crp_nbits); } crypto_kdone(krp); /* bzero all potentially sensitive data */ bzero(me->me_E.dma_vaddr, me->me_E.dma_size); bzero(me->me_M.dma_vaddr, me->me_M.dma_size); bzero(me->me_C.dma_vaddr, me->me_C.dma_size); bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. */ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; u_int len; krp = rp->rpr_krp; ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD); len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; bcopy(rp->rpr_msgout.dma_vaddr, krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); crypto_kdone(krp); bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. 
*/ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); break; } default: device_printf(sc->sc_dev, "unknown ctx op: %x\n", letoh16(ctx->ctx_op)); break; } } #ifndef UBSEC_NO_RNG static void ubsec_rng(void *vsc) { struct ubsec_softc *sc = vsc; struct ubsec_q2_rng *rng = &sc->sc_rng; struct ubsec_mcr *mcr; struct ubsec_ctx_rngbypass *ctx; mtx_lock(&sc->sc_mcr2lock); if (rng->rng_used) { mtx_unlock(&sc->sc_mcr2lock); return; } sc->sc_nqueue2++; if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) goto out; mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = 0; mcr->mcr_reserved = mcr->mcr_pktlen = 0; mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & UBS_PKTBUF_LEN); mcr->mcr_opktbuf.pb_next = 0; ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS); rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); rng->rng_used = 1; ubsec_feed2(sc); ubsecstats.hst_rng++; mtx_unlock(&sc->sc_mcr2lock); return; out: /* * Something weird happened, generate our own call back. 
*/ sc->sc_nqueue2--; mtx_unlock(&sc->sc_mcr2lock); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); } #endif /* UBSEC_NO_RNG */ static void ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int ubsec_dma_malloc( struct ubsec_softc *sc, bus_size_t size, struct ubsec_dma_alloc *dma, int mapflags ) { int r; /* XXX could specify sc_dmat as parent but that just adds overhead */ r = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_0; } r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmamap_create failed; error %u\n", r); goto fail_1; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmammem_alloc failed; size %zu, error %u\n", size, r); goto fail_2; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ubsec_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_3; } dma->dma_size = size; return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dmamap_destroy(dma->dma_tag, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (r); } static void ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) { 
	/* Tear down the DMA buffer in reverse order of construction. */
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	/* Force little-endian descriptor access, enable MCR1 interrupts. */
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	/* MCR2 interrupts only wanted when key ops or the RNG are in use. */
	if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	/* Let the hardware normalize when the chip supports it. */
	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(device_t dev)
{
#if 0
	/* Dead OpenBSD-style code, kept for reference. */
	u_int32_t misc;

	misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
	misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
	    | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
	misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
	    | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT);
	pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);
#endif

	/*
	 * This will set the cache line size to 1, this will
	 * force the BCM58xx chip just to do burst read/writes.
	 * Cache line read/writes are too slow
	 */
	pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp()
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	/* Fail every request the chip still holds and reclaim its slot. */
	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}

/*
 * free a ubsec_q
 * It is assumed that the caller is within splimp().
 *
 * Completes the request -- and any requests stacked behind it in the
 * same MCR -- with EFAULT, returning the entries to the free list.
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	/* First fail the requests aggregated behind this MCR. */
	for (i = 0; i < npkts; i++) {
		if(q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			/* Free a separately allocated destination chain. */
			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			/* Stacked entries are packed; first NULL ends them. */
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp()
 */
static void
ubsec_totalreset(struct ubsec_softc *sc)
{
	/* Hard-reset the chip, reprogram it, then fail anything in flight. */
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}

/*
 * Check that every DMA segment address is 4-byte aligned and that every
 * segment but the last has a length that is a multiple of 4.
 * Returns 1 when the mapping is usable by the chip, 0 otherwise.
 */
static int
ubsec_dmamap_aligned(struct ubsec_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) &&
		    (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Release all DMA memory held by a completed key operation.
 */
static void
ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	switch (q->q_type) {
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;

		ubsec_dma_free(sc, &me->me_q.q_mcr);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
		ubsec_dma_free(sc, &me->me_M);
		ubsec_dma_free(sc, &me->me_E);
		ubsec_dma_free(sc, &me->me_C);
		ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;

		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		ubsec_dma_free(sc, &rp->rpr_msgin);
		ubsec_dma_free(sc, &rp->rpr_msgout);
		free(rp, M_DEVBUF);
		break;
	}
	default:
		device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type);
		break;
	}
}

/*
 * Crypto framework entry point for asymmetric (key) operations.
 * Dispatches CRK_MOD_EXP / CRK_MOD_EXP_CRT to the matching
 * implementation; unsupported ops complete with EOPNOTSUPP.
 */
static int
ubsec_kprocess(void *arg, struct cryptkop *krp, int hint)
{
	struct ubsec_softc *sc = arg;
	int r;

	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);

	/*
	 * Reclaim completed key ops that could not be freed at
	 * completion time and were parked on sc_q2free instead.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		struct ubsec_q2 *q;

		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q, q_next);
		ubsec_kfree(sc, q);
	}

	switch (krp->krp_op) {
	case CRK_MOD_EXP:
		/* NOTE(review): r is assigned here but never used. */
		if (sc->sc_flags & UBS_FLAGS_HWNORM)
			r = ubsec_kprocess_modexp_hw(sc, krp, hint);
		else
			r = ubsec_kprocess_modexp_sw(sc, krp, hint);
		break;
	case CRK_MOD_EXP_CRT:
		return (ubsec_kprocess_rsapriv(sc, krp, hint));
	default:
		device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n",
		    krp->krp_op);
		krp->krp_status = EOPNOTSUPP;
		crypto_kdone(krp);
		return (0);
	}
	return (0);		/* silence compiler */
}

/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
 */
static int ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. */ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, me->me_M.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, 
krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, me->me_E.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32(normbits / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, ctx->me_N, normbits); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(nbits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. 
*/ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... */ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexp++; mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_map != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_map != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_map != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_map != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_map != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_map != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) */ static int ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto 
errout; } shiftbits = normbits - nbits; /* XXX ??? */ me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. */ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } bzero(me->me_M.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, me->me_M.dma_vaddr, (mbits + 7) / 8); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } bzero(me->me_E.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, me->me_E.dma_vaddr, (ebits + 7) / 8); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32((ebits + 7) / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = 
htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, (nbits + 7) / 8); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(ebits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
*/ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_map != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_map != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_map != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_map != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_map != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_map != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } static int ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_rsapriv *rp = NULL; struct ubsec_mcr *mcr; struct ubsec_ctx_rsapriv *ctx; int err = 0; u_int padlen, msglen; msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); if (msglen > padlen) padlen = msglen; if (padlen <= 256) padlen = 256; else if (padlen <= 384) padlen = 384; else if (padlen <= 512) padlen = 512; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) padlen = 768; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) padlen = 1024; else { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { err = E2BIG; goto errout; } rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT); if (rp == NULL) return (ENOMEM); bzero(rp, sizeof *rp); rp->rpr_krp = krp; rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 
if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &rp->rpr_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), &rp->rpr_q.q_ctx, 0)) { err = ENOMEM; goto errout; } ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; bzero(ctx, sizeof *ctx); /* Copy in p */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, &ctx->rpr_buf[0 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); /* Copy in q */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, &ctx->rpr_buf[1 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); /* Copy in dp */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, &ctx->rpr_buf[2 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); /* Copy in dq */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, &ctx->rpr_buf[3 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); /* Copy in pinv */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, &ctx->rpr_buf[4 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); msglen = padlen * 2; /* Copy in input message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { /* Is this likely? */ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, rp->rpr_msgin.dma_vaddr, (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); /* Prepare space for output message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { /* Is this likely? 
*/ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); mcr->mcr_reserved = 0; mcr->mcr_pktlen = htole16(msglen); mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); #ifdef DIAGNOSTIC if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { panic("%s: rsapriv: invalid msgin %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size); } if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { panic("%s: rsapriv: invalid msgout %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size); } #endif ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); ctx->rpr_q_len = htole16(padlen); ctx->rpr_p_len = htole16(padlen); /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); /* Enqueue and we're done... 
*/ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexpcrt++; mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (rp != NULL) { if (rp->rpr_q.q_mcr.dma_map != NULL) ubsec_dma_free(sc, &rp->rpr_q.q_mcr); if (rp->rpr_msgin.dma_map != NULL) { bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); ubsec_dma_free(sc, &rp->rpr_msgin); } if (rp->rpr_msgout.dma_map != NULL) { bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); ubsec_dma_free(sc, &rp->rpr_msgout); } free(rp, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) { printf("addr 0x%x (0x%x) next 0x%x\n", pb->pb_addr, pb->pb_len, pb->pb_next); } static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) { printf("CTX (0x%x):\n", c->ctx_len); switch (letoh16(c->ctx_op)) { case UBS_CTXOP_RNGBYPASS: case UBS_CTXOP_RNGSHA1: break; case UBS_CTXOP_MODEXP: { struct ubsec_ctx_modexp *cx = (void *)c; int i, len; printf(" Elen %u, Nlen %u\n", letoh16(cx->me_E_len), letoh16(cx->me_N_len)); len = (cx->me_N_len + 7)/8; for (i = 0; i < len; i++) printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]); printf("\n"); break; } default: printf("unknown context: %x\n", c->ctx_op); } printf("END CTX\n"); } static void ubsec_dump_mcr(struct ubsec_mcr *mcr) { volatile struct ubsec_mcr_add *ma; int i; printf("MCR:\n"); printf(" pkts: %u, flags 0x%x\n", letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), letoh16(ma->mcr_reserved)); printf(" %d: ipkt ", i); ubsec_dump_pb(&ma->mcr_ipktbuf); printf(" %d: opkt ", i); ubsec_dump_pb(&ma->mcr_opktbuf); ma++; } printf("END MCR\n"); } #endif /* UBSEC_DEBUG */ /* * Return the number of significant bits of a big number. 
 */
static int
ubsec_ksigbits(struct crparam *cr)
{
	u_int plen = (cr->crp_nbits + 7) / 8;
	int i, sig = plen * 8;
	u_int8_t c, *p = cr->crp_p;

	/*
	 * Scan from the most-significant byte (the parameter is stored
	 * least-significant byte first) down to the first non-zero byte,
	 * then count the significant bits within that byte.
	 */
	for (i = plen - 1; i >= 0; i--) {
		c = p[i];
		if (c != 0) {
			while ((c & 0x80) == 0) {
				sig--;
				c <<= 1;
			}
			break;
		}
		sig -= 8;
	}
	return (sig);
}

/*
 * Shift a big number (little-endian byte array) toward its
 * most-significant end by "shiftbits" bits while widening it from
 * srcbits to dstbits.
 *
 * NOTE(review): assumes dstbits >= srcbits; "dlen - slen" is unsigned
 * and would wrap otherwise -- confirm against callers.
 */
static void
ubsec_kshift_r(
	u_int shiftbits,
	u_int8_t *src, u_int srcbits,
	u_int8_t *dst, u_int dstbits)
{
	u_int slen, dlen;
	int i, si, di, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	/* Copy the source, then zero-extend to the destination width. */
	for (i = 0; i < slen; i++)
		dst[i] = src[i];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	/* Whole-byte portion of the shift first... */
	n = shiftbits / 8;
	if (n != 0) {
		si = dlen - n - 1;
		di = dlen - 1;
		while (si >= 0)
			dst[di--] = dst[si--];
		while (di >= 0)
			dst[di--] = 0;
	}
	/* ...then the remaining sub-byte shift, done in place. */
	n = shiftbits % 8;
	if (n != 0) {
		for (i = dlen - 1; i > 0; i--)
			dst[i] = (dst[i] << n) |
			    (dst[i - 1] >> (8 - n));
		dst[0] = dst[0] << n;
	}
}

/*
 * Shift a big number (little-endian byte array) toward its
 * least-significant end by "shiftbits" bits while narrowing it from
 * srcbits to dstbits.
 *
 * NOTE(review): reads src[i + n], i.e. up to n bytes past slen --
 * callers apparently pass a buffer that is physically wider than
 * srcbits; confirm before reusing elsewhere.
 */
static void
ubsec_kshift_l(
	u_int shiftbits,
	u_int8_t *src, u_int srcbits,
	u_int8_t *dst, u_int dstbits)
{
	int slen, dlen, i, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	/* Whole-byte portion of the shift while copying, zero the tail. */
	n = shiftbits / 8;
	for (i = 0; i < slen; i++)
		dst[i] = src[i + n];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	/* Remaining sub-byte shift, done in place on dst. */
	n = shiftbits % 8;
	if (n != 0) {
		for (i = 0; i < (dlen - 1); i++)
			dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
		dst[dlen - 1] = dst[dlen - 1] >> n;
	}
}
Index: head/sys/dev/usb/if_aue.c
===================================================================
--- head/sys/dev/usb/if_aue.c	(revision 129878)
+++ head/sys/dev/usb/if_aue.c	(revision 129879)
@@ -1,1574 +1,1575 @@
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ADMtek AN986 Pegasus and AN8511 Pegasus II USB to ethernet driver. * Datasheet is available from http://www.admtek.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Pegasus chip uses four USB "endpoints" to provide 10/100 ethernet * support: the control endpoint for reading/writing registers, burst * read endpoint for packet reception, burst write for packet transmission * and one for "interrupts." 
The chip uses the same RX filter scheme * as the other ADMtek ethernet parts: one perfect filter entry for the * the station address and a 64-bit multicast hash table. The chip supports * both MII and HomePNA attachments. * * Since the maximum data transfer speed of USB is supposed to be 12Mbps, * you're never really going to get 100Mbps speeds from this device. I * think the idea is to allow the device to connect to 10 or 100Mbps * networks, not necessarily to provide 100Mbps performance. Also, since * the controller uses an external PHY chip, it's possible that board * designers might simply choose a 10Mbps PHY. * * Registers are accessed using usbd_do_request(). Packet transfers are * done using usbd_transfer() and friends. */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include #include #include #include MODULE_DEPEND(aue, usb, 1, 1, 1); MODULE_DEPEND(aue, ether, 1, 1, 1); MODULE_DEPEND(aue, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/products. 
*/ struct aue_type { struct usb_devno aue_dev; u_int16_t aue_flags; #define LSYS 0x0001 /* use Linksys reset */ #define PNA 0x0002 /* has Home PNA */ #define PII 0x0004 /* Pegasus II chip */ }; Static const struct aue_type aue_devs[] = { {{ USB_VENDOR_3COM, USB_PRODUCT_3COM_3C460B}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX1}, PNA|PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX2}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_UFE1000}, LSYS }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX4}, PNA }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX5}, PNA }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX6}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX7}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX8}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX9}, PNA }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX10}, 0 }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_DSB650TX_PNA}, 0 }, {{ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_USB320_EC}, 0 }, {{ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SS1001}, PII }, {{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUS}, PNA }, {{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII}, PII }, {{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII_2}, PII }, {{ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_USB2LAN}, PII }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USB100}, 0 }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBLP100}, PNA }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBEL100}, 0 }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBE100}, PII }, {{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TX}, 0 }, {{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TXS},PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX4}, LSYS|PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX1}, LSYS }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX}, LSYS }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX_PNA}, PNA }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX3}, LSYS|PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX2}, 
LSYS|PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650}, LSYS }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX0}, 0 }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX1}, LSYS }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX2}, 0 }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX3}, LSYS }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBLTX}, PII }, {{ USB_VENDOR_ELSA, USB_PRODUCT_ELSA_USB2ETHERNET}, 0 }, {{ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_UF100}, PII }, {{ USB_VENDOR_HP, USB_PRODUCT_HP_HN210E}, PII }, {{ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETTX}, 0 }, {{ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETTXS}, PII }, {{ USB_VENDOR_KINGSTON, USB_PRODUCT_KINGSTON_KNU101TX}, 0 }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TX1}, LSYS|PII }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10T}, LSYS }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100TX}, LSYS }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100H1}, LSYS|PNA }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TA}, LSYS }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TX2}, LSYS|PII }, {{ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_MN110}, PII }, {{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX1}, 0 }, {{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX5}, 0 }, {{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUA2TX5}, PII }, {{ USB_VENDOR_SIEMENS, USB_PRODUCT_SIEMENS_SPEEDSTREAM}, PII }, {{ USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTNIC},PII }, {{ USB_VENDOR_SMC, USB_PRODUCT_SMC_2202USB}, 0 }, {{ USB_VENDOR_SMC, USB_PRODUCT_SMC_2206USB}, PII }, {{ USB_VENDOR_SOHOWARE, USB_PRODUCT_SOHOWARE_NUB100}, 0 }, }; #define aue_lookup(v, p) ((const struct aue_type *)usb_lookup(aue_devs, v, p)) Static int aue_match(device_ptr_t); Static int aue_attach(device_ptr_t); Static int aue_detach(device_ptr_t); Static void aue_reset_pegasus_II(struct aue_softc *sc); Static int aue_tx_list_init(struct aue_softc *); Static int aue_rx_list_init(struct aue_softc *); Static int aue_newbuf(struct aue_softc *, struct 
aue_chain *, struct mbuf *); Static int aue_encap(struct aue_softc *, struct mbuf *, int); #ifdef AUE_INTR_PIPE Static void aue_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); #endif Static void aue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void aue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void aue_tick(void *); Static void aue_rxstart(struct ifnet *); Static void aue_start(struct ifnet *); Static int aue_ioctl(struct ifnet *, u_long, caddr_t); Static void aue_init(void *); Static void aue_stop(struct aue_softc *); Static void aue_watchdog(struct ifnet *); Static void aue_shutdown(device_ptr_t); Static int aue_ifmedia_upd(struct ifnet *); Static void aue_ifmedia_sts(struct ifnet *, struct ifmediareq *); Static void aue_eeprom_getword(struct aue_softc *, int, u_int16_t *); Static void aue_read_eeprom(struct aue_softc *, caddr_t, int, int, int); Static int aue_miibus_readreg(device_ptr_t, int, int); Static int aue_miibus_writereg(device_ptr_t, int, int, int); Static void aue_miibus_statchg(device_ptr_t); Static void aue_setmulti(struct aue_softc *); Static uint32_t aue_mchash(const uint8_t *); Static void aue_reset(struct aue_softc *); Static int aue_csr_read_1(struct aue_softc *, int); Static int aue_csr_write_1(struct aue_softc *, int, int); Static int aue_csr_read_2(struct aue_softc *, int); Static int aue_csr_write_2(struct aue_softc *, int, int); Static device_method_t aue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aue_match), DEVMETHOD(device_attach, aue_attach), DEVMETHOD(device_detach, aue_detach), DEVMETHOD(device_shutdown, aue_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, aue_miibus_readreg), DEVMETHOD(miibus_writereg, aue_miibus_writereg), DEVMETHOD(miibus_statchg, aue_miibus_statchg), { 0, 0 } }; Static driver_t aue_driver = { "aue", 
aue_methods, sizeof(struct aue_softc) }; Static devclass_t aue_devclass; DRIVER_MODULE(aue, uhub, aue_driver, aue_devclass, usbd_driver_load, 0); DRIVER_MODULE(miibus, aue, miibus_driver, miibus_devclass, 0, 0); #define AUE_SETBIT(sc, reg, x) \ aue_csr_write_1(sc, reg, aue_csr_read_1(sc, reg) | (x)) #define AUE_CLRBIT(sc, reg, x) \ aue_csr_write_1(sc, reg, aue_csr_read_1(sc, reg) & ~(x)) Static int aue_csr_read_1(struct aue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int8_t val = 0; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = AUE_UR_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (0); } return (val); } Static int aue_csr_read_2(struct aue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int16_t val = 0; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = AUE_UR_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (0); } return (val); } Static int aue_csr_write_1(struct aue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = AUE_UR_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (-1); } return (0); } Static int aue_csr_write_2(struct aue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = AUE_UR_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); 
if (err) {
		return (-1);
	}

	return (0);
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
Static void
aue_eeprom_getword(struct aue_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/* Latch the address and kick off a read cycle. */
	aue_csr_write_1(sc, AUE_EE_REG, addr);
	aue_csr_write_1(sc, AUE_EE_CTL, AUE_EECTL_READ);

	/* Poll for completion; give up (but still read) after AUE_TIMEOUT. */
	for (i = 0; i < AUE_TIMEOUT; i++) {
		if (aue_csr_read_1(sc, AUE_EE_CTL) & AUE_EECTL_DONE)
			break;
	}

	if (i == AUE_TIMEOUT) {
		printf("aue%d: EEPROM read timed out\n", sc->aue_unit);
	}

	word = aue_csr_read_2(sc, AUE_EE_DATA);
	*dest = word;
	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
Static void
aue_read_eeprom(struct aue_softc *sc, caddr_t dest, int off,
    int cnt, int swap)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		aue_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		/* Non-zero 'swap' byte-swaps each word via ntohs(). */
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
	return;
}

Static int
aue_miibus_readreg(device_ptr_t dev, int phy, int reg)
{
	struct aue_softc	*sc = USBGETSOFTC(dev);
	int			i;
	u_int16_t		val = 0;

	/*
	 * The Am79C901 HomePNA PHY actually contains
	 * two transceivers: a 1Mbps HomePNA PHY and a
	 * 10Mbps full/half duplex ethernet PHY with
	 * NWAY autoneg. However in the ADMtek adapter,
	 * only the 1Mbps PHY is actually connected to
	 * anything, so we ignore the 10Mbps one. It
	 * happens to be configured for MII address 3,
	 * so we filter that out.
*/ if (sc->aue_vendor == USB_VENDOR_ADMTEK && sc->aue_product == USB_PRODUCT_ADMTEK_PEGASUS) { if (phy == 3) return (0); #ifdef notdef if (phy != 1) return (0); #endif } aue_csr_write_1(sc, AUE_PHY_ADDR, phy); aue_csr_write_1(sc, AUE_PHY_CTL, reg | AUE_PHYCTL_READ); for (i = 0; i < AUE_TIMEOUT; i++) { if (aue_csr_read_1(sc, AUE_PHY_CTL) & AUE_PHYCTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: MII read timed out\n", sc->aue_unit); } val = aue_csr_read_2(sc, AUE_PHY_DATA); return (val); } Static int aue_miibus_writereg(device_ptr_t dev, int phy, int reg, int data) { struct aue_softc *sc = USBGETSOFTC(dev); int i; if (phy == 3) return (0); aue_csr_write_2(sc, AUE_PHY_DATA, data); aue_csr_write_1(sc, AUE_PHY_ADDR, phy); aue_csr_write_1(sc, AUE_PHY_CTL, reg | AUE_PHYCTL_WRITE); for (i = 0; i < AUE_TIMEOUT; i++) { if (aue_csr_read_1(sc, AUE_PHY_CTL) & AUE_PHYCTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: MII read timed out\n", sc->aue_unit); } return(0); } Static void aue_miibus_statchg(device_ptr_t dev) { struct aue_softc *sc = USBGETSOFTC(dev); struct mii_data *mii = GET_MII(sc); AUE_CLRBIT(sc, AUE_CTL0, AUE_CTL0_RX_ENB | AUE_CTL0_TX_ENB); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_SPEEDSEL); } else { AUE_CLRBIT(sc, AUE_CTL1, AUE_CTL1_SPEEDSEL); } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_DUPLEX); else AUE_CLRBIT(sc, AUE_CTL1, AUE_CTL1_DUPLEX); AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_RX_ENB | AUE_CTL0_TX_ENB); /* * Set the LED modes on the LinkSys adapter. * This turns on the 'dual link LED' bin in the auxmode * register of the Broadcom PHY. 
*/
	if (sc->aue_flags & LSYS) {
		u_int16_t auxmode;
		auxmode = aue_miibus_readreg(dev, 0, 0x1b);
		aue_miibus_writereg(dev, 0, 0x1b, auxmode | 0x04);
	}

	return;
}

#define AUE_POLY	0xEDB88320
#define AUE_BITS	6

/*
 * Multicast hash: reflected CRC-32 (polynomial 0xEDB88320) of the
 * 6-byte ethernet address, truncated to the low AUE_BITS bits.
 */
Static u_int32_t
aue_mchash(const uint8_t *addr)
{
	uint32_t	crc;
	int		idx, bit;
	uint8_t		data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */
	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? AUE_POLY : 0);
	}

	return (crc & ((1 << AUE_BITS) - 1));
}

/*
 * Program the 64-bit multicast hash filter from the interface's
 * multicast list, or fall back to all-multicast reception when
 * ALLMULTI/PROMISC is requested.
 */
Static void
aue_setmulti(struct aue_softc *sc)
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_ALLMULTI);
		return;
	}

	AUE_CLRBIT(sc, AUE_CTL0, AUE_CTL0_ALLMULTI);

	/* first, zot all the existing hash bits */
	for (i = 0; i < 8; i++)
		aue_csr_write_1(sc, AUE_MAR0 + i, 0);

	/* now program new ones */
#if __FreeBSD_version >= 500000
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
#else
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
#endif
	{
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = aue_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		/* h selects byte (h >> 3) and bit (h & 7) of the 8-byte MAR. */
		AUE_SETBIT(sc, AUE_MAR + (h >> 3), 1 << (h & 0x7));
	}

	return;
}

Static void
aue_reset_pegasus_II(struct aue_softc *sc)
{
	/* Magic constants taken from Linux driver.
*/
	aue_csr_write_1(sc, AUE_REG_1D, 0);
	aue_csr_write_1(sc, AUE_REG_7B, 2);
#if 0
	if ((sc->aue_flags & HAS_HOME_PNA) && mii_mode)
		aue_csr_write_1(sc, AUE_REG_81, 6);
	else
#endif
	aue_csr_write_1(sc, AUE_REG_81, 2);
}

/*
 * Reset the MAC, then bring the attached PHY(s) out of reset via the
 * GPIO pins; Pegasus II parts get extra pokes in aue_reset_pegasus_II().
 */
Static void
aue_reset(struct aue_softc *sc)
{
	int		i;

	AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_RESETMAC);

	/* Wait for the self-clearing reset bit to drop. */
	for (i = 0; i < AUE_TIMEOUT; i++) {
		if (!(aue_csr_read_1(sc, AUE_CTL1) & AUE_CTL1_RESETMAC))
			break;
	}

	if (i == AUE_TIMEOUT)
		printf("aue%d: reset failed\n", sc->aue_unit);

	/*
	 * The PHY(s) attached to the Pegasus chip may be held
	 * in reset until we flip on the GPIO outputs. Make sure
	 * to set the GPIO pins high so that the PHY(s) will
	 * be enabled.
	 *
	 * Note: We force all of the GPIO pins low first, *then*
	 * enable the ones we want.
	 */
	aue_csr_write_1(sc, AUE_GPIO0, AUE_GPIO_OUT0|AUE_GPIO_SEL0);
	aue_csr_write_1(sc, AUE_GPIO0, AUE_GPIO_OUT0|AUE_GPIO_SEL0|AUE_GPIO_SEL1);

	if (sc->aue_flags & LSYS) {
		/* Grrr. LinkSys has to be different from everyone else. */
		aue_csr_write_1(sc, AUE_GPIO0,
		    AUE_GPIO_SEL0 | AUE_GPIO_SEL1);
		aue_csr_write_1(sc, AUE_GPIO0,
		    AUE_GPIO_SEL0 | AUE_GPIO_SEL1 | AUE_GPIO_OUT0);
	}

	if (sc->aue_flags & PII)
		aue_reset_pegasus_II(sc);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(10000);

	return;
}

/*
 * Probe for a Pegasus chip.
 */
USB_MATCH(aue)
{
	USB_MATCH_START(aue, uaa);

	/* Match at device level only; interface probes are declined. */
	if (uaa->iface != NULL)
		return (UMATCH_NONE);

	return (aue_lookup(uaa->vendor, uaa->product) != NULL ?
	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
*/ USB_ATTACH(aue) { USB_ATTACH_START(aue, sc, uaa); char devinfo[1024]; u_char eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; usbd_interface_handle iface; usbd_status err; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; bzero(sc, sizeof(struct aue_softc)); usbd_devinfo(uaa->device, 0, devinfo); sc->aue_udev = uaa->device; sc->aue_unit = device_get_unit(self); if (usbd_set_config_no(sc->aue_udev, AUE_CONFIG_NO, 0)) { printf("aue%d: getting interface handle failed\n", sc->aue_unit); USB_ATTACH_ERROR_RETURN; } err = usbd_device2interface_handle(uaa->device, AUE_IFACE_IDX, &iface); if (err) { printf("aue%d: getting interface handle failed\n", sc->aue_unit); USB_ATTACH_ERROR_RETURN; } sc->aue_iface = iface; sc->aue_flags = aue_lookup(uaa->vendor, uaa->product)->aue_flags; sc->aue_product = uaa->product; sc->aue_vendor = uaa->vendor; id = usbd_get_interface_descriptor(sc->aue_iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints. */ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(iface, i); if (ed == NULL) { printf("aue%d: couldn't get ep %d\n", sc->aue_unit, i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->aue_ed[AUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->aue_ed[AUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->aue_ed[AUE_ENDPT_INTR] = ed->bEndpointAddress; } } #if __FreeBSD_version >= 500000 mtx_init(&sc->aue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif AUE_LOCK(sc); /* Reset the adapter. */ aue_reset(sc); /* * Get station address from the EEPROM. 
*/ aue_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0); bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, "aue", sc->aue_unit); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = aue_ioctl; ifp->if_start = aue_start; ifp->if_watchdog = aue_watchdog; ifp->if_init = aue_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* * Do MII setup. * NOTE: Doing this causes child devices to be attached to us, * which we would normally disconnect at in the detach routine * using device_delete_child(). However the USB code is set up * such that when this driver is removed, all children devices * are removed as well. In effect, the USB code ends up detaching * all of our children for us, so we don't have to do is ourselves * in aue_detach(). It's important to point this out since if * we *do* try to detach the child devices ourselves, we will * end up getting the children deleted twice, which will crash * the system. */ if (mii_phy_probe(self, &sc->aue_miibus, aue_ifmedia_upd, aue_ifmedia_sts)) { printf("aue%d: MII without any PHY!\n", sc->aue_unit); AUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->aue_mtx); #endif USB_ATTACH_ERROR_RETURN; } sc->aue_qdat.ifp = ifp; sc->aue_qdat.if_rxstart = aue_rxstart; /* * Call MI attach routine. 
*/ #if __FreeBSD_version >= 500000 ether_ifattach(ifp, eaddr); #else ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #endif callout_handle_init(&sc->aue_stat_ch); usb_register_netisr(); sc->aue_dying = 0; AUE_UNLOCK(sc); USB_ATTACH_SUCCESS_RETURN; } Static int aue_detach(device_ptr_t dev) { struct aue_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; sc->aue_dying = 1; untimeout(aue_tick, sc, sc->aue_stat_ch); #if __FreeBSD_version >= 500000 ether_ifdetach(ifp); #else ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); #endif if (sc->aue_ep[AUE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_TX]); if (sc->aue_ep[AUE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_RX]); #ifdef AUE_INTR_PIPE if (sc->aue_ep[AUE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_INTR]); #endif AUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->aue_mtx); #endif return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ Static int aue_newbuf(struct aue_softc *sc, struct aue_chain *c, struct mbuf *m) { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("aue%d: no memory for rx list " "-- packet dropped!\n", sc->aue_unit); return (ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("aue%d: no memory for rx list " "-- packet dropped!\n", sc->aue_unit); m_freem(m_new); return (ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); c->aue_mbuf = m_new; return (0); } Static int aue_rx_list_init(struct aue_softc *sc) { struct aue_cdata *cd; struct aue_chain *c; int i; cd = &sc->aue_cdata; for (i = 0; i < AUE_RX_LIST_CNT; i++) { c = &cd->aue_rx_chain[i]; c->aue_sc = sc; c->aue_idx = i; if (aue_newbuf(sc, c, NULL) == ENOBUFS) return (ENOBUFS); if (c->aue_xfer == NULL) { c->aue_xfer = 
usbd_alloc_xfer(sc->aue_udev); if (c->aue_xfer == NULL) return (ENOBUFS); } } return (0); } Static int aue_tx_list_init(struct aue_softc *sc) { struct aue_cdata *cd; struct aue_chain *c; int i; cd = &sc->aue_cdata; for (i = 0; i < AUE_TX_LIST_CNT; i++) { c = &cd->aue_tx_chain[i]; c->aue_sc = sc; c->aue_idx = i; c->aue_mbuf = NULL; if (c->aue_xfer == NULL) { c->aue_xfer = usbd_alloc_xfer(sc->aue_udev); if (c->aue_xfer == NULL) return (ENOBUFS); } c->aue_buf = malloc(AUE_BUFSZ, M_USBDEV, M_NOWAIT); if (c->aue_buf == NULL) return (ENOBUFS); } return (0); } #ifdef AUE_INTR_PIPE Static void aue_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct aue_softc *sc = priv; struct ifnet *ifp; struct aue_intrpkt *p; AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_RUNNING)) { AUE_UNLOCK(sc); return; } if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AUE_UNLOCK(sc); return; } printf("aue%d: usb error on intr: %s\n", sc->aue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_RX]); AUE_UNLOCK(sc); return; } usbd_get_xfer_status(xfer, NULL, (void **)&p, NULL, NULL); if (p->aue_txstat0) ifp->if_oerrors++; if (p->aue_txstat0 & (AUE_TXSTAT0_LATECOLL & AUE_TXSTAT0_EXCESSCOLL)) ifp->if_collisions++; AUE_UNLOCK(sc); return; } #endif Static void aue_rxstart(struct ifnet *ifp) { struct aue_softc *sc; struct aue_chain *c; sc = ifp->if_softc; AUE_LOCK(sc); c = &sc->aue_cdata.aue_rx_chain[sc->aue_cdata.aue_rx_prod]; if (aue_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; AUE_UNLOCK(sc); return; } /* Setup new transfer. */ usbd_setup_xfer(c->aue_xfer, sc->aue_ep[AUE_ENDPT_RX], c, mtod(c->aue_mbuf, char *), AUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, aue_rxeof); usbd_transfer(c->aue_xfer); AUE_UNLOCK(sc); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/
Static void
aue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct aue_chain	*c = priv;
	struct aue_softc	*sc = c->aue_sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	struct aue_rxpkt	r;

	if (sc->aue_dying)
		return;
	AUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		AUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Pipe being torn down: don't resubmit. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			AUE_UNLOCK(sc);
			return;
		}
		if (usbd_ratecheck(&sc->aue_rx_notice))
			printf("aue%d: usb error on rx: %s\n", sc->aue_unit,
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	/* Must hold at least the 4-byte RX status word plus the CRC. */
	if (total_len <= 4 + ETHER_CRC_LEN) {
		ifp->if_ierrors++;
		goto done;
	}

	m = c->aue_mbuf;
	/* The last 4 bytes of the transfer hold the RX status word. */
	bcopy(mtod(m, char *) + total_len - 4, (char *)&r, sizeof(r));

	/* Turn off all the non-error bits in the rx status word. */
	r.aue_rxstat &= AUE_RXSTAT_MASK;
	if (r.aue_rxstat) {
		ifp->if_ierrors++;
		goto done;
	}

	/* No errors; receive the packet. */
	total_len -= (4 + ETHER_CRC_LEN);

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = (struct ifnet *)&sc->aue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	AUE_UNLOCK(sc);

	return;
done:
	/* Setup new transfer. */
	usbd_setup_xfer(xfer, sc->aue_ep[AUE_ENDPT_RX],
	    c, mtod(c->aue_mbuf, char *), AUE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, aue_rxeof);
	usbd_transfer(xfer);
	AUE_UNLOCK(sc);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
*/
Static void
aue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct aue_chain	*c = priv;
	struct aue_softc	*sc = c->aue_sc;
	struct ifnet		*ifp;
	usbd_status		err;

	AUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (status != USBD_NORMAL_COMPLETION) {
		/* Pipe being torn down: nothing to clean up here. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			AUE_UNLOCK(sc);
			return;
		}
		printf("aue%d: usb error on tx: %s\n", sc->aue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_TX]);
		AUE_UNLOCK(sc);
		return;
	}

	/* Transmit completed: cancel watchdog and allow the next frame. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->aue_xfer, NULL, NULL, NULL, &err);

	if (c->aue_mbuf != NULL) {
		c->aue_mbuf->m_pkthdr.rcvif = ifp;
		usb_tx_done(c->aue_mbuf);
		c->aue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	AUE_UNLOCK(sc);

	return;
}

/*
 * Periodic (once per second) tick: drive the MII state machine and
 * kick the transmitter when link first comes up.
 */
Static void
aue_tick(void *xsc)
{
	struct aue_softc	*sc = xsc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	if (sc == NULL)
		return;

	AUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	mii = GET_MII(sc);
	if (mii == NULL) {
		AUE_UNLOCK(sc);
		return;
	}

	mii_tick(mii);
	/* First sighting of link: restart any queued transmissions. */
	if (!sc->aue_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->aue_link++;
		if (ifp->if_snd.ifq_head != NULL)
			aue_start(ifp);
	}

	/* Re-arm for the next second. */
	sc->aue_stat_ch = timeout(aue_tick, sc, hz);

	AUE_UNLOCK(sc);

	return;
}

Static int
aue_encap(struct aue_softc *sc, struct mbuf *m, int idx)
{
	int			total_len;
	struct aue_chain	*c;
	usbd_status		err;

	c = &sc->aue_cdata.aue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer, leaving two
	 * bytes at the beginning to hold the frame length.
	 */
	m_copydata(m, 0, m->m_pkthdr.len, c->aue_buf + 2);
	c->aue_mbuf = m;

	total_len = m->m_pkthdr.len + 2;

	/*
	 * The ADMtek documentation says that the packet length is
	 * supposed to be specified in the first two bytes of the
	 * transfer, however it actually seems to ignore this info
	 * and base the frame size on the bulk transfer length.
*/ c->aue_buf[0] = (u_int8_t)m->m_pkthdr.len; c->aue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8); usbd_setup_xfer(c->aue_xfer, sc->aue_ep[AUE_ENDPT_TX], c, c->aue_buf, total_len, USBD_FORCE_SHORT_XFER, 10000, aue_txeof); /* Transmit */ err = usbd_transfer(c->aue_xfer); if (err != USBD_IN_PROGRESS) { aue_stop(sc); return (EIO); } sc->aue_cdata.aue_tx_cnt++; return (0); } Static void aue_start(struct ifnet *ifp) { struct aue_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; AUE_LOCK(sc); if (!sc->aue_link) { AUE_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { AUE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { AUE_UNLOCK(sc); return; } if (aue_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; AUE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; AUE_UNLOCK(sc); return; } Static void aue_init(void *xsc) { struct aue_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii = GET_MII(sc); struct aue_chain *c; usbd_status err; int i; AUE_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { AUE_UNLOCK(sc); return; } /* * Cancel pending I/O and free all RX/TX buffers. */ aue_reset(sc); /* Set MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) aue_csr_write_1(sc, AUE_PAR0 + i, sc->arpcom.ac_enaddr[i]); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC); else AUE_CLRBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC); /* Init TX ring. */ if (aue_tx_list_init(sc) == ENOBUFS) { printf("aue%d: tx list init failed\n", sc->aue_unit); AUE_UNLOCK(sc); return; } /* Init RX ring. 
*/ if (aue_rx_list_init(sc) == ENOBUFS) { printf("aue%d: rx list init failed\n", sc->aue_unit); AUE_UNLOCK(sc); return; } #ifdef AUE_INTR_PIPE sc->aue_cdata.aue_ibuf = malloc(AUE_INTR_PKTLEN, M_USBDEV, M_NOWAIT); #endif /* Load the multicast filter. */ aue_setmulti(sc); /* Enable RX and TX */ aue_csr_write_1(sc, AUE_CTL0, AUE_CTL0_RXSTAT_APPEND | AUE_CTL0_RX_ENB); AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_TX_ENB); AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_EP3_CLR); mii_mediachg(mii); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->aue_iface, sc->aue_ed[AUE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->aue_ep[AUE_ENDPT_RX]); if (err) { printf("aue%d: open rx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); AUE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->aue_iface, sc->aue_ed[AUE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->aue_ep[AUE_ENDPT_TX]); if (err) { printf("aue%d: open tx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); AUE_UNLOCK(sc); return; } #ifdef AUE_INTR_PIPE err = usbd_open_pipe_intr(sc->aue_iface, sc->aue_ed[AUE_ENDPT_INTR], USBD_SHORT_XFER_OK, &sc->aue_ep[AUE_ENDPT_INTR], sc, sc->aue_cdata.aue_ibuf, AUE_INTR_PKTLEN, aue_intr, AUE_INTR_INTERVAL); if (err) { printf("aue%d: open intr pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); AUE_UNLOCK(sc); return; } #endif /* Start up the receive pipe. */ for (i = 0; i < AUE_RX_LIST_CNT; i++) { c = &sc->aue_cdata.aue_rx_chain[i]; usbd_setup_xfer(c->aue_xfer, sc->aue_ep[AUE_ENDPT_RX], c, mtod(c->aue_mbuf, char *), AUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, aue_rxeof); usbd_transfer(c->aue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->aue_stat_ch = timeout(aue_tick, sc, hz); AUE_UNLOCK(sc); return; } /* * Set media options. 
*/
/*
 * aue_ifmedia_upd: ifmedia "change" callback.  Drops the cached link
 * state, resets each PHY when multiple MII instances exist, then starts
 * renegotiation via mii_mediachg().
 */
Static int
aue_ifmedia_upd(struct ifnet *ifp)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	/* Force the transmit path to wait for a fresh link indication. */
	sc->aue_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status (ifmedia "status" callback).
 */
Static void
aue_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * aue_ioctl: interface ioctl handler.  Runs with the softc lock held
 * across the whole switch; aue_init()/aue_stop() take the same lock,
 * so this relies on the mutex being recursive -- TODO confirm the
 * mutex is created with MTX_RECURSE at attach time (not visible here).
 */
Static int
aue_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	int			error = 0;

	AUE_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while running, just
			 * toggle the chip's RX promiscuous bit; do a full
			 * init only when the interface is not running yet.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->aue_if_flags & IFF_PROMISC)) {
				AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->aue_if_flags & IFF_PROMISC) {
				AUE_CLRBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				aue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				aue_stop(sc);
		}
		/* Remember the flags so the next toggle can be detected. */
		sc->aue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		aue_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = GET_MII(sc);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	AUE_UNLOCK(sc);

	return (error);
}

/*
 * aue_watchdog: transmit watchdog.  Manually completes TX chain slot 0
 * (assumes a single outstanding transmit in slot 0 -- TODO confirm
 * against aue_encap/aue_start, which are outside this view) and then
 * restarts output if packets are still queued.
 */
Static void
aue_watchdog(struct ifnet *ifp)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct aue_chain	*c;
	usbd_status		stat;

	AUE_LOCK(sc);

	ifp->if_oerrors++;
	printf("aue%d: watchdog timeout\n", sc->aue_unit);

	c = &sc->aue_cdata.aue_tx_chain[0];
	usbd_get_xfer_status(c->aue_xfer, NULL, NULL, NULL, &stat);
	aue_txeof(c->aue_xfer, c, stat);

	if (ifp->if_snd.ifq_head != NULL)
		aue_start(ifp);
	AUE_UNLOCK(sc);
	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Teardown order: silence the chip (clear CTL0/CTL1 -- aue_init sets
 * the RX/TX enable bits in CTL0), reset, cancel the tick timeout,
 * abort+close each open pipe, then free the per-slot buffers, mbufs
 * and transfer handles.
 */
Static void
aue_stop(struct aue_softc *sc)
{
	usbd_status		err;
	struct ifnet		*ifp;
	int			i;

	AUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	aue_csr_write_1(sc, AUE_CTL0, 0);
	aue_csr_write_1(sc, AUE_CTL1, 0);
	aue_reset(sc);
	untimeout(aue_tick, sc, sc->aue_stat_ch);

	/* Stop transfers. */
	if (sc->aue_ep[AUE_ENDPT_RX] != NULL) {
		err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_RX]);
		if (err) {
			printf("aue%d: abort rx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_RX]);
		if (err) {
			printf("aue%d: close rx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		sc->aue_ep[AUE_ENDPT_RX] = NULL;
	}

	if (sc->aue_ep[AUE_ENDPT_TX] != NULL) {
		err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_TX]);
		if (err) {
			printf("aue%d: abort tx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_TX]);
		if (err) {
			printf("aue%d: close tx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		sc->aue_ep[AUE_ENDPT_TX] = NULL;
	}

#ifdef AUE_INTR_PIPE
	if (sc->aue_ep[AUE_ENDPT_INTR] != NULL) {
		err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_INTR]);
		if (err) {
			printf("aue%d: abort intr pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_INTR]);
		if (err) {
			printf("aue%d: close intr pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		sc->aue_ep[AUE_ENDPT_INTR] = NULL;
	}
#endif

	/* Free RX resources. */
	for (i = 0; i < AUE_RX_LIST_CNT; i++) {
		if (sc->aue_cdata.aue_rx_chain[i].aue_buf != NULL) {
			free(sc->aue_cdata.aue_rx_chain[i].aue_buf, M_USBDEV);
			sc->aue_cdata.aue_rx_chain[i].aue_buf = NULL;
		}
		if (sc->aue_cdata.aue_rx_chain[i].aue_mbuf != NULL) {
			m_freem(sc->aue_cdata.aue_rx_chain[i].aue_mbuf);
			sc->aue_cdata.aue_rx_chain[i].aue_mbuf = NULL;
		}
		if (sc->aue_cdata.aue_rx_chain[i].aue_xfer != NULL) {
			usbd_free_xfer(sc->aue_cdata.aue_rx_chain[i].aue_xfer);
			sc->aue_cdata.aue_rx_chain[i].aue_xfer = NULL;
		}
	}

	/* Free TX resources. */
	for (i = 0; i < AUE_TX_LIST_CNT; i++) {
		if (sc->aue_cdata.aue_tx_chain[i].aue_buf != NULL) {
			free(sc->aue_cdata.aue_tx_chain[i].aue_buf, M_USBDEV);
			sc->aue_cdata.aue_tx_chain[i].aue_buf = NULL;
		}
		if (sc->aue_cdata.aue_tx_chain[i].aue_mbuf != NULL) {
			m_freem(sc->aue_cdata.aue_tx_chain[i].aue_mbuf);
			sc->aue_cdata.aue_tx_chain[i].aue_mbuf = NULL;
		}
		if (sc->aue_cdata.aue_tx_chain[i].aue_xfer != NULL) {
			usbd_free_xfer(sc->aue_cdata.aue_tx_chain[i].aue_xfer);
			sc->aue_cdata.aue_tx_chain[i].aue_xfer = NULL;
		}
	}

#ifdef AUE_INTR_PIPE
	free(sc->aue_cdata.aue_ibuf, M_USBDEV);
	sc->aue_cdata.aue_ibuf = NULL;
#endif

	sc->aue_link = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	AUE_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 *
 * NOTE(review): aue_dying is bumped before the lock is taken --
 * presumably it short-circuits register I/O as in sibling drivers;
 * confirm against the aue csr helpers (outside this view).
 */
Static void
aue_shutdown(device_ptr_t dev)
{
	struct aue_softc	*sc;

	sc = device_get_softc(dev);
	sc->aue_dying++;
	AUE_LOCK(sc);
	aue_reset(sc);
	aue_stop(sc);
	AUE_UNLOCK(sc);

	return;
}
Index: head/sys/dev/usb/if_axe.c
===================================================================
--- head/sys/dev/usb/if_axe.c (revision 129878)
+++ head/sys/dev/usb/if_axe.c (revision 129879)
@@ -1,1201 +1,1202 @@
/*
 * Copyright (c) 1997, 1998, 1999, 2000-2003
 * Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ASIX Electronics AX88172 USB 2.0 ethernet driver. Used in the * LinkSys USB200M and various other adapters. * * Manuals available from: * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF * Note: you need the manual for the AX88170 chip (USB 1.x ethernet * controller) to find the definitions for the RX control register. 
* http://www.asix.com.tw/datasheet/mac/Ax88170.PDF * * Written by Bill Paul * Senior Engineer * Wind River Systems */ /* * The AX88172 provides USB ethernet supports at 10 and 100Mbps. * It uses an external PHY (reference designs use a RealTek chip), * and has a 64-bit multicast hash filter. There is some information * missing from the manual which one needs to know in order to make * the chip function: * * - You must set bit 7 in the RX control register, otherwise the * chip won't receive any packets. * - You must initialize all 3 IPG registers, or you won't be able * to send any packets. * * Note that this device appears to only support loading the station * address via autload from the EEPROM (i.e. there's no way to manaully * set it). * * (Adam Weinberger wanted me to name this driver if_gir.c.) */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include /* * Various supported device vendors/products. 
*/ Static struct axe_type axe_devs[] = { { USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88172 }, { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100 }, { USB_VENDOR_LINKSYS2, USB_PRODUCT_LINKSYS2_USB200M }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAU2KTX }, { USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_FA120 }, { 0, 0 } }; Static int axe_match(device_ptr_t); Static int axe_attach(device_ptr_t); Static int axe_detach(device_ptr_t); Static int axe_tx_list_init(struct axe_softc *); Static int axe_rx_list_init(struct axe_softc *); Static int axe_newbuf(struct axe_softc *, struct axe_chain *, struct mbuf *); Static int axe_encap(struct axe_softc *, struct mbuf *, int); Static void axe_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void axe_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void axe_tick(void *); Static void axe_rxstart(struct ifnet *); Static void axe_start(struct ifnet *); Static int axe_ioctl(struct ifnet *, u_long, caddr_t); Static void axe_init(void *); Static void axe_stop(struct axe_softc *); Static void axe_watchdog(struct ifnet *); Static void axe_shutdown(device_ptr_t); Static int axe_miibus_readreg(device_ptr_t, int, int); Static int axe_miibus_writereg(device_ptr_t, int, int, int); Static void axe_miibus_statchg(device_ptr_t); Static int axe_cmd(struct axe_softc *, int, int, int, void *); Static int axe_ifmedia_upd(struct ifnet *); Static void axe_ifmedia_sts(struct ifnet *, struct ifmediareq *); Static void axe_setmulti(struct axe_softc *); Static uint32_t axe_mchash(const uint8_t *); Static device_method_t axe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, axe_match), DEVMETHOD(device_attach, axe_attach), DEVMETHOD(device_detach, axe_detach), DEVMETHOD(device_shutdown, axe_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, axe_miibus_readreg), DEVMETHOD(miibus_writereg, 
axe_miibus_writereg), DEVMETHOD(miibus_statchg, axe_miibus_statchg), { 0, 0 } }; Static driver_t axe_driver = { "axe", axe_methods, sizeof(struct axe_softc) }; Static devclass_t axe_devclass; DRIVER_MODULE(axe, uhub, axe_driver, axe_devclass, usbd_driver_load, 0); DRIVER_MODULE(miibus, axe, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(axe, usb, 1, 1, 1); MODULE_DEPEND(axe, miibus, 1, 1, 1); Static int axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf) { usb_device_request_t req; usbd_status err; if (sc->axe_dying) return(0); if (AXE_CMD_DIR(cmd)) req.bmRequestType = UT_WRITE_VENDOR_DEVICE; else req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = AXE_CMD_CMD(cmd); USETW(req.wValue, val); USETW(req.wIndex, index); USETW(req.wLength, AXE_CMD_LEN(cmd)); err = usbd_do_request(sc->axe_udev, &req, buf); if (err) return(-1); return(0); } Static int axe_miibus_readreg(device_ptr_t dev, int phy, int reg) { struct axe_softc *sc = USBGETSOFTC(dev); usbd_status err; u_int16_t val; if (sc->axe_dying) return(0); #ifdef notdef /* * The chip tells us the MII address of any supported * PHYs attached to the chip, so only read from those. 
*/ if (sc->axe_phyaddrs[0] != AXE_NOPHY && phy != sc->axe_phyaddrs[0]) return (0); if (sc->axe_phyaddrs[1] != AXE_NOPHY && phy != sc->axe_phyaddrs[1]) return (0); #endif if (sc->axe_phyaddrs[0] != 0xFF && sc->axe_phyaddrs[0] != phy) return (0); AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL); err = axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, (void *)&val); axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL); AXE_UNLOCK(sc); if (err) { printf("axe%d: read PHY failed\n", sc->axe_unit); return(-1); } if (val) sc->axe_phyaddrs[0] = phy; return (val); } Static int axe_miibus_writereg(device_ptr_t dev, int phy, int reg, int val) { struct axe_softc *sc = USBGETSOFTC(dev); usbd_status err; if (sc->axe_dying) return(0); AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL); err = axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, (void *)&val); axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL); AXE_UNLOCK(sc); if (err) { printf("axe%d: write PHY failed\n", sc->axe_unit); return(-1); } return (0); } Static void axe_miibus_statchg(device_ptr_t dev) { #ifdef notdef struct axe_softc *sc = USBGETSOFTC(dev); struct mii_data *mii = GET_MII(sc); #endif /* doesn't seem to be necessary */ return; } /* * Set media options. */ Static int axe_ifmedia_upd(struct ifnet *ifp) { struct axe_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); sc->axe_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } mii_mediachg(mii); return (0); } /* * Report current media status. */ Static void axe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct axe_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } Static uint32_t axe_mchash(const uint8_t *addr) { uint32_t crc, carry; int idx, bit; uint8_t data; /* Compute CRC for the address value. 
*/ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); crc <<= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return((crc >> 26) & 0x0000003F); } Static void axe_setmulti(struct axe_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0; u_int16_t rxmode; u_int8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; ifp = &sc->arpcom.ac_if; AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxmode |= AXE_RXCMD_ALLMULTI; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); AXE_UNLOCK(sc); return; } else rxmode &= ~AXE_RXCMD_ALLMULTI; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = axe_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); hashtbl[h / 8] |= 1 << (h % 8); } axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl); axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); AXE_UNLOCK(sc); return; } Static void axe_reset(struct axe_softc *sc) { if (sc->axe_dying) return; if (usbd_set_config_no(sc->axe_udev, AXE_CONFIG_NO, 1) || usbd_device2interface_handle(sc->axe_udev, AXE_IFACE_IDX, &sc->axe_iface)) { printf("axe%d: getting interface handle failed\n", sc->axe_unit); } /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a AX88172 chip. */ USB_MATCH(axe) { USB_MATCH_START(axe, uaa); struct axe_type *t; if (!uaa->iface) return(UMATCH_NONE); t = axe_devs; while(t->axe_vid) { if (uaa->vendor == t->axe_vid && uaa->product == t->axe_did) { return(UMATCH_VENDOR_PRODUCT); } t++; } return(UMATCH_NONE); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/
/*
 * Attach the interface: select the USB configuration, locate the
 * bulk RX/TX and interrupt endpoints, read the station address and
 * IPG/PHY data from the chip, then do the ifnet/ether/miibus attach.
 */
USB_ATTACH(axe)
{
	USB_ATTACH_START(axe, sc, uaa);
	char			devinfo[1024];
	u_char			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	usb_interface_descriptor_t	*id;
	usb_endpoint_descriptor_t	*ed;
	int			i;

	bzero(sc, sizeof(struct axe_softc));
	sc->axe_udev = uaa->device;
	sc->axe_dev = self;
	sc->axe_unit = device_get_unit(self);

	if (usbd_set_config_no(sc->axe_udev, AXE_CONFIG_NO, 1)) {
		/*
		 * NOTE(review): this is the set-config step, but the
		 * message says "interface handle" -- candidate for a
		 * clearer message in a follow-up change.
		 */
		printf("axe%d: getting interface handle failed\n",
		    sc->axe_unit);
		USB_ATTACH_ERROR_RETURN;
	}

	if (usbd_device2interface_handle(uaa->device,
	    AXE_IFACE_IDX, &sc->axe_iface)) {
		printf("axe%d: getting interface handle failed\n",
		    sc->axe_unit);
		USB_ATTACH_ERROR_RETURN;
	}

	id = usbd_get_interface_descriptor(sc->axe_iface);

	usbd_devinfo(uaa->device, 0, devinfo);
	device_set_desc_copy(self, devinfo);
	printf("%s: %s\n", USBDEVNAME(self), devinfo);

	/* Find endpoints. */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->axe_iface, i);
		if (!ed) {
			printf("axe%d: couldn't get ep %d\n",
			    sc->axe_unit, i);
			USB_ATTACH_ERROR_RETURN;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->axe_ed[AXE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->axe_ed[AXE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->axe_ed[AXE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

	/* Recursive mutex: axe_init()/axe_stop() relock from ioctl paths. */
	mtx_init(&sc->axe_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	AXE_LOCK(sc);

	/*
	 * Get station address.
	 */
	axe_cmd(sc, AXE_CMD_READ_NODEID, 0, 0, &eaddr);

	/*
	 * Load IPG values and PHY indexes.
	 */
	axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, (void *)&sc->axe_ipgs);
	axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, (void *)&sc->axe_phyaddrs);

	/*
	 * Work around broken adapters that appear to lie about
	 * their PHY addresses.
	 */
	sc->axe_phyaddrs[0] = sc->axe_phyaddrs[1] = 0xFF;

	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "axe", sc->axe_unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = axe_ioctl;
	ifp->if_start = axe_start;
	ifp->if_watchdog = axe_watchdog;
	ifp->if_init = axe_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	/* Hook the USB ether input path back to our rxstart routine. */
	sc->axe_qdat.ifp = ifp;
	sc->axe_qdat.if_rxstart = axe_rxstart;

	if (mii_phy_probe(self, &sc->axe_miibus,
	    axe_ifmedia_upd, axe_ifmedia_sts)) {
		printf("axe%d: MII without any PHY!\n", sc->axe_unit);
		AXE_UNLOCK(sc);
		mtx_destroy(&sc->axe_mtx);
		USB_ATTACH_ERROR_RETURN;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
	callout_handle_init(&sc->axe_stat_ch);
	usb_register_netisr();

	sc->axe_dying = 0;

	AXE_UNLOCK(sc);

	USB_ATTACH_SUCCESS_RETURN;
}

/*
 * Detach: flag the device dying, cancel the tick timeout, detach the
 * ifnet and abort (but do not close) any open pipes.  Closing pipes
 * and freeing the chain resources is done by axe_stop().
 */
Static int
axe_detach(device_ptr_t dev)
{
	struct axe_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	AXE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	sc->axe_dying = 1;
	untimeout(axe_tick, sc, sc->axe_stat_ch);
	ether_ifdetach(ifp);

	if (sc->axe_ep[AXE_ENDPT_TX] != NULL)
		usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]);
	if (sc->axe_ep[AXE_ENDPT_RX] != NULL)
		usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]);
	if (sc->axe_ep[AXE_ENDPT_INTR] != NULL)
		usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]);

	AXE_UNLOCK(sc);
	mtx_destroy(&sc->axe_mtx);

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
*/
/*
 * axe_newbuf: attach a fresh (or recycled) mbuf cluster to an RX chain
 * slot.  m_adj() by ETHER_ALIGN offsets the payload so the IP header
 * ends up aligned.  Returns 0 or ENOBUFS.
 */
Static int
axe_newbuf(struct axe_softc *sc, struct axe_chain *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL) {
			printf("axe%d: no memory for rx list "
			    "-- packet dropped!\n", sc->axe_unit);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length and data ptr. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);
	c->axe_mbuf = m_new;

	return(0);
}

/*
 * axe_rx_list_init: prime every RX chain slot with an mbuf cluster and
 * a USB transfer handle (handles are allocated lazily and reused across
 * init/stop cycles).  Returns 0 or ENOBUFS.
 */
Static int
axe_rx_list_init(struct axe_softc *sc)
{
	struct axe_cdata	*cd;
	struct axe_chain	*c;
	int			i;

	cd = &sc->axe_cdata;
	for (i = 0; i < AXE_RX_LIST_CNT; i++) {
		c = &cd->axe_rx_chain[i];
		c->axe_sc = sc;
		c->axe_idx = i;
		if (axe_newbuf(sc, c, NULL) == ENOBUFS)
			return(ENOBUFS);
		if (c->axe_xfer == NULL) {
			c->axe_xfer = usbd_alloc_xfer(sc->axe_udev);
			if (c->axe_xfer == NULL)
				return(ENOBUFS);
		}
	}

	return(0);
}

/*
 * axe_tx_list_init: give every TX chain slot a transfer handle and a
 * contiguous AXE_BUFSZ copy buffer (TX data is copied out of mbufs by
 * the encap path).  Returns 0 or ENOBUFS.
 */
Static int
axe_tx_list_init(struct axe_softc *sc)
{
	struct axe_cdata	*cd;
	struct axe_chain	*c;
	int			i;

	cd = &sc->axe_cdata;
	for (i = 0; i < AXE_TX_LIST_CNT; i++) {
		c = &cd->axe_tx_chain[i];
		c->axe_sc = sc;
		c->axe_idx = i;
		c->axe_mbuf = NULL;
		if (c->axe_xfer == NULL) {
			c->axe_xfer = usbd_alloc_xfer(sc->axe_udev);
			if (c->axe_xfer == NULL)
				return(ENOBUFS);
		}
		c->axe_buf = malloc(AXE_BUFSZ, M_USBDEV, M_NOWAIT);
		if (c->axe_buf == NULL)
			return(ENOBUFS);
	}

	return(0);
}

/*
 * axe_rxstart: refill the current RX producer slot with a new cluster
 * and resubmit its transfer.  Installed as sc->axe_qdat.if_rxstart at
 * attach time -- presumably invoked from the USB ether input path once
 * the previous mbuf has been handed up; confirm against usb_ethersubr.
 */
Static void
axe_rxstart(struct ifnet *ifp)
{
	struct axe_softc	*sc;
	struct axe_chain	*c;

	sc = ifp->if_softc;
	AXE_LOCK(sc);
	c = &sc->axe_cdata.axe_rx_chain[sc->axe_cdata.axe_rx_prod];

	if (axe_newbuf(sc, c, NULL) == ENOBUFS) {
		/* No cluster available: count a drop, leave slot idle. */
		ifp->if_ierrors++;
		AXE_UNLOCK(sc);
		return;
	}

	/* Setup new transfer. */
	usbd_setup_xfer(c->axe_xfer, sc->axe_ep[AXE_ENDPT_RX],
	    c, mtod(c->axe_mbuf, char *), AXE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, axe_rxeof);
	usbd_transfer(c->axe_xfer);
	AXE_UNLOCK(sc);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
*/
/*
 * axe_rxeof: bulk-in completion handler.  On success the mbuf is handed
 * to usb_ether_input() (rcvif is pointed at axe_qdat so the netisr can
 * find this driver's rxstart); on error or a runt frame the same mbuf
 * is recycled by resubmitting the transfer at the "done" label.
 */
Static void
axe_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct axe_softc	*sc;
	struct axe_chain	*c;
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;

	c = priv;
	sc = c->axe_sc;
	AXE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Interface went down while the transfer was in flight. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		AXE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Cancelled / never started: do not resubmit. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			AXE_UNLOCK(sc);
			return;
		}
		/* Rate-limit console noise on persistent errors. */
		if (usbd_ratecheck(&sc->axe_rx_notice))
			printf("axe%d: usb error on rx: %s\n", sc->axe_unit,
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->axe_ep[AXE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	m = c->axe_mbuf;

	/* Runt frame: drop it and recycle the buffer. */
	if (total_len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = (struct ifnet *)&sc->axe_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	AXE_UNLOCK(sc);

	return;
done:
	/* Setup new transfer. */
	usbd_setup_xfer(c->axe_xfer, sc->axe_ep[AXE_ENDPT_RX],
	    c, mtod(c->axe_mbuf, char *), AXE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, axe_rxeof);
	usbd_transfer(c->axe_xfer);
	AXE_UNLOCK(sc);
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
*/ Static void axe_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct axe_softc *sc; struct axe_chain *c; struct ifnet *ifp; usbd_status err; c = priv; sc = c->axe_sc; AXE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AXE_UNLOCK(sc); return; } printf("axe%d: usb error on tx: %s\n", sc->axe_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->axe_ep[AXE_ENDPT_TX]); AXE_UNLOCK(sc); return; } ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; usbd_get_xfer_status(c->axe_xfer, NULL, NULL, NULL, &err); if (c->axe_mbuf != NULL) { c->axe_mbuf->m_pkthdr.rcvif = ifp; usb_tx_done(c->axe_mbuf); c->axe_mbuf = NULL; } if (err) ifp->if_oerrors++; else ifp->if_opackets++; AXE_UNLOCK(sc); return; } Static void axe_tick(void *xsc) { struct axe_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; if (sc == NULL) return; AXE_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = GET_MII(sc); if (mii == NULL) { AXE_UNLOCK(sc); return; } mii_tick(mii); if (!sc->axe_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->axe_link++; if (ifp->if_snd.ifq_head != NULL) axe_start(ifp); } sc->axe_stat_ch = timeout(axe_tick, sc, hz); AXE_UNLOCK(sc); return; } Static int axe_encap(struct axe_softc *sc, struct mbuf *m, int idx) { struct axe_chain *c; usbd_status err; c = &sc->axe_cdata.axe_tx_chain[idx]; /* * Copy the mbuf data into a contiguous buffer, leaving two * bytes at the beginning to hold the frame length. 
*/ m_copydata(m, 0, m->m_pkthdr.len, c->axe_buf); c->axe_mbuf = m; usbd_setup_xfer(c->axe_xfer, sc->axe_ep[AXE_ENDPT_TX], c, c->axe_buf, m->m_pkthdr.len, 0, 10000, axe_txeof); /* Transmit */ err = usbd_transfer(c->axe_xfer); if (err != USBD_IN_PROGRESS) { axe_stop(sc); return(EIO); } sc->axe_cdata.axe_tx_cnt++; return(0); } Static void axe_start(struct ifnet *ifp) { struct axe_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; AXE_LOCK(sc); if (!sc->axe_link) { AXE_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { AXE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { AXE_UNLOCK(sc); return; } if (axe_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; AXE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; AXE_UNLOCK(sc); return; } Static void axe_init(void *xsc) { struct axe_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct axe_chain *c; usbd_status err; int i; int rxmode; if (ifp->if_flags & IFF_RUNNING) return; AXE_LOCK(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ axe_reset(sc); #ifdef notdef /* Set MAC address */ axe_mac(sc, sc->arpcom.ac_enaddr, 1); #endif /* Enable RX logic. */ /* Init TX ring. */ if (axe_tx_list_init(sc) == ENOBUFS) { printf("axe%d: tx list init failed\n", sc->axe_unit); AXE_UNLOCK(sc); return; } /* Init RX ring. 
*/ if (axe_rx_list_init(sc) == ENOBUFS) { printf("axe%d: rx list init failed\n", sc->axe_unit); AXE_UNLOCK(sc); return; } /* Set transmitter IPG values */ axe_cmd(sc, AXE_CMD_WRITE_IPG0, 0, sc->axe_ipgs[0], NULL); axe_cmd(sc, AXE_CMD_WRITE_IPG1, 0, sc->axe_ipgs[1], NULL); axe_cmd(sc, AXE_CMD_WRITE_IPG2, 0, sc->axe_ipgs[2], NULL); /* Enable receiver, set RX mode */ rxmode = AXE_RXCMD_UNICAST|AXE_RXCMD_MULTICAST|AXE_RXCMD_ENABLE; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) rxmode |= AXE_RXCMD_PROMISC; if (ifp->if_flags & IFF_BROADCAST) rxmode |= AXE_RXCMD_BROADCAST; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); /* Load the multicast filter. */ axe_setmulti(sc); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->axe_ep[AXE_ENDPT_RX]); if (err) { printf("axe%d: open rx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); AXE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->axe_ep[AXE_ENDPT_TX]); if (err) { printf("axe%d: open tx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); AXE_UNLOCK(sc); return; } /* Start up the receive pipe. 
*/ for (i = 0; i < AXE_RX_LIST_CNT; i++) { c = &sc->axe_cdata.axe_rx_chain[i]; usbd_setup_xfer(c->axe_xfer, sc->axe_ep[AXE_ENDPT_RX], c, mtod(c->axe_mbuf, char *), AXE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof); usbd_transfer(c->axe_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; AXE_UNLOCK(sc); sc->axe_stat_ch = timeout(axe_tick, sc, hz); return; } Static int axe_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct axe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; u_int16_t rxmode; int error = 0; switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->axe_if_flags & IFF_PROMISC)) { AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode); rxmode |= AXE_RXCMD_PROMISC; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); AXE_UNLOCK(sc); axe_setmulti(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->axe_if_flags & IFF_PROMISC) { AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode); rxmode &= ~AXE_RXCMD_PROMISC; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); AXE_UNLOCK(sc); axe_setmulti(sc); } else if (!(ifp->if_flags & IFF_RUNNING)) axe_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) axe_stop(sc); } sc->axe_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: axe_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = GET_MII(sc); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } AXE_UNLOCK(sc); return(error); } Static void axe_watchdog(struct ifnet *ifp) { struct axe_softc *sc; struct axe_chain *c; usbd_status stat; sc = ifp->if_softc; AXE_LOCK(sc); ifp->if_oerrors++; printf("axe%d: watchdog timeout\n", sc->axe_unit); c = &sc->axe_cdata.axe_tx_chain[0]; usbd_get_xfer_status(c->axe_xfer, NULL, NULL, NULL, 
&stat); axe_txeof(c->axe_xfer, c, stat); AXE_UNLOCK(sc); if (ifp->if_snd.ifq_head != NULL) axe_start(ifp); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ Static void axe_stop(struct axe_softc *sc) { usbd_status err; struct ifnet *ifp; int i; AXE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; untimeout(axe_tick, sc, sc->axe_stat_ch); /* Stop transfers. */ if (sc->axe_ep[AXE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]); if (err) { printf("axe%d: abort rx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_RX]); if (err) { printf("axe%d: close rx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } sc->axe_ep[AXE_ENDPT_RX] = NULL; } if (sc->axe_ep[AXE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]); if (err) { printf("axe%d: abort tx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_TX]); if (err) { printf("axe%d: close tx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } sc->axe_ep[AXE_ENDPT_TX] = NULL; } if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]); if (err) { printf("axe%d: abort intr pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_INTR]); if (err) { printf("axe%d: close intr pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } sc->axe_ep[AXE_ENDPT_INTR] = NULL; } axe_reset(sc); /* Free RX resources. 
*/ for (i = 0; i < AXE_RX_LIST_CNT; i++) { if (sc->axe_cdata.axe_rx_chain[i].axe_buf != NULL) { free(sc->axe_cdata.axe_rx_chain[i].axe_buf, M_USBDEV); sc->axe_cdata.axe_rx_chain[i].axe_buf = NULL; } if (sc->axe_cdata.axe_rx_chain[i].axe_mbuf != NULL) { m_freem(sc->axe_cdata.axe_rx_chain[i].axe_mbuf); sc->axe_cdata.axe_rx_chain[i].axe_mbuf = NULL; } if (sc->axe_cdata.axe_rx_chain[i].axe_xfer != NULL) { usbd_free_xfer(sc->axe_cdata.axe_rx_chain[i].axe_xfer); sc->axe_cdata.axe_rx_chain[i].axe_xfer = NULL; } } /* Free TX resources. */ for (i = 0; i < AXE_TX_LIST_CNT; i++) { if (sc->axe_cdata.axe_tx_chain[i].axe_buf != NULL) { free(sc->axe_cdata.axe_tx_chain[i].axe_buf, M_USBDEV); sc->axe_cdata.axe_tx_chain[i].axe_buf = NULL; } if (sc->axe_cdata.axe_tx_chain[i].axe_mbuf != NULL) { m_freem(sc->axe_cdata.axe_tx_chain[i].axe_mbuf); sc->axe_cdata.axe_tx_chain[i].axe_mbuf = NULL; } if (sc->axe_cdata.axe_tx_chain[i].axe_xfer != NULL) { usbd_free_xfer(sc->axe_cdata.axe_tx_chain[i].axe_xfer); sc->axe_cdata.axe_tx_chain[i].axe_xfer = NULL; } } ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); sc->axe_link = 0; AXE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void axe_shutdown(device_ptr_t dev) { struct axe_softc *sc; sc = device_get_softc(dev); axe_stop(sc); return; } Index: head/sys/dev/usb/if_cue.c =================================================================== --- head/sys/dev/usb/if_cue.c (revision 129878) +++ head/sys/dev/usb/if_cue.c (revision 129879) @@ -1,1211 +1,1212 @@ /* * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * CATC USB-EL1210A USB to ethernet driver. Used in the CATC Netmate * adapters and others. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The CATC USB-EL1210A provides USB ethernet support at 10Mbps. The * RX filter uses a 512-bit multicast hash table, single perfect entry * for the station address, and promiscuous mode. Unlike the ADMtek * and KLSI chips, the CATC ASIC supports read and write combining * mode where multiple packets can be transfered using a single bulk * transaction, which helps performance a great deal. 
*/ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include #include /* * Various supported device vendors/products. */ Static struct cue_type cue_devs[] = { { USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE }, { USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE2 }, { USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTLINK }, { 0, 0 } }; Static int cue_match(device_ptr_t); Static int cue_attach(device_ptr_t); Static int cue_detach(device_ptr_t); Static int cue_tx_list_init(struct cue_softc *); Static int cue_rx_list_init(struct cue_softc *); Static int cue_newbuf(struct cue_softc *, struct cue_chain *, struct mbuf *); Static int cue_encap(struct cue_softc *, struct mbuf *, int); Static void cue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void cue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void cue_tick(void *); Static void cue_rxstart(struct ifnet *); Static void cue_start(struct ifnet *); Static int cue_ioctl(struct ifnet *, u_long, caddr_t); Static void cue_init(void *); Static void cue_stop(struct cue_softc *); Static void cue_watchdog(struct ifnet *); Static void cue_shutdown(device_ptr_t); Static void cue_setmulti(struct cue_softc *); Static uint32_t cue_mchash(const uint8_t *); Static void cue_reset(struct cue_softc *); Static int cue_csr_read_1(struct cue_softc *, int); Static int cue_csr_write_1(struct cue_softc *, int, int); Static int cue_csr_read_2(struct cue_softc *, int); #ifdef notdef Static int cue_csr_write_2(struct cue_softc *, int, int); #endif Static int cue_mem(struct cue_softc *, int, int, void *, int); Static int cue_getmac(struct cue_softc *, void *); Static device_method_t cue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cue_match), DEVMETHOD(device_attach, cue_attach), DEVMETHOD(device_detach, cue_detach), 
DEVMETHOD(device_shutdown, cue_shutdown), { 0, 0 } }; Static driver_t cue_driver = { "cue", cue_methods, sizeof(struct cue_softc) }; Static devclass_t cue_devclass; DRIVER_MODULE(cue, uhub, cue_driver, cue_devclass, usbd_driver_load, 0); MODULE_DEPEND(cue, usb, 1, 1, 1); MODULE_DEPEND(cue, ether, 1, 1, 1); #define CUE_SETBIT(sc, reg, x) \ cue_csr_write_1(sc, reg, cue_csr_read_1(sc, reg) | (x)) #define CUE_CLRBIT(sc, reg, x) \ cue_csr_write_1(sc, reg, cue_csr_read_1(sc, reg) & ~(x)) Static int cue_csr_read_1(struct cue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int8_t val = 0; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request(sc->cue_udev, &req, &val); CUE_UNLOCK(sc); if (err) return(0); return(val); } Static int cue_csr_read_2(struct cue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int16_t val = 0; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request(sc->cue_udev, &req, &val); CUE_UNLOCK(sc); if (err) return(0); return(val); } Static int cue_csr_write_1(struct cue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); err = usbd_do_request(sc->cue_udev, &req, NULL); CUE_UNLOCK(sc); if (err) return(-1); return(0); } #ifdef notdef Static int cue_csr_write_2(struct cue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, 
reg); USETW(req.wLength, 0); err = usbd_do_request(sc->cue_udev, &req, NULL); CUE_UNLOCK(sc); if (err) return(-1); return(0); } #endif Static int cue_mem(struct cue_softc *sc, int cmd, int addr, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); if (cmd == CUE_CMD_READSRAM) req.bmRequestType = UT_READ_VENDOR_DEVICE; else req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = cmd; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); err = usbd_do_request(sc->cue_udev, &req, buf); CUE_UNLOCK(sc); if (err) return(-1); return(0); } Static int cue_getmac(struct cue_softc *sc, void *buf) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_GET_MACADDR; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, ETHER_ADDR_LEN); err = usbd_do_request(sc->cue_udev, &req, buf); CUE_UNLOCK(sc); if (err) { printf("cue%d: read MAC address failed\n", sc->cue_unit); return(-1); } return(0); } #define CUE_POLY 0xEDB88320 #define CUE_BITS 9 Static uint32_t cue_mchash(const uint8_t *addr) { uint32_t crc; int idx, bit; uint8_t data; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 
CUE_POLY : 0); } return (crc & ((1 << CUE_BITS) - 1)); } Static void cue_setmulti(struct cue_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < CUE_MCAST_TABLE_LEN; i++) sc->cue_mctab[i] = 0xFF; cue_mem(sc, CUE_CMD_WRITESRAM, CUE_MCAST_TABLE_ADDR, &sc->cue_mctab, CUE_MCAST_TABLE_LEN); return; } /* first, zot all the existing hash bits */ for (i = 0; i < CUE_MCAST_TABLE_LEN; i++) sc->cue_mctab[i] = 0; /* now program new ones */ #if __FreeBSD_version >= 500000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = cue_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sc->cue_mctab[h >> 3] |= 1 << (h & 0x7); } /* * Also include the broadcast address in the filter * so we can receive broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { #if __FreeBSD_version >= 500000 h = cue_mchash(ifp->if_broadcastaddr); #else h = cue_mchash(etherbroadcastaddr); #endif sc->cue_mctab[h >> 3] |= 1 << (h & 0x7); } cue_mem(sc, CUE_CMD_WRITESRAM, CUE_MCAST_TABLE_ADDR, &sc->cue_mctab, CUE_MCAST_TABLE_LEN); return; } Static void cue_reset(struct cue_softc *sc) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_RESET; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request(sc->cue_udev, &req, NULL); if (err) printf("cue%d: reset failed\n", sc->cue_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a Pegasus chip. 
*/
/*
 * Probe routine: match the USB vendor/product IDs against the
 * cue_devs table.  Returns UMATCH_VENDOR_PRODUCT on a hit,
 * UMATCH_NONE otherwise.
 */
USB_MATCH(cue)
{
	USB_MATCH_START(cue, uaa);
	struct cue_type *t;

	/* We match at the interface level; nothing to do without one. */
	if (!uaa->iface)
		return(UMATCH_NONE);

	t = cue_devs;
	/* Walk the table until the {0, 0} sentinel entry. */
	while(t->cue_vid) {
		if (uaa->vendor == t->cue_vid &&
		    uaa->product == t->cue_did) {
			return(UMATCH_VENDOR_PRODUCT);
		}
		t++;
	}

	return(UMATCH_NONE);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
USB_ATTACH(cue)
{
	USB_ATTACH_START(cue, sc, uaa);
	char devinfo[1024];
	u_char eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	int i;

	bzero(sc, sizeof(struct cue_softc));
	sc->cue_iface = uaa->iface;
	sc->cue_udev = uaa->device;
	sc->cue_unit = device_get_unit(self);

	if (usbd_set_config_no(sc->cue_udev, CUE_CONFIG_NO, 0)) {
		/*
		 * NOTE(review): this is a set-configuration failure, but the
		 * message text says "getting interface handle" -- looks
		 * copied from another driver; confirm before relying on it.
		 */
		printf("cue%d: getting interface handle failed\n",
		    sc->cue_unit);
		USB_ATTACH_ERROR_RETURN;
	}

	id = usbd_get_interface_descriptor(uaa->iface);

	usbd_devinfo(uaa->device, 0, devinfo);
	device_set_desc_copy(self, devinfo);
	printf("%s: %s\n", USBDEVNAME(self), devinfo);

	/*
	 * Find endpoints: bulk IN -> RX, bulk OUT -> TX,
	 * interrupt IN -> INTR (recorded but opened elsewhere).
	 */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(uaa->iface, i);
		if (!ed) {
			printf("cue%d: couldn't get ep %d\n",
			    sc->cue_unit, i);
			USB_ATTACH_ERROR_RETURN;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->cue_ed[CUE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->cue_ed[CUE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->cue_ed[CUE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

#if __FreeBSD_version >= 500000
	/* Recursive mutex: the driver re-enters its own locked paths. */
	mtx_init(&sc->cue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
#endif
	CUE_LOCK(sc);

#ifdef notdef
	/* Reset the adapter. */
	cue_reset(sc);
#endif

	/*
	 * Get station address.
	 */
	cue_getmac(sc, &eaddr);

	/*
	 * A CATC chip was detected. Inform the world.
	 */
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "cue", sc->cue_unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cue_ioctl;
	ifp->if_start = cue_start;
	ifp->if_watchdog = cue_watchdog;
	ifp->if_init = cue_init;
	ifp->if_baudrate = 10000000;	/* 10Mbps part */
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	/* Queue-data cookie used by the USB input path (cue_rxstart). */
	sc->cue_qdat.ifp = ifp;
	sc->cue_qdat.if_rxstart = cue_rxstart;

	/*
	 * Call MI attach routine.
	 */
#if __FreeBSD_version >= 500000
	ether_ifattach(ifp, eaddr);
#else
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#endif
	callout_handle_init(&sc->cue_stat_ch);
	usb_register_netisr();
	sc->cue_dying = 0;
	CUE_UNLOCK(sc);

	USB_ATTACH_SUCCESS_RETURN;
}

/*
 * Detach: mark the softc dying, cancel the stats timer, detach from
 * the network stack and abort any transfers still in flight.
 */
Static int
cue_detach(device_ptr_t dev)
{
	struct cue_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	CUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Register calls in the csr helpers check cue_dying and bail. */
	sc->cue_dying = 1;
	untimeout(cue_tick, sc, sc->cue_stat_ch);
#if __FreeBSD_version >= 500000
	ether_ifdetach(ifp);
#else
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
#endif

	if (sc->cue_ep[CUE_ENDPT_TX] != NULL)
		usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_TX]);
	if (sc->cue_ep[CUE_ENDPT_RX] != NULL)
		usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_RX]);
	if (sc->cue_ep[CUE_ENDPT_INTR] != NULL)
		usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_INTR]);

	CUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
	mtx_destroy(&sc->cue_mtx);
#endif

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
*/
/*
 * cue_newbuf: attach a cluster mbuf to RX chain slot 'c'.  If 'm' is
 * NULL a fresh header+cluster is allocated; otherwise 'm' is recycled
 * (its data pointer reset to the cluster base).  Returns 0 or ENOBUFS.
 */
Static int
cue_newbuf(struct cue_softc *sc, struct cue_chain *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("cue%d: no memory for rx list "
			    "-- packet dropped!\n", sc->cue_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("cue%d: no memory for rx list "
			    "-- packet dropped!\n", sc->cue_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length and data base. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Align the IP header on a longword boundary. */
	m_adj(m_new, ETHER_ALIGN);
	c->cue_mbuf = m_new;

	return(0);
}

/*
 * Populate the RX chain: give each slot a cluster mbuf and a USB
 * transfer handle.  Returns ENOBUFS on any allocation failure.
 */
Static int
cue_rx_list_init(struct cue_softc *sc)
{
	struct cue_cdata *cd;
	struct cue_chain *c;
	int i;

	cd = &sc->cue_cdata;
	for (i = 0; i < CUE_RX_LIST_CNT; i++) {
		c = &cd->cue_rx_chain[i];
		c->cue_sc = sc;
		c->cue_idx = i;
		if (cue_newbuf(sc, c, NULL) == ENOBUFS)
			return(ENOBUFS);
		/* Transfer handles survive re-init; allocate only once. */
		if (c->cue_xfer == NULL) {
			c->cue_xfer = usbd_alloc_xfer(sc->cue_udev);
			if (c->cue_xfer == NULL)
				return(ENOBUFS);
		}
	}

	return(0);
}

/*
 * Populate the TX chain: each slot gets a USB transfer handle and a
 * CUE_BUFSZ staging buffer (mbufs are attached at transmit time by
 * cue_encap).  Returns ENOBUFS on any allocation failure.
 */
Static int
cue_tx_list_init(struct cue_softc *sc)
{
	struct cue_cdata *cd;
	struct cue_chain *c;
	int i;

	cd = &sc->cue_cdata;
	for (i = 0; i < CUE_TX_LIST_CNT; i++) {
		c = &cd->cue_tx_chain[i];
		c->cue_sc = sc;
		c->cue_idx = i;
		c->cue_mbuf = NULL;
		if (c->cue_xfer == NULL) {
			c->cue_xfer = usbd_alloc_xfer(sc->cue_udev);
			if (c->cue_xfer == NULL)
				return(ENOBUFS);
		}
		c->cue_buf = malloc(CUE_BUFSZ, M_USBDEV, M_NOWAIT);
		if (c->cue_buf == NULL)
			return(ENOBUFS);
	}

	return(0);
}

/*
 * Restart reception on the current RX producer slot: replace its mbuf
 * and requeue the bulk-in transfer.  Called via the cue_qdat hook
 * after the netisr has consumed a received packet.
 */
Static void
cue_rxstart(struct ifnet *ifp)
{
	struct cue_softc *sc;
	struct cue_chain *c;

	sc = ifp->if_softc;
	CUE_LOCK(sc);
	c = &sc->cue_cdata.cue_rx_chain[sc->cue_cdata.cue_rx_prod];

	if (cue_newbuf(sc, c, NULL) == ENOBUFS) {
		ifp->if_ierrors++;
		CUE_UNLOCK(sc);
		return;
	}

	/* Setup new transfer. */
	usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_RX],
	    c, mtod(c->cue_mbuf, char *), CUE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, cue_rxeof);
	usbd_transfer(c->cue_xfer);
	CUE_UNLOCK(sc);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
Static void
cue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct cue_softc *sc;
	struct cue_chain *c;
	struct mbuf *m;
	struct ifnet *ifp;
	int total_len = 0;
	u_int16_t len;

	c = priv;
	sc = c->cue_sc;
	CUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Interface torn down while the transfer was pending. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		CUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Aborted/cancelled transfers are not requeued. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			CUE_UNLOCK(sc);
			return;
		}
		if (usbd_ratecheck(&sc->cue_rx_notice))
			printf("cue%d: usb error on rx: %s\n", sc->cue_unit,
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->cue_ep[CUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	/* The chip prepends a 16-bit frame length to the data. */
	m = c->cue_mbuf;
	len = *mtod(m, u_int16_t *);

	/* No errors; receive the packet. */
	total_len = len;

	if (len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	ifp->if_ipackets++;
	/* Strip the length word before handing the frame up. */
	m_adj(m, sizeof(u_int16_t));
	m->m_pkthdr.rcvif = (struct ifnet *)&sc->cue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	CUE_UNLOCK(sc);

	return;
done:
	/* Setup new transfer. */
	usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_RX],
	    c, mtod(c->cue_mbuf, char *), CUE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, cue_rxeof);
	usbd_transfer(c->cue_xfer);
	CUE_UNLOCK(sc);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
*/
Static void
cue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct cue_softc *sc;
	struct cue_chain *c;
	struct ifnet *ifp;
	usbd_status err;

	c = priv;
	sc = c->cue_sc;
	CUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (status != USBD_NORMAL_COMPLETION) {
		/* Aborted/cancelled: nothing to clean up here. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			CUE_UNLOCK(sc);
			return;
		}
		printf("cue%d: usb error on tx: %s\n", sc->cue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->cue_ep[CUE_ENDPT_TX]);
		CUE_UNLOCK(sc);
		return;
	}

	/* Transmit finished: clear the watchdog and let cue_start run. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->cue_xfer, NULL, NULL, NULL, &err);

	if (c->cue_mbuf != NULL) {
		c->cue_mbuf->m_pkthdr.rcvif = ifp;
		/* Hand the mbuf to the USB done-queue for freeing. */
		usb_tx_done(c->cue_mbuf);
		c->cue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	CUE_UNLOCK(sc);

	return;
}

/*
 * Once-a-second stats poll: fold the chip's collision and frame-error
 * counters into the ifnet stats, then re-arm the timeout.
 */
Static void
cue_tick(void *xsc)
{
	struct cue_softc *sc;
	struct ifnet *ifp;

	sc = xsc;

	if (sc == NULL)
		return;

	CUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	ifp->if_collisions += cue_csr_read_2(sc, CUE_TX_SINGLECOLL);
	ifp->if_collisions += cue_csr_read_2(sc, CUE_TX_MULTICOLL);
	ifp->if_collisions += cue_csr_read_2(sc, CUE_TX_EXCESSCOLL);

	if (cue_csr_read_2(sc, CUE_RX_FRAMEERR))
		ifp->if_ierrors++;

	sc->cue_stat_ch = timeout(cue_tick, sc, hz);

	CUE_UNLOCK(sc);

	return;
}

/*
 * Stage mbuf 'm' into TX chain slot 'idx' and start the bulk-out
 * transfer.  Returns 0 on success, EIO if the transfer could not be
 * started (in which case the interface is stopped).
 */
Static int
cue_encap(struct cue_softc *sc, struct mbuf *m, int idx)
{
	int total_len;
	struct cue_chain *c;
	usbd_status err;

	c = &sc->cue_cdata.cue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer, leaving two
	 * bytes at the beginning to hold the frame length.
	 */
	m_copydata(m, 0, m->m_pkthdr.len, c->cue_buf + 2);
	c->cue_mbuf = m;

	total_len = m->m_pkthdr.len + 2;

	/* The first two bytes are the frame length (little-endian). */
	c->cue_buf[0] = (u_int8_t)m->m_pkthdr.len;
	c->cue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8);

	usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_TX],
	    c, c->cue_buf, total_len, 0, 10000, cue_txeof);

	/* Transmit */
	err = usbd_transfer(c->cue_xfer);
	if (err != USBD_IN_PROGRESS) {
		cue_stop(sc);
		return(EIO);
	}

	sc->cue_cdata.cue_tx_cnt++;

	return(0);
}

/*
 * if_start handler: dequeue one packet and hand it to cue_encap.
 * Only one transmit is in flight at a time (slot 0, gated by
 * IFF_OACTIVE).
 */
Static void
cue_start(struct ifnet *ifp)
{
	struct cue_softc *sc;
	struct mbuf *m_head = NULL;

	sc = ifp->if_softc;
	CUE_LOCK(sc);

	if (ifp->if_flags & IFF_OACTIVE) {
		CUE_UNLOCK(sc);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		CUE_UNLOCK(sc);
		return;
	}

	if (cue_encap(sc, m_head, 0)) {
		/* Transfer failed to start: requeue and retry later. */
		IF_PREPEND(&ifp->if_snd, m_head);
		ifp->if_flags |= IFF_OACTIVE;
		CUE_UNLOCK(sc);
		return;
	}

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	BPF_MTAP(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	CUE_UNLOCK(sc);

	return;
}

/*
 * if_init handler: program the MAC address and RX filter, build the
 * TX/RX rings, open the bulk pipes and start reception.
 * NOTE(review): the IFF_RUNNING early-return happens before CUE_LOCK
 * is taken -- presumably relying on the caller's context; confirm.
 */
Static void
cue_init(void *xsc)
{
	struct cue_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct cue_chain *c;
	usbd_status err;
	int i;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	CUE_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
#ifdef foo
	cue_reset(sc);
#endif

	/* Set MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		cue_csr_write_1(sc, CUE_PAR0 - i, sc->arpcom.ac_enaddr[i]);

	/* Enable RX logic. */
	cue_csr_write_1(sc, CUE_ETHCTL, CUE_ETHCTL_RX_ON|CUE_ETHCTL_MCAST_ON);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CUE_SETBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
	} else {
		CUE_CLRBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
	}

	/* Init TX ring. */
	if (cue_tx_list_init(sc) == ENOBUFS) {
		printf("cue%d: tx list init failed\n", sc->cue_unit);
		CUE_UNLOCK(sc);
		return;
	}

	/* Init RX ring. */
	if (cue_rx_list_init(sc) == ENOBUFS) {
		printf("cue%d: rx list init failed\n", sc->cue_unit);
		CUE_UNLOCK(sc);
		return;
	}

	/* Load the multicast filter. */
	cue_setmulti(sc);

	/*
	 * Set the number of RX and TX buffers that we want
	 * to reserve inside the ASIC.
	 */
	cue_csr_write_1(sc, CUE_RX_BUFPKTS, CUE_RX_FRAMES);
	cue_csr_write_1(sc, CUE_TX_BUFPKTS, CUE_TX_FRAMES);

	/* Set advanced operation modes. */
	cue_csr_write_1(sc, CUE_ADVANCED_OPMODES,
	    CUE_AOP_EMBED_RXLEN|0x01); /* 1 wait state */

	/* Program the LED operation. */
	cue_csr_write_1(sc, CUE_LEDCTL, CUE_LEDCTL_FOLLOW_LINK);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->cue_iface, sc->cue_ed[CUE_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->cue_ep[CUE_ENDPT_RX]);
	if (err) {
		printf("cue%d: open rx pipe failed: %s\n",
		    sc->cue_unit, usbd_errstr(err));
		CUE_UNLOCK(sc);
		return;
	}
	err = usbd_open_pipe(sc->cue_iface, sc->cue_ed[CUE_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->cue_ep[CUE_ENDPT_TX]);
	if (err) {
		printf("cue%d: open tx pipe failed: %s\n",
		    sc->cue_unit, usbd_errstr(err));
		CUE_UNLOCK(sc);
		return;
	}

	/* Start up the receive pipe. */
	for (i = 0; i < CUE_RX_LIST_CNT; i++) {
		c = &sc->cue_cdata.cue_rx_chain[i];
		usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_RX],
		    c, mtod(c->cue_mbuf, char *), CUE_BUFSZ,
		    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, cue_rxeof);
		usbd_transfer(c->cue_xfer);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	CUE_UNLOCK(sc);

	/* Kick off the once-a-second stats poll. */
	sc->cue_stat_ch = timeout(cue_tick, sc, hz);

	return;
}

/*
 * if_ioctl handler: promiscuous-mode transitions, up/down, and
 * multicast filter reloads; everything else goes to ether_ioctl().
 */
Static int
cue_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cue_softc *sc = ifp->if_softc;
	int error = 0;

	CUE_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Only toggle PROMISC in hardware on a change. */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->cue_if_flags & IFF_PROMISC)) {
				CUE_SETBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
				cue_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->cue_if_flags & IFF_PROMISC) {
				CUE_CLRBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
				cue_setmulti(sc);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				cue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cue_stop(sc);
		}
		/* Remember flags so the next call can detect changes. */
		sc->cue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		cue_setmulti(sc);
		error = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	CUE_UNLOCK(sc);

	return(error);
}

/*
 * Watchdog: a transmit sat for if_timer seconds without completing.
 * Reap the stuck TX slot via cue_txeof and restart the send queue.
 */
Static void
cue_watchdog(struct ifnet *ifp)
{
	struct cue_softc *sc;
	struct cue_chain *c;
	usbd_status stat;

	sc = ifp->if_softc;
	CUE_LOCK(sc);

	ifp->if_oerrors++;
	printf("cue%d: watchdog timeout\n", sc->cue_unit);

	c = &sc->cue_cdata.cue_tx_chain[0];
	usbd_get_xfer_status(c->cue_xfer, NULL, NULL, NULL, &stat);
	cue_txeof(c->cue_xfer, c, stat);

	if (ifp->if_snd.ifq_head != NULL)
		cue_start(ifp);
	CUE_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
*/ Static void cue_stop(struct cue_softc *sc) { usbd_status err; struct ifnet *ifp; int i; CUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; cue_csr_write_1(sc, CUE_ETHCTL, 0); cue_reset(sc); untimeout(cue_tick, sc, sc->cue_stat_ch); /* Stop transfers. */ if (sc->cue_ep[CUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: abort rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: close rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_RX] = NULL; } if (sc->cue_ep[CUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: abort tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: close tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_TX] = NULL; } if (sc->cue_ep[CUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_INTR]); if (err) { printf("cue%d: abort intr pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_INTR]); if (err) { printf("cue%d: close intr pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_INTR] = NULL; } /* Free RX resources. */ for (i = 0; i < CUE_RX_LIST_CNT; i++) { if (sc->cue_cdata.cue_rx_chain[i].cue_buf != NULL) { free(sc->cue_cdata.cue_rx_chain[i].cue_buf, M_USBDEV); sc->cue_cdata.cue_rx_chain[i].cue_buf = NULL; } if (sc->cue_cdata.cue_rx_chain[i].cue_mbuf != NULL) { m_freem(sc->cue_cdata.cue_rx_chain[i].cue_mbuf); sc->cue_cdata.cue_rx_chain[i].cue_mbuf = NULL; } if (sc->cue_cdata.cue_rx_chain[i].cue_xfer != NULL) { usbd_free_xfer(sc->cue_cdata.cue_rx_chain[i].cue_xfer); sc->cue_cdata.cue_rx_chain[i].cue_xfer = NULL; } } /* Free TX resources. 
*/ for (i = 0; i < CUE_TX_LIST_CNT; i++) { if (sc->cue_cdata.cue_tx_chain[i].cue_buf != NULL) { free(sc->cue_cdata.cue_tx_chain[i].cue_buf, M_USBDEV); sc->cue_cdata.cue_tx_chain[i].cue_buf = NULL; } if (sc->cue_cdata.cue_tx_chain[i].cue_mbuf != NULL) { m_freem(sc->cue_cdata.cue_tx_chain[i].cue_mbuf); sc->cue_cdata.cue_tx_chain[i].cue_mbuf = NULL; } if (sc->cue_cdata.cue_tx_chain[i].cue_xfer != NULL) { usbd_free_xfer(sc->cue_cdata.cue_tx_chain[i].cue_xfer); sc->cue_cdata.cue_tx_chain[i].cue_xfer = NULL; } } ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); CUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void cue_shutdown(device_ptr_t dev) { struct cue_softc *sc; sc = device_get_softc(dev); CUE_LOCK(sc); cue_reset(sc); cue_stop(sc); CUE_UNLOCK(sc); return; } Index: head/sys/dev/usb/if_kue.c =================================================================== --- head/sys/dev/usb/if_kue.c (revision 129878) +++ head/sys/dev/usb/if_kue.c (revision 129879) @@ -1,1137 +1,1138 @@ /* * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Kawasaki LSI KL5KUSB101B USB to ethernet adapter driver. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The KLSI USB to ethernet adapter chip contains an USB serial interface, * ethernet MAC and embedded microcontroller (called the QT Engine). * The chip must have firmware loaded into it before it will operate. * Packets are passed between the chip and host via bulk transfers. * There is an interrupt endpoint mentioned in the software spec, however * it's currently unused. This device is 10Mbps half-duplex only, hence * there is no media selection logic. The MAC supports a 128 entry * multicast filter, though the exact size of the filter can depend * on the firmware. Curiously, while the software spec describes various * ethernet statistics counters, my sample adapter and firmware combination * claims not to support any statistics counters at all. 
* * Note that once we load the firmware in the device, we have to be * careful not to load it again: if you restart your computer but * leave the adapter attached to the USB controller, it may remain * powered on and retain its firmware. In this case, we don't need * to load the firmware a second time. * * Special thanks to Rob Furr for providing an ADS Technologies * adapter for development and testing. No monkeys were harmed during * the development of this driver. */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include #include #include MODULE_DEPEND(kue, usb, 1, 1, 1); MODULE_DEPEND(kue, ether, 1, 1, 1); /* * Various supported device vendors/products. */ Static struct kue_type kue_devs[] = { { USB_VENDOR_AOX, USB_PRODUCT_AOX_USB101 }, { USB_VENDOR_KLSI, USB_PRODUCT_AOX_USB101 }, { USB_VENDOR_ADS, USB_PRODUCT_ADS_UBS10BT }, { USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC10T }, { USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_EA101 }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET2 }, { USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_E45 }, { USB_VENDOR_3COM, USB_PRODUCT_3COM_3C19250 }, { USB_VENDOR_COREGA, USB_PRODUCT_COREGA_ETHER_USB_T }, { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650C }, { USB_VENDOR_SMC, USB_PRODUCT_SMC_2102USB }, { USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10T }, { USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BT }, { USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BTN }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET3 }, { USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETT }, { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_URE450 }, { 0, 0 } }; Static int kue_match(device_ptr_t); Static int kue_attach(device_ptr_t); Static int kue_detach(device_ptr_t); Static void kue_shutdown(device_ptr_t); Static int kue_tx_list_init(struct kue_softc *); Static int 
kue_rx_list_init(struct kue_softc *); Static int kue_newbuf(struct kue_softc *, struct kue_chain *, struct mbuf *); Static int kue_encap(struct kue_softc *, struct mbuf *, int); Static void kue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void kue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void kue_start(struct ifnet *); Static void kue_rxstart(struct ifnet *); Static int kue_ioctl(struct ifnet *, u_long, caddr_t); Static void kue_init(void *); Static void kue_stop(struct kue_softc *); Static void kue_watchdog(struct ifnet *); Static void kue_setmulti(struct kue_softc *); Static void kue_reset(struct kue_softc *); Static usbd_status kue_do_request(usbd_device_handle, usb_device_request_t *, void *); Static usbd_status kue_ctl(struct kue_softc *, int, u_int8_t, u_int16_t, char *, int); Static usbd_status kue_setword(struct kue_softc *, u_int8_t, u_int16_t); Static int kue_load_fw(struct kue_softc *); Static device_method_t kue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, kue_match), DEVMETHOD(device_attach, kue_attach), DEVMETHOD(device_detach, kue_detach), DEVMETHOD(device_shutdown, kue_shutdown), { 0, 0 } }; Static driver_t kue_driver = { "kue", kue_methods, sizeof(struct kue_softc) }; Static devclass_t kue_devclass; DRIVER_MODULE(kue, uhub, kue_driver, kue_devclass, usbd_driver_load, 0); /* * We have a custom do_request function which is almost like the * regular do_request function, except it has a much longer timeout. * Why? Because we need to make requests over the control endpoint * to download the firmware to the device, which can take longer * than the default timeout. 
*/ Static usbd_status kue_do_request(usbd_device_handle dev, usb_device_request_t *req, void *data) { usbd_xfer_handle xfer; usbd_status err; xfer = usbd_alloc_xfer(dev); usbd_setup_default_xfer(xfer, dev, 0, 500000, req, data, UGETW(req->wLength), USBD_SHORT_XFER_OK, 0); err = usbd_sync_transfer(xfer); usbd_free_xfer(xfer); return(err); } Static usbd_status kue_setword(struct kue_softc *sc, u_int8_t breq, u_int16_t word) { usbd_device_handle dev; usb_device_request_t req; usbd_status err; if (sc->kue_dying) return(USBD_NORMAL_COMPLETION); dev = sc->kue_udev; KUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = breq; USETW(req.wValue, word); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = kue_do_request(dev, &req, NULL); KUE_UNLOCK(sc); return(err); } Static usbd_status kue_ctl(struct kue_softc *sc, int rw, u_int8_t breq, u_int16_t val, char *data, int len) { usbd_device_handle dev; usb_device_request_t req; usbd_status err; dev = sc->kue_udev; if (sc->kue_dying) return(USBD_NORMAL_COMPLETION); KUE_LOCK(sc); if (rw == KUE_CTL_WRITE) req.bmRequestType = UT_WRITE_VENDOR_DEVICE; else req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = breq; USETW(req.wValue, val); USETW(req.wIndex, 0); USETW(req.wLength, len); err = kue_do_request(dev, &req, data); KUE_UNLOCK(sc); return(err); } Static int kue_load_fw(struct kue_softc *sc) { usbd_status err; usb_device_descriptor_t *dd; int hwrev; dd = &sc->kue_udev->ddesc; hwrev = UGETW(dd->bcdDevice); /* * First, check if we even need to load the firmware. * If the device was still attached when the system was * rebooted, it may already have firmware loaded in it. * If this is the case, we don't need to do it again. * And in fact, if we try to load it again, we'll hang, * so we have to avoid this condition if we don't want * to look stupid. * * We can test this quickly by checking the bcdRevision * code. 
The NIC will return a different revision code if * it's probed while the firmware is still loaded and * running. */ if (hwrev == 0x0202) return(0); /* Load code segment */ err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN, 0, kue_code_seg, sizeof(kue_code_seg)); if (err) { printf("kue%d: failed to load code segment: %s\n", sc->kue_unit, usbd_errstr(err)); return(ENXIO); } /* Load fixup segment */ err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN, 0, kue_fix_seg, sizeof(kue_fix_seg)); if (err) { printf("kue%d: failed to load fixup segment: %s\n", sc->kue_unit, usbd_errstr(err)); return(ENXIO); } /* Send trigger command. */ err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN, 0, kue_trig_seg, sizeof(kue_trig_seg)); if (err) { printf("kue%d: failed to load trigger segment: %s\n", sc->kue_unit, usbd_errstr(err)); return(ENXIO); } return(0); } Static void kue_setmulti(struct kue_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; int i = 0; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI; sc->kue_rxfilt &= ~KUE_RXFILT_MULTICAST; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); return; } sc->kue_rxfilt &= ~KUE_RXFILT_ALLMULTI; #if __FreeBSD_version >= 500000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * If there are too many addresses for the * internal filter, switch over to allmulti mode. 
*/ if (i == KUE_MCFILTCNT(sc)) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), KUE_MCFILT(sc, i), ETHER_ADDR_LEN); i++; } if (i == KUE_MCFILTCNT(sc)) sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI; else { sc->kue_rxfilt |= KUE_RXFILT_MULTICAST; kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MCAST_FILTERS, i, sc->kue_mcfilters, i * ETHER_ADDR_LEN); } kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); return; } /* * Issue a SET_CONFIGURATION command to reset the MAC. This should be * done after the firmware is loaded into the adapter in order to * bring it into proper operation. */ Static void kue_reset(struct kue_softc *sc) { if (usbd_set_config_no(sc->kue_udev, KUE_CONFIG_NO, 0) || usbd_device2interface_handle(sc->kue_udev, KUE_IFACE_IDX, &sc->kue_iface)) { printf("kue%d: getting interface handle failed\n", sc->kue_unit); } /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a KLSI chip. */ USB_MATCH(kue) { USB_MATCH_START(kue, uaa); struct kue_type *t; if (!uaa->iface) return(UMATCH_NONE); t = kue_devs; while(t->kue_vid) { if (uaa->vendor == t->kue_vid && uaa->product == t->kue_did) { return(UMATCH_VENDOR_PRODUCT); } t++; } return(UMATCH_NONE); } /* * Attach the interface. Allocate softc structures, do * setup and ethernet/BPF attach. */ USB_ATTACH(kue) { USB_ATTACH_START(kue, sc, uaa); char devinfo[1024]; struct ifnet *ifp; usbd_status err; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; bzero(sc, sizeof(struct kue_softc)); sc->kue_iface = uaa->iface; sc->kue_udev = uaa->device; sc->kue_unit = device_get_unit(self); id = usbd_get_interface_descriptor(uaa->iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints. 
*/ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(uaa->iface, i); if (!ed) { printf("kue%d: couldn't get ep %d\n", sc->kue_unit, i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->kue_ed[KUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->kue_ed[KUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->kue_ed[KUE_ENDPT_INTR] = ed->bEndpointAddress; } } #if __FreeBSD_version >= 500000 mtx_init(&sc->kue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif KUE_LOCK(sc); /* Load the firmware into the NIC. */ if (kue_load_fw(sc)) { KUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->kue_mtx); #endif USB_ATTACH_ERROR_RETURN; } /* Reset the adapter. */ kue_reset(sc); /* Read ethernet descriptor */ err = kue_ctl(sc, KUE_CTL_READ, KUE_CMD_GET_ETHER_DESCRIPTOR, 0, (char *)&sc->kue_desc, sizeof(sc->kue_desc)); sc->kue_mcfilters = malloc(KUE_MCFILTCNT(sc) * ETHER_ADDR_LEN, M_USBDEV, M_NOWAIT); bcopy(sc->kue_desc.kue_macaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; if_initname(ifp, "kue", sc->kue_unit); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = kue_ioctl; ifp->if_start = kue_start; ifp->if_watchdog = kue_watchdog; ifp->if_init = kue_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; sc->kue_qdat.ifp = ifp; sc->kue_qdat.if_rxstart = kue_rxstart; /* * Call MI attach routine. 
*/ #if __FreeBSD_version >= 500000 ether_ifattach(ifp, sc->kue_desc.kue_macaddr); #else ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #endif usb_register_netisr(); sc->kue_dying = 0; KUE_UNLOCK(sc); USB_ATTACH_SUCCESS_RETURN; } Static int kue_detach(device_ptr_t dev) { struct kue_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; sc->kue_dying = 1; if (ifp != NULL) #if __FreeBSD_version >= 500000 ether_ifdetach(ifp); #else ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); #endif if (sc->kue_ep[KUE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (sc->kue_ep[KUE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (sc->kue_ep[KUE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (sc->kue_mcfilters != NULL) free(sc->kue_mcfilters, M_USBDEV); KUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->kue_mtx); #endif return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ Static int kue_newbuf(struct kue_softc *sc, struct kue_chain *c, struct mbuf *m) { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("kue%d: no memory for rx list " "-- packet dropped!\n", sc->kue_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("kue%d: no memory for rx list " "-- packet dropped!\n", sc->kue_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } c->kue_mbuf = m_new; return(0); } Static int kue_rx_list_init(struct kue_softc *sc) { struct kue_cdata *cd; struct kue_chain *c; int i; cd = &sc->kue_cdata; for (i = 0; i < KUE_RX_LIST_CNT; i++) { c = &cd->kue_rx_chain[i]; c->kue_sc = sc; c->kue_idx = i; if (kue_newbuf(sc, c, NULL) == ENOBUFS) return(ENOBUFS); if (c->kue_xfer == NULL) { c->kue_xfer = usbd_alloc_xfer(sc->kue_udev); if (c->kue_xfer 
== NULL) return(ENOBUFS); } } return(0); } Static int kue_tx_list_init(struct kue_softc *sc) { struct kue_cdata *cd; struct kue_chain *c; int i; cd = &sc->kue_cdata; for (i = 0; i < KUE_TX_LIST_CNT; i++) { c = &cd->kue_tx_chain[i]; c->kue_sc = sc; c->kue_idx = i; c->kue_mbuf = NULL; if (c->kue_xfer == NULL) { c->kue_xfer = usbd_alloc_xfer(sc->kue_udev); if (c->kue_xfer == NULL) return(ENOBUFS); } c->kue_buf = malloc(KUE_BUFSZ, M_USBDEV, M_NOWAIT); if (c->kue_buf == NULL) return(ENOBUFS); } return(0); } Static void kue_rxstart(struct ifnet *ifp) { struct kue_softc *sc; struct kue_chain *c; sc = ifp->if_softc; KUE_LOCK(sc); c = &sc->kue_cdata.kue_rx_chain[sc->kue_cdata.kue_rx_prod]; if (kue_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; return; } /* Setup new transfer. */ usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->kue_mbuf, char *), KUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->kue_xfer); KUE_UNLOCK(sc); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ Static void kue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct kue_softc *sc; struct kue_chain *c; struct mbuf *m; struct ifnet *ifp; int total_len = 0; u_int16_t len; c = priv; sc = c->kue_sc; KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_RUNNING)) { KUE_UNLOCK(sc); return; } if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { KUE_UNLOCK(sc); return; } if (usbd_ratecheck(&sc->kue_rx_notice)) printf("kue%d: usb error on rx: %s\n", sc->kue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->kue_ep[KUE_ENDPT_RX]); goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); m = c->kue_mbuf; if (total_len <= 1) goto done; len = *mtod(m, u_int16_t *); m_adj(m, sizeof(u_int16_t)); /* No errors; receive the packet. 
*/ total_len = len; if (len < sizeof(struct ether_header)) { ifp->if_ierrors++; goto done; } ifp->if_ipackets++; m->m_pkthdr.rcvif = (struct ifnet *)&sc->kue_qdat; m->m_pkthdr.len = m->m_len = total_len; /* Put the packet on the special USB input queue. */ usb_ether_input(m); KUE_UNLOCK(sc); return; done: /* Setup new transfer. */ usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->kue_mbuf, char *), KUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->kue_xfer); KUE_UNLOCK(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ Static void kue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct kue_softc *sc; struct kue_chain *c; struct ifnet *ifp; usbd_status err; c = priv; sc = c->kue_sc; KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { KUE_UNLOCK(sc); return; } printf("kue%d: usb error on tx: %s\n", sc->kue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->kue_ep[KUE_ENDPT_TX]); KUE_UNLOCK(sc); return; } usbd_get_xfer_status(c->kue_xfer, NULL, NULL, NULL, &err); if (c->kue_mbuf != NULL) { c->kue_mbuf->m_pkthdr.rcvif = ifp; usb_tx_done(c->kue_mbuf); c->kue_mbuf = NULL; } if (err) ifp->if_oerrors++; else ifp->if_opackets++; KUE_UNLOCK(sc); return; } Static int kue_encap(struct kue_softc *sc, struct mbuf *m, int idx) { int total_len; struct kue_chain *c; usbd_status err; c = &sc->kue_cdata.kue_tx_chain[idx]; /* * Copy the mbuf data into a contiguous buffer, leaving two * bytes at the beginning to hold the frame length. */ m_copydata(m, 0, m->m_pkthdr.len, c->kue_buf + 2); c->kue_mbuf = m; total_len = m->m_pkthdr.len + 2; total_len += 64 - (total_len % 64); /* Frame length is specified in the first 2 bytes of the buffer. 
*/ c->kue_buf[0] = (u_int8_t)m->m_pkthdr.len; c->kue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8); usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_TX], c, c->kue_buf, total_len, 0, 10000, kue_txeof); /* Transmit */ err = usbd_transfer(c->kue_xfer); if (err != USBD_IN_PROGRESS) { kue_stop(sc); return(EIO); } sc->kue_cdata.kue_tx_cnt++; return(0); } Static void kue_start(struct ifnet *ifp) { struct kue_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; KUE_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { KUE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { KUE_UNLOCK(sc); return; } if (kue_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; KUE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; KUE_UNLOCK(sc); return; } Static void kue_init(void *xsc) { struct kue_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct kue_chain *c; usbd_status err; int i; KUE_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { KUE_UNLOCK(sc); return; } /* Set MAC address */ kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MAC, 0, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->kue_rxfilt = KUE_RXFILT_UNICAST|KUE_RXFILT_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); /* I'm not sure how to tune these. */ #ifdef notdef /* * Leave this one alone for now; setting it * wrong causes lockups on some machines/controllers. */ kue_setword(sc, KUE_CMD_SET_SOFS, 1); #endif kue_setword(sc, KUE_CMD_SET_URB_SIZE, 64); /* Init TX ring. */ if (kue_tx_list_init(sc) == ENOBUFS) { printf("kue%d: tx list init failed\n", sc->kue_unit); KUE_UNLOCK(sc); return; } /* Init RX ring. 
*/ if (kue_rx_list_init(sc) == ENOBUFS) { printf("kue%d: rx list init failed\n", sc->kue_unit); KUE_UNLOCK(sc); return; } /* Load the multicast filter. */ kue_setmulti(sc); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->kue_iface, sc->kue_ed[KUE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: open rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); KUE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->kue_iface, sc->kue_ed[KUE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: open tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); KUE_UNLOCK(sc); return; } /* Start up the receive pipe. */ for (i = 0; i < KUE_RX_LIST_CNT; i++) { c = &sc->kue_cdata.kue_rx_chain[i]; usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->kue_mbuf, char *), KUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->kue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; KUE_UNLOCK(sc); return; } Static int kue_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct kue_softc *sc = ifp->if_softc; int error = 0; KUE_LOCK(sc); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->kue_if_flags & IFF_PROMISC)) { sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->kue_if_flags & IFF_PROMISC) { sc->kue_rxfilt &= ~KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); } else if (!(ifp->if_flags & IFF_RUNNING)) kue_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) kue_stop(sc); } sc->kue_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: kue_setmulti(sc); error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } KUE_UNLOCK(sc); return(error); } Static void kue_watchdog(struct ifnet 
*ifp) { struct kue_softc *sc; struct kue_chain *c; usbd_status stat; sc = ifp->if_softc; KUE_LOCK(sc); ifp->if_oerrors++; printf("kue%d: watchdog timeout\n", sc->kue_unit); c = &sc->kue_cdata.kue_tx_chain[0]; usbd_get_xfer_status(c->kue_xfer, NULL, NULL, NULL, &stat); kue_txeof(c->kue_xfer, c, stat); if (ifp->if_snd.ifq_head != NULL) kue_start(ifp); KUE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ Static void kue_stop(struct kue_softc *sc) { usbd_status err; struct ifnet *ifp; int i; KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; /* Stop transfers. */ if (sc->kue_ep[KUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: abort rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: close rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_RX] = NULL; } if (sc->kue_ep[KUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: abort tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: close tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_TX] = NULL; } if (sc->kue_ep[KUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (err) { printf("kue%d: abort intr pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (err) { printf("kue%d: close intr pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_INTR] = NULL; } /* Free RX resources. 
*/ for (i = 0; i < KUE_RX_LIST_CNT; i++) { if (sc->kue_cdata.kue_rx_chain[i].kue_buf != NULL) { free(sc->kue_cdata.kue_rx_chain[i].kue_buf, M_USBDEV); sc->kue_cdata.kue_rx_chain[i].kue_buf = NULL; } if (sc->kue_cdata.kue_rx_chain[i].kue_mbuf != NULL) { m_freem(sc->kue_cdata.kue_rx_chain[i].kue_mbuf); sc->kue_cdata.kue_rx_chain[i].kue_mbuf = NULL; } if (sc->kue_cdata.kue_rx_chain[i].kue_xfer != NULL) { usbd_free_xfer(sc->kue_cdata.kue_rx_chain[i].kue_xfer); sc->kue_cdata.kue_rx_chain[i].kue_xfer = NULL; } } /* Free TX resources. */ for (i = 0; i < KUE_TX_LIST_CNT; i++) { if (sc->kue_cdata.kue_tx_chain[i].kue_buf != NULL) { free(sc->kue_cdata.kue_tx_chain[i].kue_buf, M_USBDEV); sc->kue_cdata.kue_tx_chain[i].kue_buf = NULL; } if (sc->kue_cdata.kue_tx_chain[i].kue_mbuf != NULL) { m_freem(sc->kue_cdata.kue_tx_chain[i].kue_mbuf); sc->kue_cdata.kue_tx_chain[i].kue_mbuf = NULL; } if (sc->kue_cdata.kue_tx_chain[i].kue_xfer != NULL) { usbd_free_xfer(sc->kue_cdata.kue_tx_chain[i].kue_xfer); sc->kue_cdata.kue_tx_chain[i].kue_xfer = NULL; } } ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); KUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void kue_shutdown(device_ptr_t dev) { struct kue_softc *sc; sc = device_get_softc(dev); kue_stop(sc); return; } Index: head/sys/dev/usb/if_rue.c =================================================================== --- head/sys/dev/usb/if_rue.c (revision 129878) +++ head/sys/dev/usb/if_rue.c (revision 129879) @@ -1,1523 +1,1524 @@ /*- * Copyright (c) 2001-2003, Shunsuke Akiyama . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek RTL8150 USB to fast ethernet controller driver. * Datasheet is available from * ftp://ftp.realtek.com.tw/lancard/data_sheet/8150/. */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifdef USB_DEBUG Static int ruedebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, rue, CTLFLAG_RW, 0, "USB rue"); SYSCTL_INT(_hw_usb_rue, OID_AUTO, debug, CTLFLAG_RW, &ruedebug, 0, "rue debug level"); #define DPRINTFN(n, x) do { \ if (ruedebug > (n)) \ logprintf x; \ } while (0); #else #define DPRINTFN(n, x) #endif #define DPRINTF(x) DPRINTFN(0, x) /* * Various supported device vendors/products. 
*/ Static struct rue_type rue_devs[] = { { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAKTX }, { USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_USBKR100 }, { 0, 0 } }; Static int rue_match(device_ptr_t); Static int rue_attach(device_ptr_t); Static int rue_detach(device_ptr_t); Static int rue_tx_list_init(struct rue_softc *); Static int rue_rx_list_init(struct rue_softc *); Static int rue_newbuf(struct rue_softc *, struct rue_chain *, struct mbuf *); Static int rue_encap(struct rue_softc *, struct mbuf *, int); #ifdef RUE_INTR_PIPE Static void rue_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); #endif Static void rue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void rue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void rue_tick(void *); Static void rue_rxstart(struct ifnet *); Static void rue_start(struct ifnet *); Static int rue_ioctl(struct ifnet *, u_long, caddr_t); Static void rue_init(void *); Static void rue_stop(struct rue_softc *); Static void rue_watchdog(struct ifnet *); Static void rue_shutdown(device_ptr_t); Static int rue_ifmedia_upd(struct ifnet *); Static void rue_ifmedia_sts(struct ifnet *, struct ifmediareq *); Static int rue_miibus_readreg(device_ptr_t, int, int); Static int rue_miibus_writereg(device_ptr_t, int, int, int); Static void rue_miibus_statchg(device_ptr_t); Static uint32_t rue_mchash(const uint8_t *); Static void rue_setmulti(struct rue_softc *); Static void rue_reset(struct rue_softc *); Static int rue_read_mem(struct rue_softc *, u_int16_t, void *, u_int16_t); Static int rue_write_mem(struct rue_softc *, u_int16_t, void *, u_int16_t); Static int rue_csr_read_1(struct rue_softc *, int); Static int rue_csr_write_1(struct rue_softc *, int, u_int8_t); Static int rue_csr_read_2(struct rue_softc *, int); Static int rue_csr_write_2(struct rue_softc *, int, u_int16_t); Static int rue_csr_write_4(struct rue_softc *, int, u_int32_t); Static device_method_t rue_methods[] = { /* Device interface */ 
DEVMETHOD(device_probe, rue_match), DEVMETHOD(device_attach, rue_attach), DEVMETHOD(device_detach, rue_detach), DEVMETHOD(device_shutdown, rue_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, rue_miibus_readreg), DEVMETHOD(miibus_writereg, rue_miibus_writereg), DEVMETHOD(miibus_statchg, rue_miibus_statchg), { 0, 0 } }; Static driver_t rue_driver = { "rue", rue_methods, sizeof(struct rue_softc) }; Static devclass_t rue_devclass; DRIVER_MODULE(rue, uhub, rue_driver, rue_devclass, usbd_driver_load, 0); DRIVER_MODULE(miibus, rue, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(rue, usb, 1, 1, 1); MODULE_DEPEND(rue, ether, 1, 1, 1); MODULE_DEPEND(rue, miibus, 1, 1, 1); #define RUE_SETBIT(sc, reg, x) \ rue_csr_write_1(sc, reg, rue_csr_read_1(sc, reg) | (x)) #define RUE_CLRBIT(sc, reg, x) \ rue_csr_write_1(sc, reg, rue_csr_read_1(sc, reg) & ~(x)) #define RUE_SETBIT_2(sc, reg, x) \ rue_csr_write_2(sc, reg, rue_csr_read_2(sc, reg) | (x)) #define RUE_CLRBIT_2(sc, reg, x) \ rue_csr_write_2(sc, reg, rue_csr_read_2(sc, reg) & ~(x)) Static int rue_read_mem(struct rue_softc *sc, u_int16_t addr, void *buf, u_int16_t len) { usb_device_request_t req; usbd_status err; if (sc->rue_dying) return (0); RUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UR_SET_ADDRESS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); err = usbd_do_request(sc->rue_udev, &req, buf); RUE_UNLOCK(sc); if (err) { printf("rue%d: control pipe read failed: %s\n", sc->rue_unit, usbd_errstr(err)); return (-1); } return (0); } Static int rue_write_mem(struct rue_softc *sc, u_int16_t addr, void *buf, u_int16_t len) { usb_device_request_t req; usbd_status err; if (sc->rue_dying) return (0); RUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UR_SET_ADDRESS; USETW(req.wValue, addr); USETW(req.wIndex, 0); 
USETW(req.wLength, len);

	err = usbd_do_request(sc->rue_udev, &req, buf);

	RUE_UNLOCK(sc);

	if (err) {
		printf("rue%d: control pipe write failed: %s\n",
		    sc->rue_unit, usbd_errstr(err));
		return (-1);
	}

	return (0);
}

/*
 * Read a single 8-bit chip register.  Returns 0 on transfer failure,
 * which callers cannot distinguish from a register that really reads 0.
 */
Static int
rue_csr_read_1(struct rue_softc *sc, int reg)
{
	int		err;
	u_int8_t	val = 0;

	err = rue_read_mem(sc, reg, &val, 1);

	if (err)
		return (0);

	return (val);
}

/*
 * Read a 16-bit chip register (little-endian on the wire).
 * Returns 0 on transfer failure.
 */
Static int
rue_csr_read_2(struct rue_softc *sc, int reg)
{
	int		err;
	u_int16_t	val = 0;
	uWord		w;

	/*
	 * Pre-initialize the word buffer: rue_read_mem() returns
	 * "success" without touching the buffer when rue_dying is set,
	 * so without this UGETW() below would read indeterminate bytes.
	 */
	USETW(w, val);
	err = rue_read_mem(sc, reg, &w, 2);
	val = UGETW(w);

	if (err)
		return (0);

	return (val);
}

/* Write a single 8-bit chip register.  Returns -1 on transfer failure. */
Static int
rue_csr_write_1(struct rue_softc *sc, int reg, u_int8_t val)
{
	int	err;

	err = rue_write_mem(sc, reg, &val, 1);

	if (err)
		return (-1);

	return (0);
}

/* Write a 16-bit chip register (converted to wire endianness). */
Static int
rue_csr_write_2(struct rue_softc *sc, int reg, u_int16_t val)
{
	int	err;
	uWord	w;

	USETW(w, val);
	err = rue_write_mem(sc, reg, &w, 2);

	if (err)
		return (-1);

	return (0);
}

/* Write a 32-bit chip register (converted to wire endianness). */
Static int
rue_csr_write_4(struct rue_softc *sc, int reg, u_int32_t val)
{
	int	err;
	uDWord	dw;

	USETDW(dw, val);
	err = rue_write_mem(sc, reg, &dw, 4);

	if (err)
		return (-1);

	return (0);
}

/*
 * miibus read-register method.  The RTL8150 has no external MII bus;
 * PHY registers are shadowed as chip registers, so MII register numbers
 * are translated to the corresponding RUE_* register and read over the
 * control pipe.  Registers this chip does not implement read as 0.
 */
Static int
rue_miibus_readreg(device_ptr_t dev, int phy, int reg)
{
	struct rue_softc	*sc = USBGETSOFTC(dev);
	int			rval;
	int			ruereg;

	if (phy != 0)		/* RTL8150 supports PHY == 0, only */
		return (0);

	switch (reg) {
	case MII_BMCR:
		ruereg = RUE_BMCR;
		break;
	case MII_BMSR:
		ruereg = RUE_BMSR;
		break;
	case MII_ANAR:
		ruereg = RUE_ANAR;
		break;
	case MII_ANER:
		ruereg = RUE_AER;
		break;
	case MII_ANLPAR:
		ruereg = RUE_ANLP;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* No PHY ID registers are mapped; report them as 0. */
		return (0);
		break;
	default:
		/*
		 * Vendor-specific registers in the chip's own range are
		 * passed through as raw 8-bit reads.
		 */
		if (RUE_REG_MIN <= reg && reg <= RUE_REG_MAX) {
			rval = rue_csr_read_1(sc, reg);
			return (rval);
		}
		printf("rue%d: bad phy register\n", sc->rue_unit);
		return (0);
	}

	rval = rue_csr_read_2(sc, ruereg);

	return (rval);
}

/*
 * miibus write-register method: mirror of rue_miibus_readreg(), mapping
 * MII register numbers onto the chip's shadow registers.
 */
Static int
rue_miibus_writereg(device_ptr_t dev, int phy, int reg, int data)
{
	struct rue_softc	*sc = USBGETSOFTC(dev);
	int			ruereg;

	if (phy != 0)		/* RTL8150 supports PHY == 0, only */
		return (0);

	switch (reg)
{
	/* Translate the MII register number to its RUE_* shadow register. */
	case MII_BMCR:
		ruereg = RUE_BMCR;
		break;
	case MII_BMSR:
		ruereg = RUE_BMSR;
		break;
	case MII_ANAR:
		ruereg = RUE_ANAR;
		break;
	case MII_ANER:
		ruereg = RUE_AER;
		break;
	case MII_ANLPAR:
		ruereg = RUE_ANLP;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* PHY ID registers are not mapped; writes are dropped. */
		return (0);
		break;
	default:
		/* Raw 8-bit pass-through for the chip's own register range. */
		if (RUE_REG_MIN <= reg && reg <= RUE_REG_MAX) {
			rue_csr_write_1(sc, reg, data);
			return (0);
		}
		printf("rue%d: bad phy register\n", sc->rue_unit);
		return (0);
	}

	rue_csr_write_2(sc, ruereg, data);

	return (0);
}

/*
 * miibus status-change method: push the negotiated speed and duplex
 * into the chip's BMCR shadow register.  RX/TX are disabled around the
 * update and re-enabled afterwards.
 */
Static void
rue_miibus_statchg(device_ptr_t dev)
{
	struct rue_softc	*sc = USBGETSOFTC(dev);
	struct mii_data		*mii = GET_MII(sc);
	int			bmcr;

	RUE_CLRBIT(sc, RUE_CR, (RUE_CR_RE | RUE_CR_TE));

	bmcr = rue_csr_read_2(sc, RUE_BMCR);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		bmcr |= RUE_BMCR_SPD_SET;
	else
		bmcr &= ~RUE_BMCR_SPD_SET;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		bmcr |= RUE_BMCR_DUPLEX;
	else
		bmcr &= ~RUE_BMCR_DUPLEX;

	rue_csr_write_2(sc, RUE_BMCR, bmcr);

	RUE_SETBIT(sc, RUE_CR, (RUE_CR_RE | RUE_CR_TE));
}

/*
 * Calculate CRC of a multicast group address, return the upper 6 bits.
 * The 6-bit result selects one bit in the chip's 64-bit MAR0/MAR4
 * multicast hash filter.
 */
Static uint32_t
rue_mchash(const uint8_t *addr)
{
	uint32_t	crc, carry;
	int		idx, bit;
	uint8_t		data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
			crc <<= 1;
			/*
			 * NOTE(review): the "| carry" makes this differ from
			 * a textbook CRC-32 step.  This matches the
			 * historical RealTek hash used by rl(4); presumably
			 * it is what the RTL8150 filter hardware expects —
			 * do not "correct" it without verifying against
			 * the hardware.
			 */
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return (crc >> 26);
}

/*
 * Program the 64-bit multicast hash filter.
*/ Static void rue_setmulti(struct rue_softc *sc) { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int32_t rxcfg; int mcnt = 0; ifp = &sc->arpcom.ac_if; rxcfg = rue_csr_read_2(sc, RUE_RCR); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxcfg |= (RUE_RCR_AAM | RUE_RCR_AAP); rxcfg &= ~RUE_RCR_AM; rue_csr_write_2(sc, RUE_RCR, rxcfg); rue_csr_write_4(sc, RUE_MAR0, 0xFFFFFFFF); rue_csr_write_4(sc, RUE_MAR4, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ rue_csr_write_4(sc, RUE_MAR0, 0); rue_csr_write_4(sc, RUE_MAR4, 0); /* now program new ones */ #if __FreeBSD_version >= 500000 TAILQ_FOREACH (ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH (ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = rue_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if (mcnt) rxcfg |= RUE_RCR_AM; else rxcfg &= ~RUE_RCR_AM; rxcfg &= ~(RUE_RCR_AAM | RUE_RCR_AAP); rue_csr_write_2(sc, RUE_RCR, rxcfg); rue_csr_write_4(sc, RUE_MAR0, hashes[0]); rue_csr_write_4(sc, RUE_MAR4, hashes[1]); } Static void rue_reset(struct rue_softc *sc) { int i; rue_csr_write_1(sc, RUE_CR, RUE_CR_SOFT_RST); for (i = 0; i < RUE_TIMEOUT; i++) { DELAY(500); if (!(rue_csr_read_1(sc, RUE_CR) & RUE_CR_SOFT_RST)) break; } if (i == RUE_TIMEOUT) printf("rue%d: reset never completed!\n", sc->rue_unit); DELAY(10000); } /* * Probe for a RTL8150 chip. */ USB_MATCH(rue) { USB_MATCH_START(rue, uaa); struct rue_type *t; if (uaa->iface == NULL) return (UMATCH_NONE); t = rue_devs; while (t->rue_vid) { if (uaa->vendor == t->rue_vid && uaa->product == t->rue_did) { return (UMATCH_VENDOR_PRODUCT); } t++; } return (UMATCH_NONE); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/
USB_ATTACH(rue)
{
	USB_ATTACH_START(rue, sc, uaa);
	char *devinfo;
	u_char eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	usbd_interface_handle iface;
	usbd_status err;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	int i;
	struct rue_type *t;

	devinfo = malloc(1024, M_USBDEV, M_WAITOK);

	bzero(sc, sizeof (struct rue_softc));

	usbd_devinfo(uaa->device, 0, devinfo);

	sc->rue_udev = uaa->device;
	sc->rue_unit = device_get_unit(self);

	/*
	 * NOTE(review): this is the set-config step, but the error text
	 * says "getting interface handle failed" — looks copy/pasted from
	 * the branch below; confirm before relying on the message.
	 */
	if (usbd_set_config_no(sc->rue_udev, RUE_CONFIG_NO, 0)) {
		printf("rue%d: getting interface handle failed\n",
		       sc->rue_unit);
		goto error;
	}

	err = usbd_device2interface_handle(uaa->device, RUE_IFACE_IDX, &iface);
	if (err) {
		printf("rue%d: getting interface handle failed\n",
		       sc->rue_unit);
		goto error;
	}

	sc->rue_iface = iface;

	/* Remember which device-table entry matched. */
	t = rue_devs;
	while (t->rue_vid) {
		if (uaa->vendor == t->rue_vid &&
		    uaa->product == t->rue_did) {
			sc->rue_info = t;
			break;
		}
		t++;
	}

	id = usbd_get_interface_descriptor(sc->rue_iface);

	usbd_devinfo(uaa->device, 0, devinfo);
	device_set_desc_copy(self, devinfo);
	printf("%s: %s\n", USBDEVNAME(self), devinfo);

	/* Find endpoints */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			printf("rue%d: couldn't get ep %d\n", sc->rue_unit, i);
			goto error;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->rue_ed[RUE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->rue_ed[RUE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->rue_ed[RUE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

#if __FreeBSD_version >= 500000
	mtx_init(&sc->rue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
		 MTX_DEF | MTX_RECURSE);
#endif
	RUE_LOCK(sc);

	/* Reset the adapter */
	rue_reset(sc);

	/* Get station address from the EEPROM */
	err = rue_read_mem(sc, RUE_EEPROM_IDR0, (caddr_t)&eaddr,
			   ETHER_ADDR_LEN);
	if (err) {
		printf("rue%d: couldn't get station address\n", sc->rue_unit);
		goto error1;
	}

	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Fill in interface callbacks and defaults. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "rue", sc->rue_unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rue_ioctl;
	ifp->if_start = rue_start;
	ifp->if_watchdog = rue_watchdog;
	ifp->if_init = rue_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	/* MII setup */
	if (mii_phy_probe(self, &sc->rue_miibus,
			  rue_ifmedia_upd, rue_ifmedia_sts)) {
		printf("rue%d: MII without any PHY!\n", sc->rue_unit);
		goto error1;
	}

	sc->rue_qdat.ifp = ifp;
	sc->rue_qdat.if_rxstart = rue_rxstart;

	/* Call MI attach routine */
#if __FreeBSD_version >= 500000
	ether_ifattach(ifp, eaddr);
#else
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#endif
	callout_handle_init(&sc->rue_stat_ch);

	usb_register_netisr();

	sc->rue_dying = 0;

	RUE_UNLOCK(sc);

	free(devinfo, M_USBDEV);
	USB_ATTACH_SUCCESS_RETURN;

	/* Error after the mutex was initialized: tear it down too. */
error1:
	RUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
	mtx_destroy(&sc->rue_mtx);
#endif
error:
	free(devinfo, M_USBDEV);
	USB_ATTACH_ERROR_RETURN;
}

/*
 * Detach the interface: mark the softc dying, stop the tick timer,
 * detach from the network stack, and abort any in-flight transfers.
 */
Static int
rue_detach(device_ptr_t dev)
{
	struct rue_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	RUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	sc->rue_dying = 1;
	untimeout(rue_tick, sc, sc->rue_stat_ch);

#if __FreeBSD_version >= 500000
	ether_ifdetach(ifp);
#else
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
#endif

	/* Abort any transfers still pending on the open pipes. */
	if (sc->rue_ep[RUE_ENDPT_TX] != NULL)
		usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_TX]);
	if (sc->rue_ep[RUE_ENDPT_RX] != NULL)
		usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_RX]);
#ifdef RUE_INTR_PIPE
	if (sc->rue_ep[RUE_ENDPT_INTR] != NULL)
		usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_INTR]);
#endif

	RUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
	mtx_destroy(&sc->rue_mtx);
#endif

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
*/
Static int
rue_newbuf(struct rue_softc *sc, struct rue_chain *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		/* Allocate a fresh header mbuf plus a cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("rue%d: no memory for rx list "
			       "-- packet dropped!\n", sc->rue_unit);
			return (ENOBUFS);
		}
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("rue%d: no memory for rx list "
			       "-- packet dropped!\n", sc->rue_unit);
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Leave ETHER_ALIGN bytes of slack so the payload is aligned. */
	m_adj(m_new, ETHER_ALIGN);
	c->rue_mbuf = m_new;

	return (0);
}

/*
 * Set up the RX chain: give every slot an mbuf cluster and a USB
 * transfer handle.
 */
Static int
rue_rx_list_init(struct rue_softc *sc)
{
	struct rue_cdata *cd;
	struct rue_chain *c;
	int i;

	cd = &sc->rue_cdata;
	for (i = 0; i < RUE_RX_LIST_CNT; i++) {
		c = &cd->rue_rx_chain[i];
		c->rue_sc = sc;
		c->rue_idx = i;
		if (rue_newbuf(sc, c, NULL) == ENOBUFS)
			return (ENOBUFS);
		if (c->rue_xfer == NULL) {
			c->rue_xfer = usbd_alloc_xfer(sc->rue_udev);
			if (c->rue_xfer == NULL)
				return (ENOBUFS);
		}
	}

	return (0);
}

/*
 * Set up the TX chain: each slot gets a USB transfer handle and a
 * contiguous staging buffer for outgoing frames.
 */
Static int
rue_tx_list_init(struct rue_softc *sc)
{
	struct rue_cdata *cd;
	struct rue_chain *c;
	int i;

	cd = &sc->rue_cdata;
	for (i = 0; i < RUE_TX_LIST_CNT; i++) {
		c = &cd->rue_tx_chain[i];
		c->rue_sc = sc;
		c->rue_idx = i;
		c->rue_mbuf = NULL;
		if (c->rue_xfer == NULL) {
			c->rue_xfer = usbd_alloc_xfer(sc->rue_udev);
			if (c->rue_xfer == NULL)
				return (ENOBUFS);
		}
		c->rue_buf = malloc(RUE_BUFSZ, M_USBDEV, M_NOWAIT);
		if (c->rue_buf == NULL)
			return (ENOBUFS);
	}

	return (0);
}

#ifdef RUE_INTR_PIPE
/*
 * Interrupt-pipe completion: fold the chip's error counters into the
 * interface statistics.
 */
Static void
rue_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct rue_softc *sc = priv;
	struct ifnet *ifp;
	struct rue_intrpkt *p;

	RUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		RUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Aborted/cancelled transfers are not errors worth logging. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			RUE_UNLOCK(sc);
			return;
		}
		printf("rue%d: usb error on intr: %s\n", sc->rue_unit,
		       usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->rue_ep[RUE_ENDPT_INTR]);
		RUE_UNLOCK(sc);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, (void **)&p, NULL, NULL);

	ifp->if_ierrors += p->rue_rxlost_cnt;
	ifp->if_ierrors += p->rue_crcerr_cnt;
	ifp->if_collisions += p->rue_col_cnt;

	RUE_UNLOCK(sc);
}
#endif

/*
 * Re-arm the current RX slot with a fresh mbuf and resubmit its
 * transfer (called from the USB ether input path).
 */
Static void
rue_rxstart(struct ifnet *ifp)
{
	struct rue_softc *sc;
	struct rue_chain *c;

	sc = ifp->if_softc;
	RUE_LOCK(sc);
	c = &sc->rue_cdata.rue_rx_chain[sc->rue_cdata.rue_rx_prod];

	if (rue_newbuf(sc, c, NULL) == ENOBUFS) {
		ifp->if_ierrors++;
		RUE_UNLOCK(sc);
		return;
	}

	/* Setup new transfer. */
	usbd_setup_xfer(c->rue_xfer, sc->rue_ep[RUE_ENDPT_RX], c,
			mtod(c->rue_mbuf, char *), RUE_BUFSZ,
			USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, rue_rxeof);
	usbd_transfer(c->rue_xfer);
	RUE_UNLOCK(sc);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
Static void
rue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct rue_chain *c = priv;
	struct rue_softc *sc = c->rue_sc;
	struct mbuf *m;
	struct ifnet *ifp;
	int total_len = 0;
	struct rue_rxpkt r;

	if (sc->rue_dying)
		return;
	RUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		RUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			RUE_UNLOCK(sc);
			return;
		}
		/* Rate-limit the error message to avoid console floods. */
		if (usbd_ratecheck(&sc->rue_rx_notice))
			printf("rue%d: usb error on rx: %s\n", sc->rue_unit,
			       usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->rue_ep[RUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	if (total_len <= ETHER_CRC_LEN) {
		ifp->if_ierrors++;
		goto done;
	}

	m = c->rue_mbuf;
	/* The RX status word trails the received data in the buffer. */
	bcopy(mtod(m, char *) + total_len - 4, (char *)&r, sizeof (r));

	/* Check receive packet was valid or not */
	if ((r.rue_rxstat & RUE_RXSTAT_VALID) == 0) {
		ifp->if_ierrors++;
		goto done;
	}

	/* No errors; receive the packet.
*/
	/* Trim the trailing CRC before handing the frame up. */
	total_len -= ETHER_CRC_LEN;

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = (struct ifnet *)&sc->rue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	RUE_UNLOCK(sc);

	return;

done:
	/* Setup new transfer. */
	usbd_setup_xfer(xfer, sc->rue_ep[RUE_ENDPT_RX], c,
			mtod(c->rue_mbuf, char *), RUE_BUFSZ,
			USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, rue_rxeof);
	usbd_transfer(xfer);
	RUE_UNLOCK(sc);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
Static void
rue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct rue_chain *c = priv;
	struct rue_softc *sc = c->rue_sc;
	struct ifnet *ifp;
	usbd_status err;

	RUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			RUE_UNLOCK(sc);
			return;
		}
		printf("rue%d: usb error on tx: %s\n", sc->rue_unit,
		       usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->rue_ep[RUE_ENDPT_TX]);
		RUE_UNLOCK(sc);
		return;
	}

	/* Transmit finished: cancel the watchdog and allow new output. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->rue_xfer, NULL, NULL, NULL, &err);

	if (c->rue_mbuf != NULL) {
		c->rue_mbuf->m_pkthdr.rcvif = ifp;
		usb_tx_done(c->rue_mbuf);
		c->rue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	RUE_UNLOCK(sc);
}

/*
 * Once-a-second timer: drive the MII state machine and kick the
 * transmit queue once link comes up.
 */
Static void
rue_tick(void *xsc)
{
	struct rue_softc *sc = xsc;
	struct ifnet *ifp;
	struct mii_data *mii;

	if (sc == NULL)
		return;

	RUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	mii = GET_MII(sc);
	if (mii == NULL) {
		RUE_UNLOCK(sc);
		return;
	}

	mii_tick(mii);
	if (!sc->rue_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		/* Link just came up: start any queued output. */
		sc->rue_link++;
		if (ifp->if_snd.ifq_head != NULL)
			rue_start(ifp);
	}

	/* Re-arm ourselves for the next second. */
	sc->rue_stat_ch = timeout(rue_tick, sc, hz);

	RUE_UNLOCK(sc);
}

/*
 * Stage an mbuf into a TX slot's contiguous buffer and submit the
 * bulk-out transfer.  Returns 0 on success, EIO if the submit failed
 * (in which case the adapter is stopped).
 */
Static int
rue_encap(struct rue_softc *sc, struct mbuf *m, int idx)
{
	int total_len;
	struct rue_chain *c;
	usbd_status err;

	c = &sc->rue_cdata.rue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer
	 */
	m_copydata(m, 0, m->m_pkthdr.len, c->rue_buf);

	c->rue_mbuf = m;
	total_len = m->m_pkthdr.len;

	/*
	 * This is an undocumented behavior.
	 * RTL8150 chip doesn't send frame length smaller than
	 * RUE_MIN_FRAMELEN (60) byte packet.
	 */
	if (total_len < RUE_MIN_FRAMELEN)
		total_len = RUE_MIN_FRAMELEN;

	usbd_setup_xfer(c->rue_xfer, sc->rue_ep[RUE_ENDPT_TX],
			c, c->rue_buf, total_len, USBD_FORCE_SHORT_XFER,
			10000, rue_txeof);

	/* Transmit */
	err = usbd_transfer(c->rue_xfer);
	if (err != USBD_IN_PROGRESS) {
		rue_stop(sc);
		return (EIO);
	}

	sc->rue_cdata.rue_tx_cnt++;

	return (0);
}

/*
 * Start output: dequeue one frame and hand it to rue_encap().  Only a
 * single transmit can be in flight (OACTIVE gates further output).
 */
Static void
rue_start(struct ifnet *ifp)
{
	struct rue_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	RUE_LOCK(sc);

	/* Nothing to do without link or while a transmit is in flight. */
	if (!sc->rue_link) {
		RUE_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		RUE_UNLOCK(sc);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		RUE_UNLOCK(sc);
		return;
	}

	if (rue_encap(sc, m_head, 0)) {
		/* Submit failed: requeue the frame for a later try. */
		IF_PREPEND(&ifp->if_snd, m_head);
		ifp->if_flags |= IFF_OACTIVE;
		RUE_UNLOCK(sc);
		return;
	}

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	BPF_MTAP(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	RUE_UNLOCK(sc);
}

/*
 * Bring the interface up: reset the chip, program the MAC address,
 * build the TX/RX rings, configure the receiver and open the pipes.
 */
Static void
rue_init(void *xsc)
{
	struct rue_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = GET_MII(sc);
	struct rue_chain *c;
	usbd_status err;
	int i;
	int rxcfg;

	RUE_LOCK(sc);

	if (ifp->if_flags & IFF_RUNNING) {
		RUE_UNLOCK(sc);
		return;
	}

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rue_reset(sc);

	/* Set MAC address */
	rue_write_mem(sc, RUE_IDR0, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Init TX ring. */
	if (rue_tx_list_init(sc) == ENOBUFS) {
		printf("rue%d: tx list init failed\n", sc->rue_unit);
		RUE_UNLOCK(sc);
		return;
	}

	/* Init RX ring.
*/
	if (rue_rx_list_init(sc) == ENOBUFS) {
		printf("rue%d: rx list init failed\n", sc->rue_unit);
		RUE_UNLOCK(sc);
		return;
	}

#ifdef RUE_INTR_PIPE
	/* Buffer for the periodic interrupt-pipe status packets. */
	sc->rue_cdata.rue_ibuf = malloc(RUE_INTR_PKTLEN, M_USBDEV, M_NOWAIT);
#endif

	/*
	 * Set the initial TX and RX configuration.
	 */
	rue_csr_write_1(sc, RUE_TCR, RUE_TCR_CONFIG);

	rxcfg = RUE_RCR_CONFIG;

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		rxcfg |= RUE_RCR_AB;
	else
		rxcfg &= ~RUE_RCR_AB;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= RUE_RCR_AAP;
	else
		rxcfg &= ~RUE_RCR_AAP;

	rue_csr_write_2(sc, RUE_RCR, rxcfg);

	/* Load the multicast filter. */
	rue_setmulti(sc);

	/* Enable RX and TX */
	rue_csr_write_1(sc, RUE_CR, (RUE_CR_TE | RUE_CR_RE | RUE_CR_EP3CLREN));

	mii_mediachg(mii);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->rue_iface, sc->rue_ed[RUE_ENDPT_RX],
			     USBD_EXCLUSIVE_USE, &sc->rue_ep[RUE_ENDPT_RX]);
	if (err) {
		printf("rue%d: open rx pipe failed: %s\n",
		       sc->rue_unit, usbd_errstr(err));
		RUE_UNLOCK(sc);
		return;
	}
	err = usbd_open_pipe(sc->rue_iface, sc->rue_ed[RUE_ENDPT_TX],
			     USBD_EXCLUSIVE_USE, &sc->rue_ep[RUE_ENDPT_TX]);
	if (err) {
		printf("rue%d: open tx pipe failed: %s\n",
		       sc->rue_unit, usbd_errstr(err));
		RUE_UNLOCK(sc);
		return;
	}

#ifdef RUE_INTR_PIPE
	err = usbd_open_pipe_intr(sc->rue_iface, sc->rue_ed[RUE_ENDPT_INTR],
				  USBD_SHORT_XFER_OK,
				  &sc->rue_ep[RUE_ENDPT_INTR], sc,
				  sc->rue_cdata.rue_ibuf, RUE_INTR_PKTLEN,
				  rue_intr, RUE_INTR_INTERVAL);
	if (err) {
		printf("rue%d: open intr pipe failed: %s\n",
		       sc->rue_unit, usbd_errstr(err));
		RUE_UNLOCK(sc);
		return;
	}
#endif

	/* Start up the receive pipe. */
	for (i = 0; i < RUE_RX_LIST_CNT; i++) {
		c = &sc->rue_cdata.rue_rx_chain[i];
		usbd_setup_xfer(c->rue_xfer, sc->rue_ep[RUE_ENDPT_RX], c,
				mtod(c->rue_mbuf, char *), RUE_BUFSZ,
				USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT,
				rue_rxeof);
		usbd_transfer(c->rue_xfer);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the once-a-second MII tick. */
	sc->rue_stat_ch = timeout(rue_tick, sc, hz);

	RUE_UNLOCK(sc);
}

/*
 * Set media options.
 */
Static int
rue_ifmedia_upd(struct ifnet *ifp)
{
	struct rue_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);

	sc->rue_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		/* Reset every attached PHY before renegotiating. */
		LIST_FOREACH (miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
Static void
rue_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rue_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle ioctl requests: interface flags, multicast membership and
 * media selection; everything else is delegated to ether_ioctl().
 */
Static int
rue_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rue_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	RUE_LOCK(sc);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Follow a promiscuity change with register bit
			 * flips instead of a full re-init when possible.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->rue_if_flags & IFF_PROMISC)) {
				RUE_SETBIT_2(sc, RUE_RCR,
					     (RUE_RCR_AAM | RUE_RCR_AAP));
				rue_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   sc->rue_if_flags & IFF_PROMISC) {
				RUE_CLRBIT_2(sc, RUE_RCR,
					     (RUE_RCR_AAM | RUE_RCR_AAP));
				rue_setmulti(sc);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				rue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rue_stop(sc);
		}
		sc->rue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rue_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = GET_MII(sc);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error
= ether_ioctl(ifp, command, data);
		break;
	}

	RUE_UNLOCK(sc);

	return (error);
}

/*
 * Watchdog fired: count an output error, reap the stuck transmit via
 * rue_txeof(), and restart output if anything is queued.
 */
Static void
rue_watchdog(struct ifnet *ifp)
{
	struct rue_softc *sc = ifp->if_softc;
	struct rue_chain *c;
	usbd_status stat;

	RUE_LOCK(sc);

	ifp->if_oerrors++;
	printf("rue%d: watchdog timeout\n", sc->rue_unit);

	c = &sc->rue_cdata.rue_tx_chain[0];
	usbd_get_xfer_status(c->rue_xfer, NULL, NULL, NULL, &stat);
	rue_txeof(c->rue_xfer, c, stat);

	if (ifp->if_snd.ifq_head != NULL)
		rue_start(ifp);

	RUE_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
Static void
rue_stop(struct rue_softc *sc)
{
	usbd_status err;
	struct ifnet *ifp;
	int i;

	RUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Disable the receiver/transmitter and reset the chip. */
	rue_csr_write_1(sc, RUE_CR, 0x00);
	rue_reset(sc);

	untimeout(rue_tick, sc, sc->rue_stat_ch);

	/* Stop transfers. */
	if (sc->rue_ep[RUE_ENDPT_RX] != NULL) {
		err = usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_RX]);
		if (err) {
			printf("rue%d: abort rx pipe failed: %s\n",
			       sc->rue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->rue_ep[RUE_ENDPT_RX]);
		if (err) {
			printf("rue%d: close rx pipe failed: %s\n",
			       sc->rue_unit, usbd_errstr(err));
		}
		sc->rue_ep[RUE_ENDPT_RX] = NULL;
	}

	if (sc->rue_ep[RUE_ENDPT_TX] != NULL) {
		err = usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_TX]);
		if (err) {
			printf("rue%d: abort tx pipe failed: %s\n",
			       sc->rue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->rue_ep[RUE_ENDPT_TX]);
		if (err) {
			printf("rue%d: close tx pipe failed: %s\n",
			       sc->rue_unit, usbd_errstr(err));
		}
		sc->rue_ep[RUE_ENDPT_TX] = NULL;
	}

#ifdef RUE_INTR_PIPE
	if (sc->rue_ep[RUE_ENDPT_INTR] != NULL) {
		err = usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_INTR]);
		if (err) {
			printf("rue%d: abort intr pipe failed: %s\n",
			       sc->rue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->rue_ep[RUE_ENDPT_INTR]);
		if (err) {
			printf("rue%d: close intr pipe failed: %s\n",
			       sc->rue_unit, usbd_errstr(err));
		}
		sc->rue_ep[RUE_ENDPT_INTR] = NULL;
	}
#endif

	/* Free RX resources. */
	for (i = 0; i < RUE_RX_LIST_CNT; i++) {
		if (sc->rue_cdata.rue_rx_chain[i].rue_buf != NULL) {
			free(sc->rue_cdata.rue_rx_chain[i].rue_buf, M_USBDEV);
			sc->rue_cdata.rue_rx_chain[i].rue_buf = NULL;
		}
		if (sc->rue_cdata.rue_rx_chain[i].rue_mbuf != NULL) {
			m_freem(sc->rue_cdata.rue_rx_chain[i].rue_mbuf);
			sc->rue_cdata.rue_rx_chain[i].rue_mbuf = NULL;
		}
		if (sc->rue_cdata.rue_rx_chain[i].rue_xfer != NULL) {
			usbd_free_xfer(sc->rue_cdata.rue_rx_chain[i].rue_xfer);
			sc->rue_cdata.rue_rx_chain[i].rue_xfer = NULL;
		}
	}

	/* Free TX resources. */
	for (i = 0; i < RUE_TX_LIST_CNT; i++) {
		if (sc->rue_cdata.rue_tx_chain[i].rue_buf != NULL) {
			free(sc->rue_cdata.rue_tx_chain[i].rue_buf, M_USBDEV);
			sc->rue_cdata.rue_tx_chain[i].rue_buf = NULL;
		}
		if (sc->rue_cdata.rue_tx_chain[i].rue_mbuf != NULL) {
			m_freem(sc->rue_cdata.rue_tx_chain[i].rue_mbuf);
			sc->rue_cdata.rue_tx_chain[i].rue_mbuf = NULL;
		}
		if (sc->rue_cdata.rue_tx_chain[i].rue_xfer != NULL) {
			usbd_free_xfer(sc->rue_cdata.rue_tx_chain[i].rue_xfer);
			sc->rue_cdata.rue_tx_chain[i].rue_xfer = NULL;
		}
	}

#ifdef RUE_INTR_PIPE
	free(sc->rue_cdata.rue_ibuf, M_USBDEV);
	sc->rue_cdata.rue_ibuf = NULL;
#endif

	sc->rue_link = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	RUE_UNLOCK(sc);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
Static void
rue_shutdown(device_ptr_t dev)
{
	struct rue_softc *sc;

	sc = device_get_softc(dev);
	sc->rue_dying++;
	RUE_LOCK(sc);
	rue_reset(sc);
	rue_stop(sc);
	RUE_UNLOCK(sc);
}
Index: head/sys/dev/usb/if_udav.c
===================================================================
--- head/sys/dev/usb/if_udav.c (revision 129878)
+++ head/sys/dev/usb/if_udav.c (revision 129879)
@@ -1,2037 +1,2038 @@
/* $NetBSD: if_udav.c,v 1.2 2003/09/04 15:17:38 tsutsui Exp $ */
/* $nabe: if_udav.c,v 1.3 2003/08/21 16:57:19 nabe Exp $ */
/* $FreeBSD$ */
/*
 * Copyright (c) 2003
 * Shingo WATANABE . All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * DM9601(DAVICOM USB to Ethernet MAC Controller with Integrated 10/100 PHY) * The spec can be found at the following url. * http://www.davicom.com.tw/big5/download/Data%20Sheet/DM9601-DS-F01-062202s.pdf */ /* * TODO: * Interrupt Endpoint support * External PHYs * powerhook() support? 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #if defined(__NetBSD__) #include "opt_ns.h" #endif #if defined(__NetBSD__) #include "bpfilter.h" #endif #if defined(__FreeBSD__) #define NBPFILTER 1 #endif #if defined(__NetBSD__) #include "rnd.h" #endif #include #include #include #include #include +#include #include #if defined(__FreeBSD__) #include #include #include #endif #if defined(__NetBSD__) #include #endif #if NRND > 0 #include #endif #include #include #include #include #include #if NBPFILTER > 0 #include #endif #if defined(__NetBSD__) #ifndef BPF_MTAP #define BPF_MTAP(_ifp, _m) do { \ if ((_ifp)->if_bpf)) { \ bpf_mtap((_ifp)->if_bpf, (_m)) ; \ } \ } while (0) #endif #endif #if defined(__NetBSD__) #include #ifdef INET #include #include #endif /* INET */ #elif defined(__FreeBSD__) /* defined(__NetBSD__) */ #include #include #endif /* defined(__FreeBSD__) */ #if defined(__NetBSD__) #ifdef NS #include #include #endif #endif /* defined (__NetBSD__) */ #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) MODULE_DEPEND(udav, usb, 1, 1, 1); MODULE_DEPEND(udav, ether, 1, 1, 1); MODULE_DEPEND(udav, miibus, 1, 1, 1); #endif /* "controller miibus0" required. See GENERIC if you get errors here. 
*/
#include "miibus_if.h"

#if !defined(__FreeBSD__)
/* Function declarations */
USB_DECLARE_DRIVER(udav);
#endif

/* Forward declarations for the driver entry points and helpers. */
#if defined(__FreeBSD__)
Static int udav_match(device_ptr_t);
Static int udav_attach(device_ptr_t);
Static int udav_detach(device_ptr_t);
Static void udav_shutdown(device_ptr_t);
#endif
Static int udav_openpipes(struct udav_softc *);
Static int udav_rx_list_init(struct udav_softc *);
Static int udav_tx_list_init(struct udav_softc *);
Static int udav_newbuf(struct udav_softc *, struct udav_chain *,
		       struct mbuf *);
Static void udav_start(struct ifnet *);
Static int udav_send(struct udav_softc *, struct mbuf *, int);
Static void udav_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
#if defined(__FreeBSD__)
Static void udav_rxstart(struct ifnet *ifp);
#endif
Static void udav_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
Static void udav_tick(void *);
Static void udav_tick_task(void *);
Static int udav_ioctl(struct ifnet *, u_long, caddr_t);
Static void udav_stop_task(struct udav_softc *);
Static void udav_stop(struct ifnet *, int);
Static void udav_watchdog(struct ifnet *);
Static int udav_ifmedia_change(struct ifnet *);
Static void udav_ifmedia_status(struct ifnet *, struct ifmediareq *);
Static void udav_lock_mii(struct udav_softc *);
Static void udav_unlock_mii(struct udav_softc *);
Static int udav_miibus_readreg(device_ptr_t, int, int);
Static void udav_miibus_writereg(device_ptr_t, int, int, int);
Static void udav_miibus_statchg(device_ptr_t);
#if defined(__NetBSD__)
Static int udav_init(struct ifnet *);
#elif defined(__FreeBSD__)
Static void udav_init(void *);
#endif
Static void udav_setmulti(struct udav_softc *);
Static void udav_reset(struct udav_softc *);

Static int udav_csr_read(struct udav_softc *, int, void *, int);
Static int udav_csr_write(struct udav_softc *, int, void *, int);
Static int udav_csr_read1(struct udav_softc *, int);
Static int udav_csr_write1(struct udav_softc *, int, unsigned char);

#if 0
Static int udav_mem_read(struct udav_softc *, int, void *, int);
Static int udav_mem_write(struct udav_softc *, int, void *, int);
Static int udav_mem_write1(struct udav_softc *, int, unsigned char);
#endif

#if defined(__FreeBSD__)
/* newbus glue: device, bus and MII method table. */
Static device_method_t udav_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, udav_match),
	DEVMETHOD(device_attach, udav_attach),
	DEVMETHOD(device_detach, udav_detach),
	DEVMETHOD(device_shutdown, udav_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, udav_miibus_readreg),
	DEVMETHOD(miibus_writereg, udav_miibus_writereg),
	DEVMETHOD(miibus_statchg, udav_miibus_statchg),

	{ 0, 0 }
};

Static driver_t udav_driver = {
	"udav",
	udav_methods,
	sizeof(struct udav_softc)
};

Static devclass_t udav_devclass;

DRIVER_MODULE(udav, uhub, udav_driver, udav_devclass, usbd_driver_load, 0);
DRIVER_MODULE(miibus, udav, miibus_driver, miibus_devclass, 0, 0);
#endif /* defined(__FreeBSD__) */

/* Macros */
#ifdef UDAV_DEBUG
#define DPRINTF(x)	if (udavdebug) logprintf x
#define DPRINTFN(n,x)	if (udavdebug >= (n)) logprintf x
int udavdebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define delay(d)	DELAY(d)

/* Read-modify-write helpers for single-byte registers. */
#define UDAV_SETBIT(sc, reg, x)	\
	udav_csr_write1(sc, reg, udav_csr_read1(sc, reg) | (x))
#define UDAV_CLRBIT(sc, reg, x)	\
	udav_csr_write1(sc, reg, udav_csr_read1(sc, reg) & ~(x))

/* Table of supported vendor/product IDs and per-device flags. */
static const struct udav_type {
	struct usb_devno udav_dev;
	u_int16_t udav_flags;
#define UDAV_EXT_PHY	0x0001
} udav_devs [] = {
	/* Corega USB-TXC */
	{{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TXC }, 0},
#if 0
	/* DAVICOM DM9601 Generic? */
	/* XXX: The following ids was obtained from the data sheet.
*/
	{{ 0x0a46, 0x9601 }, 0},
#endif
};

#define udav_lookup(v, p) \
	((const struct udav_type *)usb_lookup(udav_devs, v, p))

/* Probe */
USB_MATCH(udav)
{
	USB_MATCH_START(udav, uaa);

	/* This driver matches at device level, not per-interface. */
	if (uaa->iface != NULL)
		return (UMATCH_NONE);

	return (udav_lookup(uaa->vendor, uaa->product) != NULL ?
		UMATCH_VENDOR_PRODUCT : UMATCH_NONE);
}

/* Attach */
USB_ATTACH(udav)
{
	USB_ATTACH_START(udav, sc, uaa);
	usbd_device_handle dev = uaa->device;
	usbd_interface_handle iface;
	usbd_status err;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	char devinfo[1024];
	const char *devname ;
	struct ifnet *ifp;
#if defined(__NetBSD__)
	struct mii_data *mii;
#endif
	u_char eaddr[ETHER_ADDR_LEN];
	int i;
#if defined(__NetBSD__)
	int s;
#endif

	bzero(sc, sizeof(struct udav_softc));

	usbd_devinfo(dev, 0, devinfo);
	USB_ATTACH_SETUP;
	devname = USBDEVNAME(sc->sc_dev);
	printf("%s: %s\n", devname, devinfo);

	/* Move the device into the configured state. */
	err = usbd_set_config_no(dev, UDAV_CONFIG_NO, 1);
	if (err) {
		printf("%s: setting config no failed\n", devname);
		goto bad;
	}

	usb_init_task(&sc->sc_tick_task, udav_tick_task, sc);
	lockinit(&sc->sc_mii_lock, PZERO, "udavmii", 0, 0);
	usb_init_task(&sc->sc_stop_task, (void (*)(void *)) udav_stop_task, sc);

	/* get control interface */
	err = usbd_device2interface_handle(dev, UDAV_IFACE_INDEX, &iface);
	if (err) {
		printf("%s: failed to get interface, err=%s\n", devname,
		       usbd_errstr(err));
		goto bad;
	}

	sc->sc_udev = dev;
	sc->sc_ctl_iface = iface;
	sc->sc_flags = udav_lookup(uaa->vendor, uaa->product)->udav_flags;

	/* get interface descriptor */
	id = usbd_get_interface_descriptor(sc->sc_ctl_iface);

	/* find endpoints */
	sc->sc_bulkin_no = sc->sc_bulkout_no = sc->sc_intrin_no = -1;
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_ctl_iface, i);
		if (ed == NULL) {
			printf("%s: couldn't get endpoint %d\n", devname, i);
			goto bad;
		}
		if ((ed->bmAttributes & UE_XFERTYPE) == UE_BULK &&
		    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
			sc->sc_bulkin_no = ed->bEndpointAddress; /* RX */
		else if ((ed->bmAttributes & UE_XFERTYPE) == UE_BULK &&
			 UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT)
			sc->sc_bulkout_no = ed->bEndpointAddress; /* TX */
		else if ((ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT &&
			 UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
			sc->sc_intrin_no = ed->bEndpointAddress; /* Status */
	}

	/* All three endpoints are required for operation. */
	if (sc->sc_bulkin_no == -1 || sc->sc_bulkout_no == -1 ||
	    sc->sc_intrin_no == -1) {
		printf("%s: missing endpoint\n", devname);
		goto bad;
	}

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
		 MTX_DEF | MTX_RECURSE);
#endif
#if defined(__NetBSD__)
	s = splnet();
#elif defined(__FreeBSD__)
	UDAV_LOCK(sc);
#endif

	/* reset the adapter */
	udav_reset(sc);

	/* Get Ethernet Address */
	err = udav_csr_read(sc, UDAV_PAR, (void *)eaddr, ETHER_ADDR_LEN);
	if (err) {
		printf("%s: read MAC address failed\n", devname);
#if defined(__NetBSD__)
		splx(s);
#elif defined(__FreeBSD__)
		UDAV_UNLOCK(sc);
#endif
		goto bad;
	}

	/* Print Ethernet Address */
	printf("%s: Ethernet address %s\n", devname, ether_sprintf(eaddr));

#if defined(__FreeBSD__)
	bcopy(eaddr, (char *)&sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
#endif
	/* initialize interface infomation */
	ifp = GET_IFP(sc);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
#if defined(__NetBSD__)
	strncpy(ifp->if_xname, devname, IFNAMSIZ);
#elif defined(__FreeBSD__)
	if_initname(ifp, "udav", device_get_unit(self));
#endif
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = udav_start;
	ifp->if_ioctl = udav_ioctl;
	ifp->if_watchdog = udav_watchdog;
	ifp->if_init = udav_init;
#if defined(__NetBSD__)
	ifp->if_stop = udav_stop;
#endif
#if defined(__FreeBSD__)
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
#endif
#if defined(__NetBSD__)
	IFQ_SET_READY(&ifp->if_snd);
#endif
#if defined(__NetBSD__)
	/*
	 * Do ifmedia setup.
	 */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = udav_miibus_readreg;
	mii->mii_writereg = udav_miibus_writereg;
	mii->mii_statchg = udav_miibus_statchg;
	mii->mii_flags = MIIF_AUTOTSLEEP;
	ifmedia_init(&mii->mii_media, 0,
		     udav_ifmedia_change, udav_ifmedia_status);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* attach the interface */
	if_attach(ifp);
	Ether_ifattach(ifp, eaddr);
#elif defined(__FreeBSD__)
	if (mii_phy_probe(self, &sc->sc_miibus,
			  udav_ifmedia_change, udav_ifmedia_status)) {
		printf("%s: MII without any PHY!\n", USBDEVNAME(sc->sc_dev));
		UDAV_UNLOCK(sc);
		mtx_destroy(&sc->sc_mtx);
		USB_ATTACH_ERROR_RETURN;
	}

	sc->sc_qdat.ifp = ifp;
	sc->sc_qdat.if_rxstart = udav_rxstart;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
#endif

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, devname, RND_TYPE_NET, 0);
#endif

	usb_callout_init(sc->sc_stat_ch);
#if defined(__FreeBSD__)
	usb_register_netisr();
#endif

	sc->sc_attached = 1;

#if defined(__NetBSD__)
	splx(s);
#elif defined(__FreeBSD__)
	UDAV_UNLOCK(sc);
#endif

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, dev, USBDEV(sc->sc_dev));

	USB_ATTACH_SUCCESS_RETURN;

bad:
	sc->sc_dying = 1;
	USB_ATTACH_ERROR_RETURN;
}

/* detach */
USB_DETACH(udav)
{
	USB_DETACH_START(udav, sc);
	struct ifnet *ifp = GET_IFP(sc);
#if defined(__NetBSD__)
	int s;
#endif

	DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

	/* Detached before attached finished */
	if (!sc->sc_attached)
		return (0);

	UDAV_LOCK(sc);
	usb_uncallout(sc->sc_stat_ch, udav_tick, sc);

	/* Remove any pending tasks */
	usb_rem_task(sc->sc_udev, &sc->sc_tick_task);
	usb_rem_task(sc->sc_udev, &sc->sc_stop_task);

#if defined(__NetBSD__)
	s = splusb();
#elif defined(__FreeBSD__)
	UDAV_LOCK(sc);
#endif

	if (--sc->sc_refcnt >= 0) {
		/* Wait
for processes to go away */ usb_detach_wait(USBDEV(sc->sc_dev)); } if (ifp->if_flags & IFF_RUNNING) udav_stop(GET_IFP(sc), 1); #if NRND > 0 rnd_detach_source(&sc->rnd_source); #endif #if defined(__NetBSD__) mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); #endif ether_ifdetach(ifp); #if defined(__NetBSD__) if_detach(ifp); #endif #ifdef DIAGNOSTIC if (sc->sc_pipe_tx != NULL) printf("%s: detach has active tx endpoint.\n", USBDEVNAME(sc->sc_dev)); if (sc->sc_pipe_rx != NULL) printf("%s: detach has active rx endpoint.\n", USBDEVNAME(sc->sc_dev)); if (sc->sc_pipe_intr != NULL) printf("%s: detach has active intr endpoint.\n", USBDEVNAME(sc->sc_dev)); #endif sc->sc_attached = 0; #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif #if defined(__FreeBSD__) mtx_destroy(&sc->sc_mtx); #endif usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, USBDEV(sc->sc_dev)); return (0); } #if 0 /* read memory */ Static int udav_mem_read(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xffff; len &= 0xff; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UDAV_REQ_MEM_READ; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: read failed. 
off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } /* write memory */ Static int udav_mem_write(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xffff; len &= 0xff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_MEM_WRITE; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } /* write memory */ Static int udav_mem_write1(struct udav_softc *sc, int offset, unsigned char ch) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xffff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_MEM_WRITE1; USETW(req.wValue, ch); USETW(req.wIndex, offset); USETW(req.wLength, 0x0000); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, NULL); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. 
off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } #endif /* read register(s) */ Static int udav_csr_read(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xff; len &= 0xff; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UDAV_REQ_REG_READ; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: read failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } /* write register(s) */ Static int udav_csr_write(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xff; len &= 0xff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_REG_WRITE; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } Static int udav_csr_read1(struct udav_softc *sc, int offset) { u_int8_t val = 0; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); return (udav_csr_read(sc, offset, &val, 1) ? 
0 : val); } /* write a register */ Static int udav_csr_write1(struct udav_softc *sc, int offset, unsigned char ch) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_REG_WRITE1; USETW(req.wValue, ch); USETW(req.wIndex, offset); USETW(req.wLength, 0x0000); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, NULL); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } #if defined(__NetBSD__) Static int udav_init(struct ifnet *ifp) #elif defined(__FreeBSD__) Static void udav_init(void *xsc) #endif { #if defined(__NetBSD__) struct udav_softc *sc = ifp->if_softc; #elif defined(__FreeBSD__) struct udav_softc *sc = (struct udav_softc *)xsc; struct ifnet *ifp = GET_IFP(sc); #endif struct mii_data *mii = GET_MII(sc); u_char *eaddr; #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) #if defined(__NetBSD__) return (EIO); #elif defined(__FreeBSD__) return ; #endif #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif /* Cancel pending I/O and free all TX/RX buffers */ udav_stop(ifp, 1); #if defined(__NetBSD__) eaddr = LLADDR(ifp->if_sadl); #elif defined(__FreeBSD__) eaddr = sc->sc_ac.ac_enaddr ; #endif udav_csr_write(sc, UDAV_PAR, eaddr, ETHER_ADDR_LEN); /* Initialize network control register */ /* Disable loopback */ UDAV_CLRBIT(sc, UDAV_NCR, UDAV_NCR_LBK0 | UDAV_NCR_LBK1); /* Initialize RX control register */ UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_DIS_LONG | UDAV_RCR_DIS_CRC); /* If we want promiscuous mode, accept all physical frames. 
*/ if (ifp->if_flags & IFF_PROMISC) UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); else UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); /* Initialize transmit ring */ if (udav_tx_list_init(sc) == ENOBUFS) { printf("%s: tx list init failed\n", USBDEVNAME(sc->sc_dev)); #if defined(__NetBSD__) splx(s); return (EIO); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); return ; #endif } /* Initialize receive ring */ if (udav_rx_list_init(sc) == ENOBUFS) { printf("%s: rx list init failed\n", USBDEVNAME(sc->sc_dev)); #if defined(__NetBSD__) splx(s); return (EIO); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); return ; #endif } /* Load the multicast filter */ udav_setmulti(sc); /* Enable RX */ UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_RXEN); /* clear POWER_DOWN state of internal PHY */ UDAV_SETBIT(sc, UDAV_GPCR, UDAV_GPCR_GEP_CNTL0); UDAV_CLRBIT(sc, UDAV_GPR, UDAV_GPR_GEPIO0); mii_mediachg(mii); if (sc->sc_pipe_tx == NULL || sc->sc_pipe_rx == NULL) { if (udav_openpipes(sc)) { #if defined(__NetBSD__) splx(s); return (EIO); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); return ; #endif } } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif usb_callout(sc->sc_stat_ch, hz, udav_tick, sc); #if defined(__NetBSD__) return (0); #elif defined(__FreeBSD__) return ; #endif } Static void udav_reset(struct udav_softc *sc) { int i; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; /* Select PHY */ #if 1 /* * XXX: force select internal phy. * external phy routines are not tested. 
*/
	UDAV_CLRBIT(sc, UDAV_NCR, UDAV_NCR_EXT_PHY);
#else
	if (sc->sc_flags & UDAV_EXT_PHY) {
		UDAV_SETBIT(sc, UDAV_NCR, UDAV_NCR_EXT_PHY);
	} else {
		UDAV_CLRBIT(sc, UDAV_NCR, UDAV_NCR_EXT_PHY);
	}
#endif

	/*
	 * Assert the software-reset bit in NCR and poll until the chip
	 * clears it (or we give up after UDAV_TX_TIMEOUT iterations).
	 */
	UDAV_SETBIT(sc, UDAV_NCR, UDAV_NCR_RST);
	for (i = 0; i < UDAV_TX_TIMEOUT; i++) {
		if (!(udav_csr_read1(sc, UDAV_NCR) & UDAV_NCR_RST))
			break;
		delay(10);	/* XXX */
	}

	/* Extra settle time after the reset bit clears. */
	delay(10000);	/* XXX */
}

#if defined(__NetBSD__) || defined(__OpenBSD__)
/*
 * Autoconfiguration activate/deactivate hook (NetBSD/OpenBSD only).
 * On deactivation, detach the interface from the network stack and
 * mark the softc dying so in-flight operations bail out.
 */
int
udav_activate(device_ptr_t self, enum devact act)
{
	struct udav_softc *sc = (struct udav_softc *)self;

	DPRINTF(("%s: %s: enter, act=%d\n",
		 USBDEVNAME(sc->sc_dev), __func__, act));

	switch (act) {
	case DVACT_ACTIVATE:
		/* Re-activation is not supported. */
		return (EOPNOTSUPP);
		break;

	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ec.ec_if);
		sc->sc_dying = 1;
		break;
	}

	return (0);
}
#endif

/* Width (in bits) of the index into the 64-bit multicast hash filter. */
#define UDAV_BITS	6

#if defined(__NetBSD__)
#define UDAV_CALCHASH(addr) \
	(ether_crc32_le((addr), ETHER_ADDR_LEN) & ((1 << UDAV_BITS) - 1))
#elif defined(__FreeBSD__)
/*
 * Compute the multicast filter bit position for an Ethernet address:
 * a bit-serial CRC over the 6 address bytes, LSB first.  The caller
 * (udav_setmulti) uses the returned 6-bit value to index the 64-bit
 * hash table written to the UDAV_MAR registers.
 */
Static uint32_t
udav_mchash(const uint8_t *addr)
{
	uint32_t crc, carry;
	int idx, bit;
	uint8_t data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000) ?
1 : 0) ^ (data & 0x01); crc <<= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return((crc >> 26) & 0x0000003F); } #endif Static void udav_setmulti(struct udav_softc *sc) { struct ifnet *ifp; #if defined(__NetBSD__) struct ether_multi *enm; struct ether_multistep step; #elif defined(__FreeBSD__) struct ifmultiaddr *ifma; #endif u_int8_t hashes[8]; int h = 0; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; ifp = GET_IFP(sc); if (ifp->if_flags & IFF_PROMISC) { UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); return; } else if (ifp->if_flags & IFF_ALLMULTI) { #if defined(__NetBSD__) allmulti: #endif ifp->if_flags |= IFF_ALLMULTI; UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL); UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_PRMSC); return; } /* first, zot all the existing hash bits */ memset(hashes, 0x00, sizeof(hashes)); hashes[7] |= 0x80; /* broadcast address */ udav_csr_write(sc, UDAV_MAR, hashes, sizeof(hashes)); /* now program new ones */ #if defined(__NetBSD__) ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); while (enm != NULL) { if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) goto allmulti; h = UDAV_CALCHASH(enm->enm_addrlo); hashes[h>>3] |= 1 << (h & 0x7); ETHER_NEXT_MULTI(step, enm); } #elif defined(__FreeBSD__) #if __FreeBSD_version >= 500000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = udav_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); hashes[h / 8] |= 1 << (h % 8); } #endif /* disable all multicast */ ifp->if_flags &= ~IFF_ALLMULTI; UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_ALL); /* write hash value to the register */ udav_csr_write(sc, UDAV_MAR, hashes, sizeof(hashes)); } Static int udav_openpipes(struct udav_softc *sc) { struct udav_chain *c; usbd_status err; int i; int error = 0; if (sc->sc_dying) return (EIO); sc->sc_refcnt++; /* Open RX 
pipe */ err = usbd_open_pipe(sc->sc_ctl_iface, sc->sc_bulkin_no, USBD_EXCLUSIVE_USE, &sc->sc_pipe_rx); if (err) { printf("%s: open rx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); error = EIO; goto done; } /* Open TX pipe */ err = usbd_open_pipe(sc->sc_ctl_iface, sc->sc_bulkout_no, USBD_EXCLUSIVE_USE, &sc->sc_pipe_tx); if (err) { printf("%s: open tx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); error = EIO; goto done; } #if 0 /* XXX: interrupt endpoint is not yet supported */ /* Open Interrupt pipe */ err = usbd_open_pipe_intr(sc->sc_ctl_iface, sc->sc_intrin_no, USBD_EXCLUSIVE_USE, &sc->sc_pipe_intr, sc, &sc->sc_cdata.udav_ibuf, UDAV_INTR_PKGLEN, udav_intr, UDAV_INTR_INTERVAL); if (err) { printf("%s: open intr pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); error = EIO; goto done; } #endif /* Start up the receive pipe. */ for (i = 0; i < UDAV_RX_LIST_CNT; i++) { c = &sc->sc_cdata.udav_rx_chain[i]; usbd_setup_xfer(c->udav_xfer, sc->sc_pipe_rx, c, c->udav_buf, UDAV_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, udav_rxeof); (void)usbd_transfer(c->udav_xfer); DPRINTF(("%s: %s: start read\n", USBDEVNAME(sc->sc_dev), __func__)); } done: if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (error); } Static int udav_newbuf(struct udav_softc *sc, struct udav_chain *c, struct mbuf *m) { struct mbuf *m_new = NULL; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->sc_dev)); return (ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->sc_dev)); m_freem(m_new); return (ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, 
ETHER_ALIGN);
	c->udav_mbuf = m_new;

	return (0);
}

/*
 * Set up the receive descriptor chain: for each of the UDAV_RX_LIST_CNT
 * slots, attach a cluster mbuf (via udav_newbuf) and, if not already
 * present, a USB transfer with a DMA-able buffer of UDAV_BUFSZ bytes.
 * Returns 0 on success or ENOBUFS if any allocation fails.
 */
Static int
udav_rx_list_init(struct udav_softc *sc)
{
	struct udav_cdata *cd;
	struct udav_chain *c;
	int i;

	DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

	cd = &sc->sc_cdata;
	for (i = 0; i < UDAV_RX_LIST_CNT; i++) {
		c = &cd->udav_rx_chain[i];
		c->udav_sc = sc;
		c->udav_idx = i;
		if (udav_newbuf(sc, c, NULL) == ENOBUFS)
			return (ENOBUFS);
		if (c->udav_xfer == NULL) {
			c->udav_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (c->udav_xfer == NULL)
				return (ENOBUFS);
			c->udav_buf = usbd_alloc_buffer(c->udav_xfer,
			    UDAV_BUFSZ);
			if (c->udav_buf == NULL) {
				usbd_free_xfer(c->udav_xfer);
				return (ENOBUFS);
			}
		}
	}

	return (0);
}

/*
 * Set up the transmit descriptor chain: no mbuf is attached here (one
 * is taken from the send queue at transmit time); allocate a USB
 * transfer and DMA-able buffer per slot if not already present.
 * Returns 0 on success or ENOBUFS if any allocation fails.
 */
Static int
udav_tx_list_init(struct udav_softc *sc)
{
	struct udav_cdata *cd;
	struct udav_chain *c;
	int i;

	DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

	cd = &sc->sc_cdata;
	for (i = 0; i < UDAV_TX_LIST_CNT; i++) {
		c = &cd->udav_tx_chain[i];
		c->udav_sc = sc;
		c->udav_idx = i;
		c->udav_mbuf = NULL;
		if (c->udav_xfer == NULL) {
			c->udav_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (c->udav_xfer == NULL)
				return (ENOBUFS);
			c->udav_buf = usbd_alloc_buffer(c->udav_xfer,
			    UDAV_BUFSZ);
			if (c->udav_buf == NULL) {
				usbd_free_xfer(c->udav_xfer);
				return (ENOBUFS);
			}
		}
	}

	return (0);
}

/*
 * ifnet start routine: take one packet off the send queue and hand it
 * to udav_send().  Only a single transmit is kept in flight: IFF_OACTIVE
 * is set here and cleared by udav_txeof() on completion.
 */
Static void
udav_start(struct ifnet *ifp)
{
	struct udav_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	DPRINTF(("%s: %s: enter, link=%d\n",
		 USBDEVNAME(sc->sc_dev), __func__, sc->sc_link));

	if (sc->sc_dying)
		return;

	/* Nothing to do until the PHY reports link (see udav_tick_task). */
	if (!sc->sc_link)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	/*
	 * NetBSD peeks at the queue and only dequeues after a successful
	 * submit; FreeBSD dequeues up front and re-prepends on failure.
	 */
#if defined(__NetBSD__)
	IFQ_POLL(&ifp->if_snd, m_head);
#elif defined(__FreeBSD__)
	IF_DEQUEUE(&ifp->if_snd, m_head);
#endif
	if (m_head == NULL)
		return;

	if (udav_send(sc, m_head, 0)) {
#if defined(__FreeBSD__)
		IF_PREPEND(&ifp->if_snd, m_head);
#endif
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

#if defined(__NetBSD__)
	IFQ_DEQUEUE(&ifp->if_snd, m_head);
#endif

#if NBPFILTER > 0
	BPF_MTAP(ifp, m_head);
#endif

	ifp->if_flags |= IFF_OACTIVE;

	/* Set a timeout in
case the chip goes out to lunch. */ ifp->if_timer = 5; } Static int udav_send(struct udav_softc *sc, struct mbuf *m, int idx) { int total_len; struct udav_chain *c; usbd_status err; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),__func__)); c = &sc->sc_cdata.udav_tx_chain[idx]; /* Copy the mbuf data into a contiguous buffer */ /* first 2 bytes are packet length */ m_copydata(m, 0, m->m_pkthdr.len, c->udav_buf + 2); c->udav_mbuf = m; total_len = m->m_pkthdr.len; if (total_len < UDAV_MIN_FRAME_LEN) { memset(c->udav_buf + 2 + total_len, 0, UDAV_MIN_FRAME_LEN - total_len); total_len = UDAV_MIN_FRAME_LEN; } /* Frame length is specified in the first 2bytes of the buffer */ c->udav_buf[0] = (u_int8_t)total_len; c->udav_buf[1] = (u_int8_t)(total_len >> 8); total_len += 2; usbd_setup_xfer(c->udav_xfer, sc->sc_pipe_tx, c, c->udav_buf, total_len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, UDAV_TX_TIMEOUT, udav_txeof); /* Transmit */ sc->sc_refcnt++; err = usbd_transfer(c->udav_xfer); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err != USBD_IN_PROGRESS) { printf("%s: udav_send error=%s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); /* Stop the interface */ usb_add_task(sc->sc_udev, &sc->sc_stop_task); return (EIO); } DPRINTF(("%s: %s: send %d bytes\n", USBDEVNAME(sc->sc_dev), __func__, total_len)); sc->sc_cdata.udav_tx_cnt++; return (0); } Static void udav_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct udav_chain *c = priv; struct udav_softc *sc = c->udav_sc; struct ifnet *ifp = GET_IFP(sc); #if defined(__NetBSD__) int s; #endif if (sc->sc_dying) return; #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) 
UDAV_UNLOCK(sc); #endif return; } ifp->if_oerrors++; printf("%s: usb error on tx: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(status)); if (status == USBD_STALLED) { sc->sc_refcnt++; usbd_clear_endpoint_stall(sc->sc_pipe_tx); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif return; } ifp->if_opackets++; m_freem(c->udav_mbuf); c->udav_mbuf = NULL; #if defined(__NetBSD__) if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) #elif defined(__FreeBSD__) if ( ifp->if_snd.ifq_head != NULL ) #endif udav_start(ifp); #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif } Static void udav_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct udav_chain *c = priv; struct udav_softc *sc = c->udav_sc; struct ifnet *ifp = GET_IFP(sc); struct mbuf *m; u_int32_t total_len; u_int8_t *pktstat; #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),__func__)); if (sc->sc_dying) return; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; sc->sc_rx_errs++; if (usbd_ratecheck(&sc->sc_rx_notice)) { printf("%s: %u usb errors on rx: %s\n", USBDEVNAME(sc->sc_dev), sc->sc_rx_errs, usbd_errstr(status)); sc->sc_rx_errs = 0; } if (status == USBD_STALLED) { sc->sc_refcnt++; usbd_clear_endpoint_stall(sc->sc_pipe_rx); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); /* copy data to mbuf */ m = c->udav_mbuf; memcpy(mtod(m, char *), c->udav_buf, total_len); /* first byte in received data */ pktstat = mtod(m, u_int8_t *); m_adj(m, sizeof(u_int8_t)); DPRINTF(("%s: RX Status: 0x%02x\n", USBDEVNAME(sc->sc_dev), *pktstat)); total_len = UGETW(mtod(m, u_int8_t *)); m_adj(m, sizeof(u_int16_t)); if (*pktstat & UDAV_RSR_LCS) { ifp->if_collisions++; goto done; } if (total_len < sizeof(struct 
ether_header) || *pktstat & UDAV_RSR_ERR) { ifp->if_ierrors++; goto done; } ifp->if_ipackets++; total_len -= ETHER_CRC_LEN; m->m_pkthdr.len = m->m_len = total_len; #if defined(__NetBSD__) m->m_pkthdr.rcvif = ifp; #elif defined(__FreeBSD__) m->m_pkthdr.rcvif = (struct ifnet *)&sc->sc_qdat; #endif #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif #if defined(__NetBSD__) if (udav_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; goto done1; } #endif #if NBPFILTER > 0 BPF_MTAP(ifp, m); #endif DPRINTF(("%s: %s: deliver %d\n", USBDEVNAME(sc->sc_dev), __func__, m->m_len)); #if defined(__NetBSD__) IF_INPUT(ifp, m); #endif #if defined(__FreeBSD__) usb_ether_input(m); UDAV_UNLOCK(sc); return ; #endif #if defined(__NetBSD__) done1: splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif done: /* Setup new transfer */ usbd_setup_xfer(xfer, sc->sc_pipe_rx, c, c->udav_buf, UDAV_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, udav_rxeof); sc->sc_refcnt++; usbd_transfer(xfer); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); DPRINTF(("%s: %s: start rx\n", USBDEVNAME(sc->sc_dev), __func__)); } #if 0 Static void udav_intr() { } #endif Static int udav_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct udav_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; #if defined(__NetBSD__) int s; #endif int error = 0; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (EIO); #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif switch (cmd) { case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = GET_MII(sc); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); if (error == ENETRESET) { udav_setmulti(sc); error = 0; } break; } #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif return (error); } Static void udav_watchdog(struct 
ifnet *ifp) { struct udav_softc *sc = ifp->if_softc; struct udav_chain *c; usbd_status stat; #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); ifp->if_oerrors++; printf("%s: watchdog timeout\n", USBDEVNAME(sc->sc_dev)); #if defined(__NetBSD__) s = splusb(); #elif defined(__FreeBSD__) UDAV_LOCK(sc) #endif c = &sc->sc_cdata.udav_tx_chain[0]; usbd_get_xfer_status(c->udav_xfer, NULL, NULL, NULL, &stat); udav_txeof(c->udav_xfer, c, stat); #if defined(__NetBSD__) if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) #elif defined(__FreeBSD__) if ( ifp->if_snd.ifq_head != NULL ) #endif udav_start(ifp); #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif } Static void udav_stop_task(struct udav_softc *sc) { udav_stop(GET_IFP(sc), 1); } /* Stop the adapter and free any mbufs allocated to the RX and TX lists. */ Static void udav_stop(struct ifnet *ifp, int disable) { struct udav_softc *sc = ifp->if_softc; usbd_status err; int i; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); ifp->if_timer = 0; udav_reset(sc); usb_uncallout(sc->sc_stat_ch, udav_tick, sc); /* Stop transfers */ /* RX endpoint */ if (sc->sc_pipe_rx != NULL) { err = usbd_abort_pipe(sc->sc_pipe_rx); if (err) printf("%s: abort rx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_pipe_rx); if (err) printf("%s: close rx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_pipe_rx = NULL; } /* TX endpoint */ if (sc->sc_pipe_tx != NULL) { err = usbd_abort_pipe(sc->sc_pipe_tx); if (err) printf("%s: abort tx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_pipe_tx); if (err) printf("%s: close tx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_pipe_tx = NULL; } #if 0 /* XXX: Interrupt endpoint is not yet supported!! 
*/ /* Interrupt endpoint */ if (sc->sc_pipe_intr != NULL) { err = usbd_abort_pipe(sc->sc_pipe_intr); if (err) printf("%s: abort intr pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_pipe_intr); if (err) printf("%s: close intr pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_pipe_intr = NULL; } #endif /* Free RX resources. */ for (i = 0; i < UDAV_RX_LIST_CNT; i++) { if (sc->sc_cdata.udav_rx_chain[i].udav_mbuf != NULL) { m_freem(sc->sc_cdata.udav_rx_chain[i].udav_mbuf); sc->sc_cdata.udav_rx_chain[i].udav_mbuf = NULL; } if (sc->sc_cdata.udav_rx_chain[i].udav_xfer != NULL) { usbd_free_xfer(sc->sc_cdata.udav_rx_chain[i].udav_xfer); sc->sc_cdata.udav_rx_chain[i].udav_xfer = NULL; } } /* Free TX resources. */ for (i = 0; i < UDAV_TX_LIST_CNT; i++) { if (sc->sc_cdata.udav_tx_chain[i].udav_mbuf != NULL) { m_freem(sc->sc_cdata.udav_tx_chain[i].udav_mbuf); sc->sc_cdata.udav_tx_chain[i].udav_mbuf = NULL; } if (sc->sc_cdata.udav_tx_chain[i].udav_xfer != NULL) { usbd_free_xfer(sc->sc_cdata.udav_tx_chain[i].udav_xfer); sc->sc_cdata.udav_tx_chain[i].udav_xfer = NULL; } } sc->sc_link = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); } /* Set media options */ Static int udav_ifmedia_change(struct ifnet *ifp) { struct udav_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); sc->sc_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } return (mii_mediachg(mii)); } /* Report current media status. 
*/
Static void
udav_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct udav_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);

	DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__));

	if (sc->sc_dying)
		return;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		/* Interface is down: report no active media. */
		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Periodic timer, runs in callout context: defer the real work to
 * process context via the USB task queue (MII access does USB I/O
 * and must not run at interrupt level).
 */
Static void
udav_tick(void *xsc)
{
	struct udav_softc *sc = xsc;

	if (sc == NULL)
		return;

	DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),
			__func__));

	if (sc->sc_dying)
		return;

	/* Perform periodic stuff in process context */
	usb_add_task(sc->sc_udev, &sc->sc_tick_task);
}

/*
 * Process-context half of the periodic timer: tick the MII state
 * machine, detect link-up (setting sc_link and kicking the transmit
 * queue if packets are waiting), then rearm the one-second callout.
 */
Static void
udav_tick_task(void *xsc)
{
	struct udav_softc *sc = xsc;
	struct ifnet *ifp;
	struct mii_data *mii;
#if defined(__NetBSD__)
	int s;
#endif

	if (sc == NULL)
		return;

	DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),
			__func__));

	if (sc->sc_dying)
		return;

	ifp = GET_IFP(sc);
	mii = GET_MII(sc);

	if (mii == NULL)
		return;

#if defined(__NetBSD__)
	s = splnet();
#elif defined(__FreeBSD__)
	UDAV_LOCK(sc);
#endif

	mii_tick(mii);
	if (!sc->sc_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			DPRINTF(("%s: %s: got link\n",
				 USBDEVNAME(sc->sc_dev), __func__));
			sc->sc_link++;
			/* Link came up: restart any stalled transmit. */
#if defined(__NetBSD__)
			if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
#elif defined(__FreeBSD__)
			if ( ifp->if_snd.ifq_head != NULL )
#endif
				udav_start(ifp);
		}
	}

	usb_callout(sc->sc_stat_ch, hz, udav_tick, sc);

#if defined(__NetBSD__)
	splx(s);
#elif defined(__FreeBSD__)
	UDAV_UNLOCK(sc);
#endif
}

/* Get exclusive access to the MII registers */
Static void
udav_lock_mii(struct udav_softc *sc)
{
	DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),
			__func__));

	/* Hold a reference across the lock so detach waits for us. */
	sc->sc_refcnt++;
#if defined(__NetBSD__)
	lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL);
#elif defined(__FreeBSD__)
	lockmgr(&sc->sc_mii_lock,
LK_EXCLUSIVE, NULL, NULL); #endif } Static void udav_unlock_mii(struct udav_softc *sc) { DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); #if defined(__NetBSD__) lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL); #elif defined(__FreeBSD__) lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, NULL); #endif if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } Static int udav_miibus_readreg(device_ptr_t dev, int phy, int reg) { struct udav_softc *sc; u_int8_t val[2]; u_int16_t data16; if (dev == NULL) return (0); sc = USBGETSOFTC(dev); DPRINTFN(0xff, ("%s: %s: enter, phy=%d reg=0x%04x\n", USBDEVNAME(sc->sc_dev), __func__, phy, reg)); if (sc->sc_dying) { #ifdef DIAGNOSTIC printf("%s: %s: dying\n", USBDEVNAME(sc->sc_dev), __func__); #endif return (0); } /* XXX: one PHY only for the internal PHY */ if (phy != 0) { DPRINTFN(0xff, ("%s: %s: phy=%d is not supported\n", USBDEVNAME(sc->sc_dev), __func__, phy)); return (0); } udav_lock_mii(sc); /* select internal PHY and set PHY register address */ udav_csr_write1(sc, UDAV_EPAR, UDAV_EPAR_PHY_ADR0 | (reg & UDAV_EPAR_EROA_MASK)); /* select PHY operation and start read command */ udav_csr_write1(sc, UDAV_EPCR, UDAV_EPCR_EPOS | UDAV_EPCR_ERPRR); /* XXX: should be wait? 
*/ /* end read command */ UDAV_CLRBIT(sc, UDAV_EPCR, UDAV_EPCR_ERPRR); /* retrieve the result from data registers */ udav_csr_read(sc, UDAV_EPDRL, val, 2); udav_unlock_mii(sc); data16 = val[0] | (val[1] << 8); DPRINTFN(0xff, ("%s: %s: phy=%d reg=0x%04x => 0x%04x\n", USBDEVNAME(sc->sc_dev), __func__, phy, reg, data16)); return (data16); } Static void udav_miibus_writereg(device_ptr_t dev, int phy, int reg, int data) { struct udav_softc *sc; u_int8_t val[2]; if (dev == NULL) return; sc = USBGETSOFTC(dev); DPRINTFN(0xff, ("%s: %s: enter, phy=%d reg=0x%04x data=0x%04x\n", USBDEVNAME(sc->sc_dev), __func__, phy, reg, data)); if (sc->sc_dying) { #ifdef DIAGNOSTIC printf("%s: %s: dying\n", USBDEVNAME(sc->sc_dev), __func__); #endif return; } /* XXX: one PHY only for the internal PHY */ if (phy != 0) { DPRINTFN(0xff, ("%s: %s: phy=%d is not supported\n", USBDEVNAME(sc->sc_dev), __func__, phy)); return; } udav_lock_mii(sc); /* select internal PHY and set PHY register address */ udav_csr_write1(sc, UDAV_EPAR, UDAV_EPAR_PHY_ADR0 | (reg & UDAV_EPAR_EROA_MASK)); /* put the value to the data registers */ val[0] = data & 0xff; val[1] = (data >> 8) & 0xff; udav_csr_write(sc, UDAV_EPDRL, val, 2); /* select PHY operation and start write command */ udav_csr_write1(sc, UDAV_EPCR, UDAV_EPCR_EPOS | UDAV_EPCR_ERPRW); /* XXX: should be wait? */ /* end write command */ UDAV_CLRBIT(sc, UDAV_EPCR, UDAV_EPCR_ERPRW); udav_unlock_mii(sc); return; } Static void udav_miibus_statchg(device_ptr_t dev) { #ifdef UDAV_DEBUG struct udav_softc *sc; if (dev == NULL) return; sc = USBGETSOFTC(dev); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); #endif /* Nothing to do */ } #if defined(__FreeBSD__) /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ Static void udav_shutdown(device_ptr_t dev) { struct udav_softc *sc; sc = device_get_softc(dev); udav_stop_task(sc); return; } Static void udav_rxstart(struct ifnet *ifp) { struct udav_softc *sc; struct udav_chain *c; sc = ifp->if_softc; UDAV_LOCK(sc); c = &sc->sc_cdata.udav_rx_chain[sc->sc_cdata.udav_rx_prod]; if (udav_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; UDAV_UNLOCK(sc); return; } /* Setup new transfer. */ usbd_setup_xfer(c->udav_xfer, sc->sc_pipe_rx, c, c->udav_buf, UDAV_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, udav_rxeof); usbd_transfer(c->udav_xfer); UDAV_UNLOCK(sc); return; } #endif Index: head/sys/dev/usb/ubsa.c =================================================================== --- head/sys/dev/usb/ubsa.c (revision 129878) +++ head/sys/dev/usb/ubsa.c (revision 129879) @@ -1,770 +1,771 @@ /*- * Copyright (c) 2002, Alexander Kabaev . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Ichiro FUKUHARA (ichiro@ichiro.org). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include +#include #include #include #include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #ifdef USB_DEBUG Static int ubsadebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, ubsa, CTLFLAG_RW, 0, "USB ubsa"); SYSCTL_INT(_hw_usb_ubsa, OID_AUTO, debug, CTLFLAG_RW, &ubsadebug, 0, "ubsa debug level"); #define DPRINTFN(n, x) do { \ if (ubsadebug > (n)) \ logprintf x; \ } while (0) #else #define DPRINTFN(n, x) #endif #define DPRINTF(x) DPRINTFN(0, x) #define UBSA_MODVER 1 /* module version */ #define UBSA_CONFIG_INDEX 1 #define UBSA_IFACE_INDEX 0 #define UBSA_INTR_INTERVAL 100 /* ms */ #define UBSA_SET_BAUDRATE 0x00 #define UBSA_SET_STOP_BITS 0x01 #define UBSA_SET_DATA_BITS 0x02 #define UBSA_SET_PARITY 0x03 #define UBSA_SET_DTR 0x0A #define UBSA_SET_RTS 0x0B #define UBSA_SET_BREAK 0x0C #define UBSA_SET_FLOW_CTRL 0x10 #define UBSA_PARITY_NONE 0x00 #define UBSA_PARITY_EVEN 0x01 #define UBSA_PARITY_ODD 0x02 #define UBSA_PARITY_MARK 0x03 #define UBSA_PARITY_SPACE 0x04 #define UBSA_FLOW_NONE 0x0000 #define UBSA_FLOW_OCTS 0x0001 #define UBSA_FLOW_ODSR 0x0002 #define UBSA_FLOW_IDSR 0x0004 #define UBSA_FLOW_IDTR 0x0008 #define UBSA_FLOW_IRTS 0x0010 #define UBSA_FLOW_ORTS 0x0020 #define UBSA_FLOW_UNKNOWN 0x0040 #define UBSA_FLOW_OXON 0x0080 #define UBSA_FLOW_IXON 0x0100 /* line status register */ #define 
UBSA_LSR_TSRE 0x40 /* Transmitter empty: byte sent */ #define UBSA_LSR_TXRDY 0x20 /* Transmitter buffer empty */ #define UBSA_LSR_BI 0x10 /* Break detected */ #define UBSA_LSR_FE 0x08 /* Framing error: bad stop bit */ #define UBSA_LSR_PE 0x04 /* Parity error */ #define UBSA_LSR_OE 0x02 /* Overrun, lost incoming byte */ #define UBSA_LSR_RXRDY 0x01 /* Byte ready in Receive Buffer */ #define UBSA_LSR_RCV_MASK 0x1f /* Mask for incoming data or error */ /* modem status register */ /* All deltas are from the last read of the MSR. */ #define UBSA_MSR_DCD 0x80 /* Current Data Carrier Detect */ #define UBSA_MSR_RI 0x40 /* Current Ring Indicator */ #define UBSA_MSR_DSR 0x20 /* Current Data Set Ready */ #define UBSA_MSR_CTS 0x10 /* Current Clear to Send */ #define UBSA_MSR_DDCD 0x08 /* DCD has changed state */ #define UBSA_MSR_TERI 0x04 /* RI has toggled low to high */ #define UBSA_MSR_DDSR 0x02 /* DSR has changed state */ #define UBSA_MSR_DCTS 0x01 /* CTS has changed state */ struct ubsa_softc { struct ucom_softc sc_ucom; int sc_iface_number; /* interface number */ usbd_interface_handle sc_intr_iface; /* interrupt interface */ int sc_intr_number; /* interrupt number */ usbd_pipe_handle sc_intr_pipe; /* interrupt pipe */ u_char *sc_intr_buf; /* interrupt buffer */ int sc_isize; u_char sc_dtr; /* current DTR state */ u_char sc_rts; /* current RTS state */ u_char sc_lsr; /* Local status register */ u_char sc_msr; /* ubsa status register */ #if __FreeBSD_version >= 500000 void *sc_swicookie; #endif }; Static void ubsa_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubsa_notify(void *); Static void ubsa_get_status(void *, int, u_char *, u_char *); Static void ubsa_set(void *, int, int, int); Static int ubsa_param(void *, int, struct termios *); Static int ubsa_open(void *, int); Static void ubsa_close(void *, int); Static int ubsa_request(struct ubsa_softc *, u_int8_t, u_int16_t); Static void ubsa_dtr(struct ubsa_softc *, int); Static void ubsa_rts(struct 
ubsa_softc *, int); Static void ubsa_baudrate(struct ubsa_softc *, speed_t); Static void ubsa_parity(struct ubsa_softc *, tcflag_t); Static void ubsa_databits(struct ubsa_softc *, tcflag_t); Static void ubsa_stopbits(struct ubsa_softc *, tcflag_t); Static void ubsa_flow(struct ubsa_softc *, tcflag_t, tcflag_t); struct ucom_callback ubsa_callback = { ubsa_get_status, ubsa_set, ubsa_param, NULL, ubsa_open, ubsa_close, NULL, NULL }; Static const struct ubsa_product { uint16_t vendor; uint16_t product; } ubsa_products [] = { /* BELKIN F5U103 */ { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U103 }, /* BELKIN F5U120 */ { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U120 }, /* GoHubs GO-COM232 */ { USB_VENDOR_ETEK, USB_PRODUCT_ETEK_1COM }, /* GoHubs GO-COM232 */ { USB_VENDOR_GOHUBS, USB_PRODUCT_GOHUBS_GOCOM232 }, /* Peracom */ { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_SERIAL1 }, { 0, 0 } }; Static device_probe_t ubsa_match; Static device_attach_t ubsa_attach; Static device_detach_t ubsa_detach; Static device_method_t ubsa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ubsa_match), DEVMETHOD(device_attach, ubsa_attach), DEVMETHOD(device_detach, ubsa_detach), { 0, 0 } }; Static driver_t ubsa_driver = { "ucom", ubsa_methods, sizeof (struct ubsa_softc) }; DRIVER_MODULE(ubsa, uhub, ubsa_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(ubsa, usb, 1, 1, 1); MODULE_DEPEND(ubsa, ucom, UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); MODULE_VERSION(ubsa, UBSA_MODVER); #if __FreeBSD_version >= 500000 static struct ithd *ucom_ithd; #endif USB_MATCH(ubsa) { USB_MATCH_START(ubsa, uaa); int i; if (uaa->iface != NULL) return (UMATCH_NONE); for (i = 0; ubsa_products[i].vendor != 0; i++) { if (ubsa_products[i].vendor == uaa->vendor && ubsa_products[i].product == uaa->product) { return (UMATCH_VENDOR_PRODUCT); } } return (UMATCH_NONE); } USB_ATTACH(ubsa) { USB_ATTACH_START(ubsa, sc, uaa); usbd_device_handle dev; struct ucom_softc *ucom; usb_config_descriptor_t *cdesc; 
usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char *devinfo; const char *devname; usbd_status err; int i; dev = uaa->device; devinfo = malloc(1024, M_USBDEV, M_WAITOK); ucom = &sc->sc_ucom; bzero(sc, sizeof (struct ubsa_softc)); /* * initialize rts, dtr variables to something * different from boolean 0, 1 */ sc->sc_dtr = -1; sc->sc_rts = -1; usbd_devinfo(dev, 0, devinfo); /* USB_ATTACH_SETUP; */ ucom->sc_dev = self; device_set_desc_copy(self, devinfo); /* USB_ATTACH_SETUP; */ ucom->sc_udev = dev; ucom->sc_iface = uaa->iface; devname = USBDEVNAME(ucom->sc_dev); printf("%s: %s\n", devname, devinfo); DPRINTF(("ubsa attach: sc = %p\n", sc)); /* initialize endpoints */ ucom->sc_bulkin_no = ucom->sc_bulkout_no = -1; sc->sc_intr_number = -1; sc->sc_intr_pipe = NULL; /* Move the device into the configured state. */ err = usbd_set_config_index(dev, UBSA_CONFIG_INDEX, 1); if (err) { printf("%s: failed to set configuration: %s\n", devname, usbd_errstr(err)); ucom->sc_dying = 1; goto error; } /* get the config descriptor */ cdesc = usbd_get_config_descriptor(ucom->sc_udev); if (cdesc == NULL) { printf("%s: failed to get configuration descriptor\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } /* get the first interface */ err = usbd_device2interface_handle(dev, UBSA_IFACE_INDEX, &ucom->sc_iface); if (err) { printf("%s: failed to get interface: %s\n", devname, usbd_errstr(err)); ucom->sc_dying = 1; goto error; } /* Find the endpoints */ id = usbd_get_interface_descriptor(ucom->sc_iface); sc->sc_iface_number = id->bInterfaceNumber; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(ucom->sc_iface, i); if (ed == NULL) { printf("%s: no endpoint descriptor for %d\n", USBDEVNAME(ucom->sc_dev), i); ucom->sc_dying = 1; goto error; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->sc_intr_number = ed->bEndpointAddress; sc->sc_isize = UGETW(ed->wMaxPacketSize); } 
else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkin_no = ed->bEndpointAddress; ucom->sc_ibufsize = UGETW(ed->wMaxPacketSize); } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkout_no = ed->bEndpointAddress; ucom->sc_obufsize = UGETW(ed->wMaxPacketSize); } } if (sc->sc_intr_number == -1) { printf("%s: Could not find interrupt in\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } /* keep interface for interrupt */ sc->sc_intr_iface = ucom->sc_iface; if (ucom->sc_bulkin_no == -1) { printf("%s: Could not find data bulk in\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } ucom->sc_parent = sc; ucom->sc_portno = UCOM_UNK_PORTNO; /* bulkin, bulkout set above */ ucom->sc_ibufsizepad = ucom->sc_ibufsize; ucom->sc_opkthdrlen = 0; ucom->sc_callback = &ubsa_callback; DPRINTF(("ubsa: in = 0x%x, out = 0x%x, intr = 0x%x\n", ucom->sc_bulkin_no, ucom->sc_bulkout_no, sc->sc_intr_number)); #if __FreeBSD_version >= 500000 swi_add(&ucom_ithd, "ucom", ubsa_notify, sc, SWI_TTY, 0, &sc->sc_swicookie); #endif ucom_attach(ucom); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; error: free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } USB_DETACH(ubsa) { USB_DETACH_START(ubsa, sc); int rv; DPRINTF(("ubsa_detach: sc = %p\n", sc)); if (sc->sc_intr_pipe != NULL) { usbd_abort_pipe(sc->sc_intr_pipe); usbd_close_pipe(sc->sc_intr_pipe); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } sc->sc_ucom.sc_dying = 1; rv = ucom_detach(&sc->sc_ucom); #if __FreeBSD_version >= 500000 ithread_remove_handler(sc->sc_swicookie); #endif return (rv); } Static int ubsa_request(struct ubsa_softc *sc, u_int8_t request, u_int16_t value) { usb_device_request_t req; usbd_status err; req.bmRequestType = 
UT_WRITE_VENDOR_DEVICE; req.bRequest = request; USETW(req.wValue, value); USETW(req.wIndex, sc->sc_iface_number); USETW(req.wLength, 0); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, 0); if (err) printf("%s: ubsa_request: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } Static void ubsa_dtr(struct ubsa_softc *sc, int onoff) { DPRINTF(("ubsa_dtr: onoff = %d\n", onoff)); if (sc->sc_dtr == onoff) return; sc->sc_dtr = onoff; ubsa_request(sc, UBSA_SET_DTR, onoff ? 1 : 0); } Static void ubsa_rts(struct ubsa_softc *sc, int onoff) { DPRINTF(("ubsa_rts: onoff = %d\n", onoff)); if (sc->sc_rts == onoff) return; sc->sc_rts = onoff; ubsa_request(sc, UBSA_SET_RTS, onoff ? 1 : 0); } Static void ubsa_break(struct ubsa_softc *sc, int onoff) { DPRINTF(("ubsa_rts: onoff = %d\n", onoff)); ubsa_request(sc, UBSA_SET_BREAK, onoff ? 1 : 0); } Static void ubsa_set(void *addr, int portno, int reg, int onoff) { struct ubsa_softc *sc; sc = addr; switch (reg) { case UCOM_SET_DTR: ubsa_dtr(sc, onoff); break; case UCOM_SET_RTS: ubsa_rts(sc, onoff); break; case UCOM_SET_BREAK: ubsa_break(sc, onoff); break; default: break; } } Static void ubsa_baudrate(struct ubsa_softc *sc, speed_t speed) { u_int16_t value = 0; DPRINTF(("ubsa_baudrate: speed = %d\n", speed)); switch(speed) { case B0: break; case B300: case B600: case B1200: case B2400: case B4800: case B9600: case B19200: case B38400: case B57600: case B115200: case B230400: value = B230400 / speed; break; default: printf("%s: ubsa_param: unsupported baudrate, " "forcing default of 9600\n", USBDEVNAME(sc->sc_ucom.sc_dev)); value = B230400 / B9600; break; }; if (speed == B0) { ubsa_flow(sc, 0, 0); ubsa_dtr(sc, 0); ubsa_rts(sc, 0); } else ubsa_request(sc, UBSA_SET_BAUDRATE, value); } Static void ubsa_parity(struct ubsa_softc *sc, tcflag_t cflag) { int value; DPRINTF(("ubsa_parity: cflag = 0x%x\n", cflag)); if (cflag & PARENB) value = (cflag & PARODD) ? 
UBSA_PARITY_ODD : UBSA_PARITY_EVEN; else value = UBSA_PARITY_NONE; ubsa_request(sc, UBSA_SET_PARITY, value); } Static void ubsa_databits(struct ubsa_softc *sc, tcflag_t cflag) { int value; DPRINTF(("ubsa_databits: cflag = 0x%x\n", cflag)); switch (cflag & CSIZE) { case CS5: value = 0; break; case CS6: value = 1; break; case CS7: value = 2; break; case CS8: value = 3; break; default: printf("%s: ubsa_param: unsupported databits requested, " "forcing default of 8\n", USBDEVNAME(sc->sc_ucom.sc_dev)); value = 3; } ubsa_request(sc, UBSA_SET_DATA_BITS, value); } Static void ubsa_stopbits(struct ubsa_softc *sc, tcflag_t cflag) { int value; DPRINTF(("ubsa_stopbits: cflag = 0x%x\n", cflag)); value = (cflag & CSTOPB) ? 1 : 0; ubsa_request(sc, UBSA_SET_STOP_BITS, value); } Static void ubsa_flow(struct ubsa_softc *sc, tcflag_t cflag, tcflag_t iflag) { int value; DPRINTF(("ubsa_flow: cflag = 0x%x, iflag = 0x%x\n", cflag, iflag)); value = 0; if (cflag & CRTSCTS) value |= UBSA_FLOW_OCTS | UBSA_FLOW_IRTS; if (iflag & (IXON|IXOFF)) value |= UBSA_FLOW_OXON | UBSA_FLOW_IXON; ubsa_request(sc, UBSA_SET_FLOW_CTRL, value); } Static int ubsa_param(void *addr, int portno, struct termios *ti) { struct ubsa_softc *sc; sc = addr; DPRINTF(("ubsa_param: sc = %p\n", sc)); ubsa_baudrate(sc, ti->c_ospeed); ubsa_parity(sc, ti->c_cflag); ubsa_databits(sc, ti->c_cflag); ubsa_stopbits(sc, ti->c_cflag); ubsa_flow(sc, ti->c_cflag, ti->c_iflag); return (0); } Static int ubsa_open(void *addr, int portno) { struct ubsa_softc *sc; int err; sc = addr; if (sc->sc_ucom.sc_dying) return (ENXIO); DPRINTF(("ubsa_open: sc = %p\n", sc)); if (sc->sc_intr_number != -1 && sc->sc_intr_pipe == NULL) { sc->sc_intr_buf = malloc(sc->sc_isize, M_USBDEV, M_WAITOK); err = usbd_open_pipe_intr(sc->sc_intr_iface, sc->sc_intr_number, USBD_SHORT_XFER_OK, &sc->sc_intr_pipe, sc, sc->sc_intr_buf, sc->sc_isize, ubsa_intr, UBSA_INTR_INTERVAL); if (err) { printf("%s: cannot open interrupt pipe (addr %d)\n", 
USBDEVNAME(sc->sc_ucom.sc_dev), sc->sc_intr_number); return (EIO); } } return (0); } Static void ubsa_close(void *addr, int portno) { struct ubsa_softc *sc; int err; sc = addr; if (sc->sc_ucom.sc_dying) return; DPRINTF(("ubsa_close: close\n")); if (sc->sc_intr_pipe != NULL) { err = usbd_abort_pipe(sc->sc_intr_pipe); if (err) printf("%s: abort interrupt pipe failed: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_intr_pipe); if (err) printf("%s: close interrupt pipe failed: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } } Static void ubsa_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ubsa_softc *sc; u_char *buf; sc = priv; buf = sc->sc_intr_buf; if (sc->sc_ucom.sc_dying) return; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; DPRINTF(("%s: ubsa_intr: abnormal status: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(status))); usbd_clear_endpoint_stall_async(sc->sc_intr_pipe); return; } /* incidentally, Belkin adapter status bits match UART 16550 bits */ sc->sc_lsr = buf[2]; sc->sc_msr = buf[3]; DPRINTF(("%s: ubsa lsr = 0x%02x, msr = 0x%02x\n", USBDEVNAME(sc->sc_ucom.sc_dev), sc->sc_lsr, sc->sc_msr)); #if __FreeBSD_version >= 500000 swi_sched(sc->sc_swicookie, 0); #else ubsa_notify(sc); #endif } /* Handle delayed events. 
*/ Static void ubsa_notify(void *arg) { struct ubsa_softc *sc; sc = arg; ucom_status_change(&sc->sc_ucom); } Static void ubsa_get_status(void *addr, int portno, u_char *lsr, u_char *msr) { struct ubsa_softc *sc; DPRINTF(("ubsa_get_status\n")); sc = addr; if (lsr != NULL) *lsr = sc->sc_lsr; if (msr != NULL) *msr = sc->sc_msr; } Index: head/sys/dev/usb/ubser.c =================================================================== --- head/sys/dev/usb/ubser.c (revision 129878) +++ head/sys/dev/usb/ubser.c (revision 129879) @@ -1,1057 +1,1058 @@ /* * Copyright (c) 2004 Bernd Walter * * $URL: https://devel.bwct.de/svn/projects/ubser/ubser.c $ * $Date: 2004-02-29 01:53:10 +0100 (Sun, 29 Feb 2004) $ * $Author: ticso $ * $Rev: 1127 $ */ /*- * Copyright (c) 2001-2002, Shunsuke Akiyama . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * BWCT serial adapter driver */ #include #include #include #include +#include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include #include #include #ifdef USB_DEBUG static int ubserdebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, ubser, CTLFLAG_RW, 0, "USB ubser"); SYSCTL_INT(_hw_usb_ubser, OID_AUTO, debug, CTLFLAG_RW, &ubserdebug, 0, "ubser debug level"); #define DPRINTF(x) do { \ if (ubserdebug) \ logprintf x; \ } while (0) #define DPRINTFN(n, x) do { \ if (ubserdebug > (n)) \ logprintf x; \ } while (0) #else #define DPRINTF(x) #define DPRINTFN(n,x) #endif #define ISSET(t, f) ((t) & (f)) #define SET(t, f) (t) |= (f) #define CLR(t, f) (t) &= ~((unsigned)(f)) struct ubser_softc { USBBASEDEVICE sc_dev; usbd_device_handle sc_udev; usbd_interface_handle sc_iface; /* data interface */ int sc_ifaceno; int sc_refcnt; u_char sc_dying; u_char sc_opening; int sc_state; uint8_t sc_numser; int sc_bulkin_no; /* bulk in endpoint address */ usbd_pipe_handle sc_bulkin_pipe; /* bulk in pipe */ usbd_xfer_handle sc_ixfer; /* read request */ u_char *sc_ibuf; /* read buffer */ u_int sc_ibufsize; /* read buffer size */ u_int sc_ibufsizepad; /* read buffer size padded */ int sc_bulkout_no; /* bulk out endpoint address */ usbd_pipe_handle sc_bulkout_pipe;/* bulk out pipe */ usbd_xfer_handle sc_oxfer[8]; /* write 
request */ u_char *sc_obuf[8]; /* write buffer */ u_int sc_obufsize; /* write buffer size */ u_int sc_opkthdrlen; /* header length of output packet */ dev_t dev[8]; }; Static d_open_t ubser_open; Static d_close_t ubser_close; Static d_read_t ubser_read; Static d_write_t ubser_write; Static d_ioctl_t ubser_ioctl; Static int ubserparam(struct tty *, struct termios *); Static void ubserstart(struct tty *); Static void ubserstop(struct tty *, int); Static usbd_status ubserstartread(struct ubser_softc *); Static void ubserreadcb(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubserwritecb(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ubser_cleanup(struct ubser_softc *sc); static struct cdevsw ubser_cdevsw = { #if __FreeBSD_version > 502102 .d_version = D_VERSION, #endif .d_open = ubser_open, .d_close = ubser_close, .d_read = ubser_read, .d_write = ubser_write, .d_ioctl = ubser_ioctl, #if __FreeBSD_version < 502103 .d_poll = ttypoll, .d_kqfilter = ttykqfilter, #endif .d_name = "ubser", #if __FreeBSD_version > 502102 .d_flags = D_TTY | D_NEEDGIANT, #else .d_flags = D_TTY, #endif #if __FreeBSD_version < 500014 .d_bmaj = -1, #endif }; USB_DECLARE_DRIVER(ubser); USB_MATCH(ubser) { USB_MATCH_START(ubser, uaa); usb_string_descriptor_t us; usb_interface_descriptor_t *id; usb_device_descriptor_t *dd; int err; if (uaa->iface == NULL) return (UMATCH_NONE); DPRINTFN(20,("ubser: vendor=0x%x, product=0x%x\n", uaa->vendor, uaa->product)); dd = usbd_get_device_descriptor(uaa->device); if (dd == NULL) { printf("ubser: failed to get device descriptor\n"); return (UMATCH_NONE); } id = usbd_get_interface_descriptor(uaa->iface); if (id == NULL) { printf("ubser: failed to get interface descriptor\n"); return (UMATCH_NONE); } err = usbd_get_string_desc(uaa->device, dd->iManufacturer, 0, &us); if (err != 0) return (UMATCH_NONE); /* check if this is a BWCT vendor specific ubser interface */ if (strcmp((char*)us.bString, "B\0W\0C\0T\0") == 0 && 
id->bInterfaceClass == 0xff && id->bInterfaceSubClass == 0x00) return (UMATCH_VENDOR_IFACESUBCLASS); return (UMATCH_NONE); } USB_ATTACH(ubser) { USB_ATTACH_START(ubser, sc, uaa); usbd_device_handle udev = uaa->device; usb_endpoint_descriptor_t *ed; usb_interface_descriptor_t *id; usb_device_request_t req; char *devinfo; struct tty *tp; usbd_status err; int i; int alen; uint8_t epcount; devinfo = malloc(1024, M_USBDEV, M_WAITOK); usbd_devinfo(udev, 0, devinfo); USB_ATTACH_SETUP; printf("%s: %s\n", USBDEVNAME(sc->sc_dev), devinfo); DPRINTFN(10,("\nubser_attach: sc=%p\n", sc)); sc->sc_udev = udev = uaa->device; sc->sc_iface = uaa->iface; for (i = 0; i < 8; i++) { sc->dev[i] = NULL; } /* get interface index */ id = usbd_get_interface_descriptor(uaa->iface); if (id == NULL) { printf("ubser: failed to get interface descriptor\n"); return (UMATCH_NONE); } sc->sc_ifaceno = id->bInterfaceNumber; /* get number of serials */ req.bmRequestType = UT_READ_VENDOR_INTERFACE; req.bRequest = VENDOR_GET_NUMSER; USETW(req.wValue, 0); USETW(req.wIndex, sc->sc_ifaceno); USETW(req.wLength, 1); err = usbd_do_request_flags(udev, &req, &sc->sc_numser, USBD_SHORT_XFER_OK, &alen, USBD_DEFAULT_TIMEOUT); if (err) { printf("%s: cannot get number of serials\n", USBDEVNAME(sc->sc_dev)); goto bad; } else if (alen != 1) { printf("%s: bogus answer on get_numser\n", USBDEVNAME(sc->sc_dev)); goto bad; } if (sc->sc_numser > 8) sc->sc_numser = 8; printf("%s: found %i serials\n", USBDEVNAME(sc->sc_dev), sc->sc_numser); sc->sc_ibufsize = 7; sc->sc_ibufsizepad = 8; sc->sc_obufsize = 7; sc->sc_opkthdrlen = 1; for (i = 0; i < sc->sc_numser; i++) { sc->dev[i] = NULL; } for (i = 0; i < sc->sc_numser; i++) { sc->dev[i] = make_dev(&ubser_cdevsw, USBDEVUNIT(sc->sc_dev) * 8 + i, UID_UUCP, GID_DIALER, 0660, "%s.%d", USBDEVNAME(sc->sc_dev), i); if (sc->dev[i] == NULL) { printf("%s: make_dev failed\n", USBDEVNAME(sc->sc_dev)); goto bad; } sc->dev[i]->si_tty = tp = ttymalloc(NULL); if (sc->dev[i]->si_tty == NULL) { 
printf("%s: ttymalloc failed\n", USBDEVNAME(sc->sc_dev)); goto bad; } DPRINTF(("ubser_attach: tty_attach tp = %p\n", tp)); tp->t_oproc = ubserstart; tp->t_param = ubserparam; tp->t_stop = ubserstop; } /* find our bulk endpoints */ epcount = 0; usbd_endpoint_count(sc->sc_iface, &epcount); sc->sc_bulkin_no = -1; sc->sc_bulkout_no = -1; for (i = 0; i < epcount; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i); if (ed == NULL) { printf("%s: couldn't get ep %d\n", USBDEVNAME(sc->sc_dev), i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->sc_bulkin_no = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->sc_bulkout_no = ed->bEndpointAddress; } } if (sc->sc_bulkin_no == -1) { printf("%s: could not find bulk in endpoint\n", USBDEVNAME(sc->sc_dev)); sc->sc_dying = 1; USB_ATTACH_ERROR_RETURN; } if (sc->sc_bulkout_no == -1) { printf("%s: could not find bulk out endpoint\n", USBDEVNAME(sc->sc_dev)); sc->sc_dying = 1; USB_ATTACH_ERROR_RETURN; } /* Open the bulk pipes */ /* Bulk-in pipe */ err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkin_no, 0, &sc->sc_bulkin_pipe); if (err) { printf("%s: open bulk in error (addr %d): %s\n", USBDEVNAME(sc->sc_dev), sc->sc_bulkin_no, usbd_errstr(err)); goto fail_0; } /* Bulk-out pipe */ err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkout_no, USBD_EXCLUSIVE_USE, &sc->sc_bulkout_pipe); if (err) { printf("%s: open bulk out error (addr %d): %s\n", USBDEVNAME(sc->sc_dev), sc->sc_bulkout_no, usbd_errstr(err)); goto fail_1; } /* Allocate a request and an input buffer and start reading. 
*/ sc->sc_ixfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_ixfer == NULL) { goto fail_2; } sc->sc_ibuf = usbd_alloc_buffer(sc->sc_ixfer, sc->sc_ibufsizepad); if (sc->sc_ibuf == NULL) { goto fail_3; } for (i = 0; i < 8; i++) { sc->sc_oxfer[i] = NULL; sc->sc_obuf[i] = NULL; } for (i = 0; i < sc->sc_numser; i++) { sc->sc_oxfer[i] = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_oxfer[i] == NULL) { goto fail_4; } sc->sc_obuf[i] = usbd_alloc_buffer(sc->sc_oxfer[i], sc->sc_obufsize + sc->sc_opkthdrlen); if (sc->sc_obuf[i] == NULL) { goto fail_4; } } ubserstartread(sc); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; fail_4: for (i = 0; i < sc->sc_numser; i++) { if (sc->sc_oxfer[i] != NULL) { usbd_free_xfer(sc->sc_oxfer[i]); sc->sc_oxfer[i] = NULL; } } fail_3: usbd_free_xfer(sc->sc_ixfer); sc->sc_ixfer = NULL; fail_2: usbd_close_pipe(sc->sc_bulkout_pipe); sc->sc_bulkout_pipe = NULL; fail_1: usbd_close_pipe(sc->sc_bulkin_pipe); sc->sc_bulkin_pipe = NULL; fail_0: sc->sc_opening = 0; wakeup(&sc->sc_opening); bad: ubser_cleanup(sc); for (i = 0; i < 8; i++) { if (sc->dev[i] != NULL) { tp = sc->dev[i]->si_tty; if (tp != NULL) { if (tp->t_state & TS_ISOPEN) { (*linesw[tp->t_line].l_close)(tp, 0); tp->t_gen++; ttyclose(tp); ttwakeup(tp); ttwwakeup(tp); } } destroy_dev(sc->dev[i]); } } DPRINTF(("ubser_attach: ATTACH ERROR\n")); free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } USB_DETACH(ubser) { USB_DETACH_START(ubser, sc); int i, s; struct tty *tp; DPRINTF(("ubser_detach: sc=%p\n", sc)); sc->sc_dying = 1; if (sc->sc_bulkin_pipe != NULL) usbd_abort_pipe(sc->sc_bulkin_pipe); if (sc->sc_bulkout_pipe != NULL) usbd_abort_pipe(sc->sc_bulkout_pipe); for (i = 0; i < 8; i++) { if (sc->dev[i] != NULL) { tp = sc->dev[i]->si_tty; if (tp != NULL) { if (tp->t_state & TS_ISOPEN) { (*linesw[tp->t_line].l_close)(tp, 0); tp->t_gen++; ttyclose(tp); ttwakeup(tp); ttwwakeup(tp); } } destroy_dev(sc->dev[i]); } } s = splusb(); if (--sc->sc_refcnt >= 0) { /* Wait for processes to go away. 
*/ usb_detach_wait(USBDEV(sc->sc_dev)); } splx(s); return (0); } Static int ubserparam(struct tty *tp, struct termios *t) { struct ubser_softc *sc; USB_GET_SC(ubser, dev2unit(tp->t_dev) / 8, sc); if (sc->sc_dying) return (EIO); DPRINTF(("ubserparam: sc = %p\n", sc)); /* * The firmware on our devices can only do 8n1@9600bps * without handshake. * We refuse to accept other configurations. */ /* enshure 9600bps */ switch (t->c_ospeed) { case 9600: break; default: return (EINVAL); } /* 2 stop bits not possible */ if (ISSET(t->c_cflag, CSTOPB)) return (EINVAL); /* XXX parity handling not possible with current firmware */ if (ISSET(t->c_cflag, PARENB)) return (EINVAL); /* we can only do 8 data bits */ switch (ISSET(t->c_cflag, CSIZE)) { case CS8: break; default: return (EINVAL); } /* we can't do any kind of hardware handshaking */ if ((t->c_cflag & (CRTS_IFLOW | CDTR_IFLOW |CDSR_OFLOW |CCAR_OFLOW)) != 0) return (EINVAL); /* * XXX xon/xoff not supported by the firmware! * This is handled within FreeBSD only and may overflow buffers * because of delayed reaction due to device buffering. 
*/ ttsetwater(tp); return (0); } Static void ubserstart(struct tty *tp) { struct ubser_softc *sc; struct cblock *cbp; usbd_status err; int s; u_char *data; int cnt; uint8_t serial; USB_GET_SC(ubser, dev2unit(tp->t_dev) / 8, sc); serial = dev2unit(tp->t_dev) & 0x07; DPRINTF(("ubserstart: sc = %p, tp = %p\n", sc, tp)); if (sc->sc_dying) return; s = spltty(); if (ISSET(tp->t_state, TS_BUSY | TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); DPRINTF(("ubserstart: stopped\n")); goto out; } if (tp->t_outq.c_cc <= tp->t_olowat) { if (ISSET(tp->t_state, TS_SO_OLOWAT)) { CLR(tp->t_state, TS_SO_OLOWAT); wakeup(TSA_OLOWAT(tp)); } selwakeuppri(&tp->t_wsel, TTIPRI); if (tp->t_outq.c_cc == 0) { if (ISSET(tp->t_state, TS_BUSY | TS_SO_OCOMPLETE) == TS_SO_OCOMPLETE && tp->t_outq.c_cc == 0) { CLR(tp->t_state, TS_SO_OCOMPLETE); wakeup(TSA_OCOMPLETE(tp)); } goto out; } } /* Grab the first contiguous region of buffer space. */ data = tp->t_outq.c_cf; cbp = (struct cblock *) ((intptr_t) tp->t_outq.c_cf & ~CROUND); cnt = min((char *) (cbp+1) - tp->t_outq.c_cf, tp->t_outq.c_cc); if (cnt == 0) { DPRINTF(("ubserstart: cnt == 0\n")); goto out; } SET(tp->t_state, TS_BUSY); if (cnt + sc->sc_opkthdrlen > sc->sc_obufsize) { DPRINTF(("ubserstart: big buffer %d chars\n", cnt)); cnt = sc->sc_obufsize; } sc->sc_obuf[serial][0] = serial; memcpy(sc->sc_obuf[serial] + sc->sc_opkthdrlen, data, cnt); DPRINTF(("ubserstart: %d chars\n", cnt)); usbd_setup_xfer(sc->sc_oxfer[serial], sc->sc_bulkout_pipe, (usbd_private_handle)tp, sc->sc_obuf[serial], cnt + sc->sc_opkthdrlen, USBD_NO_COPY, USBD_NO_TIMEOUT, ubserwritecb); /* What can we do on error? 
*/ err = usbd_transfer(sc->sc_oxfer[serial]); if (err != USBD_IN_PROGRESS) printf("ubserstart: err=%s\n", usbd_errstr(err)); ttwwakeup(tp); out: splx(s); } Static void ubserstop(struct tty *tp, int flag) { struct ubser_softc *sc; int s; USB_GET_SC(ubser, dev2unit(tp->t_dev) / 8, sc); DPRINTF(("ubserstop: %d\n", flag)); if (flag & FWRITE) { DPRINTF(("ubserstop: write\n")); s = spltty(); if (ISSET(tp->t_state, TS_BUSY)) { /* XXX do what? */ if (!ISSET(tp->t_state, TS_TTSTOP)) SET(tp->t_state, TS_FLUSH); } splx(s); } DPRINTF(("ubserstop: done\n")); } Static void ubserwritecb(usbd_xfer_handle xfer, usbd_private_handle p, usbd_status status) { struct tty *tp; struct ubser_softc *sc; u_int32_t cc; int s; tp = (struct tty *)p; USB_GET_SC(ubser, dev2unit(tp->t_dev) / 8, sc); DPRINTF(("ubserwritecb: status = %d\n", status)); if (status == USBD_CANCELLED || sc->sc_dying) goto error; if (status != USBD_NORMAL_COMPLETION) { printf("%s: ubserwritecb: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_bulkin_pipe); /* XXX we should restart after some delay. 
*/ goto error; } usbd_get_xfer_status(xfer, NULL, NULL, &cc, NULL); DPRINTF(("ubserwritecb: cc = %d\n", cc)); if (cc <= sc->sc_opkthdrlen) { printf("%s: sent size too small, cc = %d\n", USBDEVNAME(sc->sc_dev), cc); goto error; } /* convert from USB bytes to tty bytes */ cc -= sc->sc_opkthdrlen; s = spltty(); CLR(tp->t_state, TS_BUSY); if (ISSET(tp->t_state, TS_FLUSH)) CLR(tp->t_state, TS_FLUSH); else ndflush(&tp->t_outq, cc); (*linesw[tp->t_line].l_start)(tp); splx(s); return; error: s = spltty(); CLR(tp->t_state, TS_BUSY); splx(s); return; } Static usbd_status ubserstartread(struct ubser_softc *sc) { usbd_status err; DPRINTF(("ubserstartread: start\n")); if (sc->sc_bulkin_pipe == NULL) return (USBD_NORMAL_COMPLETION); usbd_setup_xfer(sc->sc_ixfer, sc->sc_bulkin_pipe, (usbd_private_handle)sc, sc->sc_ibuf, sc->sc_ibufsizepad, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, ubserreadcb); err = usbd_transfer(sc->sc_ixfer); if (err != USBD_IN_PROGRESS) { DPRINTF(("ubserstartread: err = %s\n", usbd_errstr(err))); return (err); } return (USBD_NORMAL_COMPLETION); } Static void ubserreadcb(usbd_xfer_handle xfer, usbd_private_handle p, usbd_status status) { struct ubser_softc *sc = (struct ubser_softc *)p; struct tty *tp; int (*rint) (int, struct tty *); usbd_status err; u_int32_t cc; u_char *cp; int lostcc; int s; DPRINTF(("ubserreadcb: status = %d\n", status)); if (status != USBD_NORMAL_COMPLETION) { printf("%s: ubserreadcb: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_bulkin_pipe); /* XXX we should restart after some delay. 
*/ return; } usbd_get_xfer_status(xfer, NULL, (void **)&cp, &cc, NULL); DPRINTF(("ubserreadcb: got %d bytes from device\n", cc)); if (cc == 0) goto resubmit; if (cc > sc->sc_ibufsizepad) { printf("%s: invalid receive data size, %d chars\n", USBDEVNAME(sc->sc_dev), cc); goto resubmit; } /* parse header */ if (cc < 1) goto resubmit; DPRINTF(("ubserreadcb: got %d chars for serial %d\n", cc - 1, *cp)); tp = sc->dev[*cp]->si_tty; rint = linesw[tp->t_line].l_rint; cp++; cc--; if (cc < 1) goto resubmit; if (!(tp->t_state & TS_ISOPEN)) /* drop data for unused serials */ goto resubmit; s = spltty(); if (tp->t_state & TS_CAN_BYPASS_L_RINT) { if (tp->t_rawq.c_cc + cc > tp->t_ihiwat && (tp->t_iflag & IXOFF) && !(tp->t_state & TS_TBLOCK)) ttyblock(tp); lostcc = b_to_q((char *)cp, cc, &tp->t_rawq); tp->t_rawcc += cc; ttwakeup(tp); if (tp->t_state & TS_TTSTOP && (tp->t_iflag & IXANY || tp->t_cc[VSTART] == tp->t_cc[VSTOP])) { tp->t_state &= ~TS_TTSTOP; tp->t_lflag &= ~FLUSHO; ubserstart(tp); } if (lostcc > 0) printf("%s: lost %d chars\n", USBDEVNAME(sc->sc_dev), lostcc); } else { /* Give characters to tty layer. */ while (cc > 0) { DPRINTFN(7, ("ubserreadcb: char = 0x%02x\n", *cp)); if ((*rint)(*cp, tp) == -1) { /* XXX what should we do? */ printf("%s: lost %d chars\n", USBDEVNAME(sc->sc_dev), cc); break; } cc--; cp++; } } splx(s); resubmit: err = ubserstartread(sc); if (err) { printf("%s: read start failed\n", USBDEVNAME(sc->sc_dev)); /* XXX what should we do now? 
*/
	}
}

/*
 * Release every USB resource held by the adapter: abort and close both
 * bulk pipes, free the shared input transfer and each per-port output
 * transfer.  Every pointer is checked and NULLed, so this is safe to
 * call on a partially-initialized softc.
 */
Static void
ubser_cleanup(struct ubser_softc *sc)
{
	int i;

	DPRINTF(("ubser_cleanup: closing pipes\n"));

	if (sc->sc_bulkin_pipe != NULL) {
		usbd_abort_pipe(sc->sc_bulkin_pipe);
		usbd_close_pipe(sc->sc_bulkin_pipe);
		sc->sc_bulkin_pipe = NULL;
	}
	if (sc->sc_bulkout_pipe != NULL) {
		usbd_abort_pipe(sc->sc_bulkout_pipe);
		usbd_close_pipe(sc->sc_bulkout_pipe);
		sc->sc_bulkout_pipe = NULL;
	}
	if (sc->sc_ixfer != NULL) {
		usbd_free_xfer(sc->sc_ixfer);
		sc->sc_ixfer = NULL;
	}
	for (i = 0; i < sc->sc_numser; i++) {
		if (sc->sc_oxfer[i] != NULL) {
			usbd_free_xfer(sc->sc_oxfer[i]);
			sc->sc_oxfer[i] = NULL;
		}
	}
}

/*
 * Open one serial port of the adapter.  The minor number encodes both
 * the adapter (minor / 8) and the port on it (minor & 7).
 */
static int
ubser_open(dev_t dev, int flag, int mode, usb_proc_ptr p)
{
	struct ubser_softc *sc;
	struct tty *tp;
	int s;
	int error;

	USB_GET_SC(ubser, dev2unit(dev) / 8, sc);

	if (sc->sc_dying)
		return (ENXIO);

	tp = sc->dev[dev2unit(dev) & 0x07]->si_tty;

	DPRINTF(("%s: ubser_open: tp = %p\n", USBDEVNAME(sc->sc_dev), tp));

	if (ISSET(tp->t_state, TS_ISOPEN) &&
	    ISSET(tp->t_state, TS_XCLUDE) &&
	    suser(p))
		return (EBUSY);

	/*
	 * Do the following if this is a first open.
	 */
	s = spltty();
	/* Serialize concurrent opens of this adapter. */
	while (sc->sc_opening)
		tsleep(&sc->sc_opening, PRIBIO, "ubser_op", 0);
	sc->sc_opening = 1;

	if (!ISSET(tp->t_state, TS_ISOPEN)) {
		tp->t_dev = dev;

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/*
		 * Handle initial DCD.
*/ (*linesw[tp->t_line].l_modem)(tp, 1); } sc->sc_refcnt++; /* XXX: wrong refcnt on error later on */ sc->sc_opening = 0; wakeup(&sc->sc_opening); splx(s); error = ttyopen(dev, tp); if (error) goto bad; error = (*linesw[tp->t_line].l_open)(dev, tp); if (error) goto bad; DPRINTF(("%s: ubser_open: success\n", USBDEVNAME(sc->sc_dev))); return (0); sc->sc_opening = 0; wakeup(&sc->sc_opening); splx(s); return (error); bad: DPRINTF(("%s: ubser_open: failed\n", USBDEVNAME(sc->sc_dev))); return (error); } static int ubser_close(dev_t dev, int flag, int mode, usb_proc_ptr p) { struct ubser_softc *sc; struct tty *tp; USB_GET_SC(ubser, dev2unit(dev) / 8, sc); tp = sc->dev[dev2unit(dev) & 0x07]->si_tty; DPRINTF(("%s: ubserclose\n", USBDEVNAME(sc->sc_dev))); if (!ISSET(tp->t_state, TS_ISOPEN)) goto quit; if (sc->sc_dying) goto quit; quit: if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (0); } static int ubser_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, usb_proc_ptr p) { usb_device_request_t req; struct ubser_softc *sc; struct tty *tp; int error; int s; int alen; USB_GET_SC(ubser, dev2unit(dev) / 8, sc); tp = sc->dev[dev2unit(dev) & 0x07]->si_tty; DPRINTF(("ubser_ioctl: cmd = 0x%08lx\n", cmd)); if (sc->sc_dying) return (EIO); error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); if (error >= 0) { DPRINTF(("ubser_ioctl: l_ioctl: error = %d\n", error)); return (error); } error = ttioctl(tp, cmd, data, flag); if (error >= 0) { DPRINTF(("ubser_ioctl: ttioctl: error = %d\n", error)); return (error); } error = 0; s = spltty(); switch (cmd) { case TIOCSBRK: /* clearing break condition is done in firmware */ DPRINTF(("ubser_ioctl: TIOCSBRK\n")); req.bmRequestType = UT_READ_VENDOR_INTERFACE; req.bRequest = VENDOR_SET_BREAK; USETW(req.wValue, dev2unit(dev) & 0x07); USETW(req.wIndex, sc->sc_ifaceno); USETW(req.wLength, 0); error = usbd_do_request_flags(sc->sc_udev, &req, &sc->sc_numser, USBD_SHORT_XFER_OK, &alen, USBD_DEFAULT_TIMEOUT); break; /* 
XXX: something else to handle? */ } splx(s); return (error); } static int ubser_read(dev_t dev, struct uio *uio, int flag) { struct ubser_softc *sc; struct tty *tp; int error; USB_GET_SC(ubser, dev2unit(dev) / 8, sc); tp = sc->dev[dev2unit(dev) & 0x07]->si_tty; DPRINTF(("ubser_read: tp = %p, flag = 0x%x\n", tp, flag)); if (sc->sc_dying) return (EIO); error = (*linesw[tp->t_line].l_read)(tp, uio, flag); DPRINTF(("ubser_read: error = %d\n", error)); return (error); } static int ubser_write(dev_t dev, struct uio *uio, int flag) { struct ubser_softc *sc; struct tty *tp; int error; USB_GET_SC(ubser, dev2unit(dev) / 8, sc); tp = sc->dev[dev2unit(dev) & 0x07]->si_tty; DPRINTF(("ubser_write: tp = %p, flag = 0x%x\n", tp, flag)); if (sc->sc_dying) return (EIO); error = (*linesw[tp->t_line].l_write)(tp, uio, flag); DPRINTF(("ubser_write: error = %d\n", error)); return (error); } DRIVER_MODULE(ubser, uhub, ubser_driver, ubser_devclass, usbd_driver_load, 0); Index: head/sys/dev/usb/ucom.c =================================================================== --- head/sys/dev/usb/ucom.c (revision 129878) +++ head/sys/dev/usb/ucom.c (revision 129879) @@ -1,1198 +1,1199 @@ /* $NetBSD: ucom.c,v 1.40 2001/11/13 06:24:54 lukem Exp $ */ /*- * Copyright (c) 2001-2002, Shunsuke Akiyama . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * TODO: * 1. How do I handle hotchar? */ #include #include #include #include +#include #include #include #include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #ifdef USB_DEBUG static int ucomdebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, ucom, CTLFLAG_RW, 0, "USB ucom"); SYSCTL_INT(_hw_usb_ucom, OID_AUTO, debug, CTLFLAG_RW, &ucomdebug, 0, "ucom debug level"); #define DPRINTF(x) do { \ if (ucomdebug) \ logprintf x; \ } while (0) #define DPRINTFN(n, x) do { \ if (ucomdebug > (n)) \ logprintf x; \ } while (0) #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif Static d_open_t ucomopen; Static d_close_t ucomclose; Static d_read_t ucomread; Static d_write_t ucomwrite; Static d_ioctl_t ucomioctl; static struct cdevsw ucom_cdevsw = { .d_version = D_VERSION, .d_open = ucomopen, .d_close = ucomclose, .d_read = ucomread, .d_write = ucomwrite, .d_ioctl = ucomioctl, .d_name = "ucom", .d_flags = D_TTY | D_NEEDGIANT, #if __FreeBSD_version < 500014 .d_bmaj = -1, #endif }; Static void ucom_cleanup(struct ucom_softc *); Static int ucomctl(struct ucom_softc *, int, int); 
Static int ucomparam(struct tty *, struct termios *); Static void ucomstart(struct tty *); Static void ucomstop(struct tty *, int); Static void ucom_shutdown(struct ucom_softc *); Static void ucom_dtr(struct ucom_softc *, int); Static void ucom_rts(struct ucom_softc *, int); Static void ucom_break(struct ucom_softc *, int); Static usbd_status ucomstartread(struct ucom_softc *); Static void ucomreadcb(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ucomwritecb(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void ucomstopread(struct ucom_softc *); static void disc_optim(struct tty *, struct termios *, struct ucom_softc *); devclass_t ucom_devclass; static moduledata_t ucom_mod = { "ucom", NULL, NULL }; DECLARE_MODULE(ucom, ucom_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(ucom, usb, 1, 1, 1); MODULE_VERSION(ucom, UCOM_MODVER); int ucom_attach(struct ucom_softc *sc) { struct tty *tp; int unit; unit = device_get_unit(sc->sc_dev); sc->sc_tty = tp = ttymalloc(sc->sc_tty); tp->t_oproc = ucomstart; tp->t_param = ucomparam; tp->t_stop = ucomstop; DPRINTF(("ucom_attach: tty_attach tp = %p\n", tp)); DPRINTF(("ucom_attach: make_dev: ucom%d\n", unit)); sc->dev = make_dev(&ucom_cdevsw, unit | UCOM_CALLOUT_MASK, UID_UUCP, GID_DIALER, 0660, "ucom%d", unit); sc->dev->si_tty = tp; return (0); } int ucom_detach(struct ucom_softc *sc) { struct tty *tp = sc->sc_tty; int s; DPRINTF(("ucom_detach: sc = %p, tp = %p\n", sc, sc->sc_tty)); sc->sc_dying = 1; if (sc->sc_bulkin_pipe != NULL) usbd_abort_pipe(sc->sc_bulkin_pipe); if (sc->sc_bulkout_pipe != NULL) usbd_abort_pipe(sc->sc_bulkout_pipe); if (tp != NULL) { if (tp->t_state & TS_ISOPEN) { device_printf(sc->sc_dev, "still open, forcing close\n"); (*linesw[tp->t_line].l_close)(tp, 0); tp->t_gen++; ttyclose(tp); ttwakeup(tp); ttwwakeup(tp); } } else { DPRINTF(("ucom_detach: no tty\n")); return (0); } s = splusb(); if (--sc->sc_refcnt >= 0) { /* Wait for processes to go away. 
*/ usb_detach_wait(USBDEV(sc->sc_dev)); } splx(s); destroy_dev(sc->dev); return (0); } Static void ucom_shutdown(struct ucom_softc *sc) { struct tty *tp = sc->sc_tty; DPRINTF(("ucom_shutdown\n")); /* * Hang up if necessary. Wait a bit, so the other side has time to * notice even if we immediately open the port again. */ if (ISSET(tp->t_cflag, HUPCL)) { (void)ucomctl(sc, TIOCM_DTR, DMBIC); (void)tsleep(sc, TTIPRI, "ucomsd", hz); } } Static int ucomopen(dev_t dev, int flag, int mode, usb_proc_ptr p) { int unit = UCOMUNIT(dev); struct ucom_softc *sc; usbd_status err; struct tty *tp; int s; int error; USB_GET_SC_OPEN(ucom, unit, sc); if (sc->sc_dying) return (ENXIO); tp = sc->sc_tty; DPRINTF(("%s: ucomopen: tp = %p\n", USBDEVNAME(sc->sc_dev), tp)); if (ISSET(tp->t_state, TS_ISOPEN) && ISSET(tp->t_state, TS_XCLUDE) && suser(p)) return (EBUSY); /* * Do the following iff this is a first open. */ s = spltty(); while (sc->sc_opening) tsleep(&sc->sc_opening, PRIBIO, "ucomop", 0); sc->sc_opening = 1; if (!ISSET(tp->t_state, TS_ISOPEN)) { struct termios t; sc->sc_poll = 0; sc->sc_lsr = sc->sc_msr = sc->sc_mcr = 0; tp->t_dev = dev; /* * Initialize the termios status to the defaults. Add in the * sticky bits from TIOCSFLAGS. */ t.c_ispeed = 0; t.c_ospeed = TTYDEF_SPEED; t.c_cflag = TTYDEF_CFLAG; /* Make sure ucomparam() will do something. */ tp->t_ospeed = 0; (void)ucomparam(tp, &t); tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; tp->t_lflag = TTYDEF_LFLAG; ttychars(tp); ttsetwater(tp); /* * Turn on DTR. We must always do this, even if carrier is not * present, because otherwise we'd have to use TIOCSDTR * immediately after setting CLOCAL, which applications do not * expect. We always assert DTR while the device is open * unless explicitly requested to deassert it. 
*/ (void)ucomctl(sc, TIOCM_DTR | TIOCM_RTS, DMBIS); /* Device specific open */ if (sc->sc_callback->ucom_open != NULL) { error = sc->sc_callback->ucom_open(sc->sc_parent, sc->sc_portno); if (error) { ucom_cleanup(sc); sc->sc_opening = 0; wakeup(&sc->sc_opening); splx(s); return (error); } } DPRINTF(("ucomopen: open pipes in = %d out = %d\n", sc->sc_bulkin_no, sc->sc_bulkout_no)); /* Open the bulk pipes */ /* Bulk-in pipe */ err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkin_no, 0, &sc->sc_bulkin_pipe); if (err) { printf("%s: open bulk in error (addr %d): %s\n", USBDEVNAME(sc->sc_dev), sc->sc_bulkin_no, usbd_errstr(err)); error = EIO; goto fail_0; } /* Bulk-out pipe */ err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkout_no, USBD_EXCLUSIVE_USE, &sc->sc_bulkout_pipe); if (err) { printf("%s: open bulk out error (addr %d): %s\n", USBDEVNAME(sc->sc_dev), sc->sc_bulkout_no, usbd_errstr(err)); error = EIO; goto fail_1; } /* Allocate a request and an input buffer and start reading. */ sc->sc_ixfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_ixfer == NULL) { error = ENOMEM; goto fail_2; } sc->sc_ibuf = usbd_alloc_buffer(sc->sc_ixfer, sc->sc_ibufsizepad); if (sc->sc_ibuf == NULL) { error = ENOMEM; goto fail_3; } sc->sc_oxfer = usbd_alloc_xfer(sc->sc_udev); if (sc->sc_oxfer == NULL) { error = ENOMEM; goto fail_3; } sc->sc_obuf = usbd_alloc_buffer(sc->sc_oxfer, sc->sc_obufsize + sc->sc_opkthdrlen); if (sc->sc_obuf == NULL) { error = ENOMEM; goto fail_4; } /* * Handle initial DCD. 
*/ if (ISSET(sc->sc_msr, UMSR_DCD) || (minor(dev) & UCOM_CALLOUT_MASK)) (*linesw[tp->t_line].l_modem)(tp, 1); ucomstartread(sc); } sc->sc_opening = 0; wakeup(&sc->sc_opening); splx(s); error = ttyopen(dev, tp); if (error) goto bad; error = (*linesw[tp->t_line].l_open)(dev, tp); if (error) goto bad; disc_optim(tp, &tp->t_termios, sc); DPRINTF(("%s: ucomopen: success\n", USBDEVNAME(sc->sc_dev))); sc->sc_poll = 1; sc->sc_refcnt++; return (0); fail_4: usbd_free_xfer(sc->sc_oxfer); sc->sc_oxfer = NULL; fail_3: usbd_free_xfer(sc->sc_ixfer); sc->sc_ixfer = NULL; fail_2: usbd_close_pipe(sc->sc_bulkout_pipe); sc->sc_bulkout_pipe = NULL; fail_1: usbd_close_pipe(sc->sc_bulkin_pipe); sc->sc_bulkin_pipe = NULL; fail_0: sc->sc_opening = 0; wakeup(&sc->sc_opening); splx(s); return (error); bad: if (!ISSET(tp->t_state, TS_ISOPEN)) { /* * We failed to open the device, and nobody else had it opened. * Clean up the state as appropriate. */ ucom_cleanup(sc); } DPRINTF(("%s: ucomopen: failed\n", USBDEVNAME(sc->sc_dev))); return (error); } static int ucomclose(dev_t dev, int flag, int mode, usb_proc_ptr p) { struct ucom_softc *sc; struct tty *tp; int s; USB_GET_SC(ucom, UCOMUNIT(dev), sc); tp = sc->sc_tty; DPRINTF(("%s: ucomclose: unit = %d\n", USBDEVNAME(sc->sc_dev), UCOMUNIT(dev))); if (!ISSET(tp->t_state, TS_ISOPEN)) goto quit; s = spltty(); (*linesw[tp->t_line].l_close)(tp, flag); disc_optim(tp, &tp->t_termios, sc); ttyclose(tp); splx(s); if (sc->sc_dying) goto quit; if (!ISSET(tp->t_state, TS_ISOPEN)) { /* * Although we got a last close, the device may still be in * use; e.g. if this was the dialout node, and there are still * processes waiting for carrier on the non-dialout node. 
*/ ucom_cleanup(sc); } if (sc->sc_callback->ucom_close != NULL) sc->sc_callback->ucom_close(sc->sc_parent, sc->sc_portno); quit: if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (0); } static int ucomread(dev_t dev, struct uio *uio, int flag) { struct ucom_softc *sc; struct tty *tp; int error; USB_GET_SC(ucom, UCOMUNIT(dev), sc); tp = sc->sc_tty; DPRINTF(("ucomread: tp = %p, flag = 0x%x\n", tp, flag)); if (sc->sc_dying) return (EIO); error = (*linesw[tp->t_line].l_read)(tp, uio, flag); DPRINTF(("ucomread: error = %d\n", error)); return (error); } static int ucomwrite(dev_t dev, struct uio *uio, int flag) { struct ucom_softc *sc; struct tty *tp; int error; USB_GET_SC(ucom, UCOMUNIT(dev), sc); tp = sc->sc_tty; DPRINTF(("ucomwrite: tp = %p, flag = 0x%x\n", tp, flag)); if (sc->sc_dying) return (EIO); error = (*linesw[tp->t_line].l_write)(tp, uio, flag); DPRINTF(("ucomwrite: error = %d\n", error)); return (error); } static int ucomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, usb_proc_ptr p) { struct ucom_softc *sc; struct tty *tp; int error; int s; int d; #if defined(COMPAT_43) || defined(COMPAT_SUNOS) u_long oldcmd; struct termios term; #endif USB_GET_SC(ucom, UCOMUNIT(dev), sc); tp = sc->sc_tty; if (sc->sc_dying) return (EIO); DPRINTF(("ucomioctl: cmd = 0x%08lx\n", cmd)); #if defined(COMPAT_43) || defined(COMPAT_SUNOS) term = tp->t_termios; oldcmd = cmd; error = ttsetcompat(tp, &cmd, data, &term); if (error != 0) return (error); if (cmd != oldcmd) data = (caddr_t)&term; #endif error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); if (error != ENOIOCTL) { DPRINTF(("ucomioctl: l_ioctl: error = %d\n", error)); return (error); } s = spltty(); error = ttioctl(tp, cmd, data, flag); disc_optim(tp, &tp->t_termios, sc); if (error != ENOIOCTL) { splx(s); DPRINTF(("ucomioctl: ttioctl: error = %d\n", error)); return (error); } if (sc->sc_callback->ucom_ioctl != NULL) { error = sc->sc_callback->ucom_ioctl(sc->sc_parent, sc->sc_portno, cmd, 
data, flag, p); if (error >= 0) return (error); } error = 0; DPRINTF(("ucomioctl: our cmd = 0x%08lx\n", cmd)); switch (cmd) { case TIOCSBRK: DPRINTF(("ucomioctl: TIOCSBRK\n")); ucom_break(sc, 1); break; case TIOCCBRK: DPRINTF(("ucomioctl: TIOCCBRK\n")); ucom_break(sc, 0); break; case TIOCSDTR: DPRINTF(("ucomioctl: TIOCSDTR\n")); (void)ucomctl(sc, TIOCM_DTR, DMBIS); break; case TIOCCDTR: DPRINTF(("ucomioctl: TIOCCDTR\n")); (void)ucomctl(sc, TIOCM_DTR, DMBIC); break; case TIOCMSET: d = *(int *)data; DPRINTF(("ucomioctl: TIOCMSET, 0x%x\n", d)); (void)ucomctl(sc, d, DMSET); break; case TIOCMBIS: d = *(int *)data; DPRINTF(("ucomioctl: TIOCMBIS, 0x%x\n", d)); (void)ucomctl(sc, d, DMBIS); break; case TIOCMBIC: d = *(int *)data; DPRINTF(("ucomioctl: TIOCMBIC, 0x%x\n", d)); (void)ucomctl(sc, d, DMBIC); break; case TIOCMGET: d = ucomctl(sc, 0, DMGET); DPRINTF(("ucomioctl: TIOCMGET, 0x%x\n", d)); *(int *)data = d; break; default: DPRINTF(("ucomioctl: error: our cmd = 0x%08lx\n", cmd)); error = ENOTTY; break; } splx(s); return (error); } Static int ucomctl(struct ucom_softc *sc, int bits, int how) { int mcr; int msr; int onoff; DPRINTF(("ucomctl: bits = 0x%x, how = %d\n", bits, how)); if (how == DMGET) { SET(bits, TIOCM_LE); /* always set TIOCM_LE bit */ DPRINTF(("ucomctl: DMGET: LE")); mcr = sc->sc_mcr; if (ISSET(mcr, UMCR_DTR)) { SET(bits, TIOCM_DTR); DPRINTF((" DTR")); } if (ISSET(mcr, UMCR_RTS)) { SET(bits, TIOCM_RTS); DPRINTF((" RTS")); } msr = sc->sc_msr; if (ISSET(msr, UMSR_CTS)) { SET(bits, TIOCM_CTS); DPRINTF((" CTS")); } if (ISSET(msr, UMSR_DCD)) { SET(bits, TIOCM_CD); DPRINTF((" CD")); } if (ISSET(msr, UMSR_DSR)) { SET(bits, TIOCM_DSR); DPRINTF((" DSR")); } if (ISSET(msr, UMSR_RI)) { SET(bits, TIOCM_RI); DPRINTF((" RI")); } DPRINTF(("\n")); return (bits); } mcr = 0; if (ISSET(bits, TIOCM_DTR)) SET(mcr, UMCR_DTR); if (ISSET(bits, TIOCM_RTS)) SET(mcr, UMCR_RTS); switch (how) { case DMSET: sc->sc_mcr = mcr; break; case DMBIS: sc->sc_mcr |= mcr; break; case DMBIC: 
sc->sc_mcr &= ~mcr; break; } onoff = ISSET(sc->sc_mcr, UMCR_DTR) ? 1 : 0; ucom_dtr(sc, onoff); onoff = ISSET(sc->sc_mcr, UMCR_RTS) ? 1 : 0; ucom_rts(sc, onoff); return (0); } Static void ucom_break(struct ucom_softc *sc, int onoff) { DPRINTF(("ucom_break: onoff = %d\n", onoff)); if (sc->sc_callback->ucom_set == NULL) return; sc->sc_callback->ucom_set(sc->sc_parent, sc->sc_portno, UCOM_SET_BREAK, onoff); } Static void ucom_dtr(struct ucom_softc *sc, int onoff) { DPRINTF(("ucom_dtr: onoff = %d\n", onoff)); if (sc->sc_callback->ucom_set == NULL) return; sc->sc_callback->ucom_set(sc->sc_parent, sc->sc_portno, UCOM_SET_DTR, onoff); } Static void ucom_rts(struct ucom_softc *sc, int onoff) { DPRINTF(("ucom_rts: onoff = %d\n", onoff)); if (sc->sc_callback->ucom_set == NULL) return; sc->sc_callback->ucom_set(sc->sc_parent, sc->sc_portno, UCOM_SET_RTS, onoff); } void ucom_status_change(struct ucom_softc *sc) { struct tty *tp = sc->sc_tty; u_char old_msr; int onoff; if (sc->sc_callback->ucom_get_status == NULL) { sc->sc_lsr = 0; sc->sc_msr = 0; return; } old_msr = sc->sc_msr; sc->sc_callback->ucom_get_status(sc->sc_parent, sc->sc_portno, &sc->sc_lsr, &sc->sc_msr); if (ISSET((sc->sc_msr ^ old_msr), UMSR_DCD)) { if (sc->sc_poll == 0) return; onoff = ISSET(sc->sc_msr, UMSR_DCD) ? 1 : 0; DPRINTF(("ucom_status_change: DCD changed to %d\n", onoff)); (*linesw[tp->t_line].l_modem)(tp, onoff); } } Static int ucomparam(struct tty *tp, struct termios *t) { struct ucom_softc *sc; int error; usbd_status uerr; USB_GET_SC(ucom, UCOMUNIT(tp->t_dev), sc); if (sc->sc_dying) return (EIO); DPRINTF(("ucomparam: sc = %p\n", sc)); /* Check requested parameters. */ if (t->c_ospeed < 0) { DPRINTF(("ucomparam: negative ospeed\n")); return (EINVAL); } if (t->c_ispeed && t->c_ispeed != t->c_ospeed) { DPRINTF(("ucomparam: mismatch ispeed and ospeed\n")); return (EINVAL); } /* * If there were no changes, don't do anything. 
This avoids dropping * input and improves performance when all we did was frob things like * VMIN and VTIME. */ if (tp->t_ospeed == t->c_ospeed && tp->t_cflag == t->c_cflag) return (0); /* And copy to tty. */ tp->t_ispeed = 0; tp->t_ospeed = t->c_ospeed; tp->t_cflag = t->c_cflag; if (sc->sc_callback->ucom_param == NULL) return (0); ucomstopread(sc); error = sc->sc_callback->ucom_param(sc->sc_parent, sc->sc_portno, t); if (error) { DPRINTF(("ucomparam: callback: error = %d\n", error)); return (error); } ttsetwater(tp); if (t->c_cflag & CRTS_IFLOW) { sc->sc_state |= UCS_RTS_IFLOW; } else if (sc->sc_state & UCS_RTS_IFLOW) { sc->sc_state &= ~UCS_RTS_IFLOW; (void)ucomctl(sc, UMCR_RTS, DMBIS); } disc_optim(tp, t, sc); uerr = ucomstartread(sc); if (uerr != USBD_NORMAL_COMPLETION) return (EIO); return (0); } Static void ucomstart(struct tty *tp) { struct ucom_softc *sc; struct cblock *cbp; usbd_status err; int s; u_char *data; int cnt; USB_GET_SC(ucom, UCOMUNIT(tp->t_dev), sc); DPRINTF(("ucomstart: sc = %p\n", sc)); if (sc->sc_dying) return; s = spltty(); if (tp->t_state & TS_TBLOCK) { if (ISSET(sc->sc_mcr, UMCR_RTS) && ISSET(sc->sc_state, UCS_RTS_IFLOW)) { DPRINTF(("ucomstart: clear RTS\n")); (void)ucomctl(sc, UMCR_RTS, DMBIC); } } else { if (!ISSET(sc->sc_mcr, UMCR_RTS) && tp->t_rawq.c_cc <= tp->t_ilowat && ISSET(sc->sc_state, UCS_RTS_IFLOW)) { DPRINTF(("ucomstart: set RTS\n")); (void)ucomctl(sc, UMCR_RTS, DMBIS); } } if (ISSET(tp->t_state, TS_BUSY | TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); DPRINTF(("ucomstart: stopped\n")); goto out; } if (tp->t_outq.c_cc <= tp->t_olowat) { if (ISSET(tp->t_state, TS_SO_OLOWAT)) { CLR(tp->t_state, TS_SO_OLOWAT); wakeup(TSA_OLOWAT(tp)); } selwakeuppri(&tp->t_wsel, TTIPRI); if (tp->t_outq.c_cc == 0) { if (ISSET(tp->t_state, TS_BUSY | TS_SO_OCOMPLETE) == TS_SO_OCOMPLETE && tp->t_outq.c_cc == 0) { CLR(tp->t_state, TS_SO_OCOMPLETE); wakeup(TSA_OCOMPLETE(tp)); } goto out; } } /* Grab the first contiguous region of buffer space. 
*/ data = tp->t_outq.c_cf; cbp = (struct cblock *) ((intptr_t) tp->t_outq.c_cf & ~CROUND); cnt = min((char *) (cbp+1) - tp->t_outq.c_cf, tp->t_outq.c_cc); if (cnt == 0) { DPRINTF(("ucomstart: cnt == 0\n")); goto out; } SET(tp->t_state, TS_BUSY); if (cnt > sc->sc_obufsize) { DPRINTF(("ucomstart: big buffer %d chars\n", cnt)); cnt = sc->sc_obufsize; } if (sc->sc_callback->ucom_write != NULL) sc->sc_callback->ucom_write(sc->sc_parent, sc->sc_portno, sc->sc_obuf, data, &cnt); else memcpy(sc->sc_obuf, data, cnt); DPRINTF(("ucomstart: %d chars\n", cnt)); usbd_setup_xfer(sc->sc_oxfer, sc->sc_bulkout_pipe, (usbd_private_handle)sc, sc->sc_obuf, cnt, USBD_NO_COPY, USBD_NO_TIMEOUT, ucomwritecb); /* What can we do on error? */ err = usbd_transfer(sc->sc_oxfer); if (err != USBD_IN_PROGRESS) printf("ucomstart: err=%s\n", usbd_errstr(err)); ttwwakeup(tp); out: splx(s); } Static void ucomstop(struct tty *tp, int flag) { struct ucom_softc *sc; int s; USB_GET_SC(ucom, UCOMUNIT(tp->t_dev), sc); DPRINTF(("ucomstop: %d\n", flag)); if (flag & FREAD) { DPRINTF(("ucomstop: read\n")); ucomstopread(sc); } if (flag & FWRITE) { DPRINTF(("ucomstop: write\n")); s = spltty(); if (ISSET(tp->t_state, TS_BUSY)) { /* XXX do what? */ if (!ISSET(tp->t_state, TS_TTSTOP)) SET(tp->t_state, TS_FLUSH); } splx(s); } DPRINTF(("ucomstop: done\n")); } Static void ucomwritecb(usbd_xfer_handle xfer, usbd_private_handle p, usbd_status status) { struct ucom_softc *sc = (struct ucom_softc *)p; struct tty *tp = sc->sc_tty; u_int32_t cc; int s; DPRINTF(("ucomwritecb: status = %d\n", status)); if (status == USBD_CANCELLED || sc->sc_dying) goto error; if (status != USBD_NORMAL_COMPLETION) { printf("%s: ucomwritecb: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_bulkin_pipe); /* XXX we should restart after some delay. 
*/
		goto error;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &cc, NULL);
	DPRINTF(("ucomwritecb: cc = %d\n", cc));
	if (cc <= sc->sc_opkthdrlen) {
		printf("%s: sent size too small, cc = %d\n",
		       USBDEVNAME(sc->sc_dev), cc);
		goto error;
	}

	/* convert from USB bytes to tty bytes */
	cc -= sc->sc_opkthdrlen;

	/* Transfer done: clear TS_BUSY and flush the sent bytes from t_outq. */
	s = spltty();
	CLR(tp->t_state, TS_BUSY);
	if (ISSET(tp->t_state, TS_FLUSH))
		CLR(tp->t_state, TS_FLUSH);
	else
		ndflush(&tp->t_outq, cc);
	(*linesw[tp->t_line].l_start)(tp);
	splx(s);

	return;

error:
	s = spltty();
	CLR(tp->t_state, TS_BUSY);
	splx(s);
	return;
}

/*
 * Queue the bulk-in transfer so inbound data lands in sc_ibuf and
 * completes via ucomreadcb.  Clears the UCS_RXSTOP flag set by
 * ucomstopread().  Returns USBD_NORMAL_COMPLETION when queued (or the
 * pipe is already gone), otherwise the usbd error.
 */
Static usbd_status
ucomstartread(struct ucom_softc *sc)
{
	usbd_status err;

	DPRINTF(("ucomstartread: start\n"));

	sc->sc_state &= ~UCS_RXSTOP;

	if (sc->sc_bulkin_pipe == NULL)
		return (USBD_NORMAL_COMPLETION);

	usbd_setup_xfer(sc->sc_ixfer, sc->sc_bulkin_pipe,
			(usbd_private_handle)sc,
			sc->sc_ibuf, sc->sc_ibufsize,
			USBD_SHORT_XFER_OK | USBD_NO_COPY,
			USBD_NO_TIMEOUT, ucomreadcb);

	err = usbd_transfer(sc->sc_ixfer);
	if (err != USBD_IN_PROGRESS) {
		DPRINTF(("ucomstartread: err = %s\n", usbd_errstr(err)));
		return (err);
	}

	return (USBD_NORMAL_COMPLETION);
}

/*
 * Bulk-in completion callback: hand received characters to the tty
 * layer and resubmit the read.  Errors are only reported when the read
 * was not deliberately stopped (UCS_RXSTOP).
 */
Static void
ucomreadcb(usbd_xfer_handle xfer, usbd_private_handle p, usbd_status status)
{
	struct ucom_softc *sc = (struct ucom_softc *)p;
	struct tty *tp = sc->sc_tty;
	int (*rint) (int c, struct tty *tp) = linesw[tp->t_line].l_rint;
	usbd_status err;
	u_int32_t cc;
	u_char *cp;
	int lostcc;
	int s;

	DPRINTF(("ucomreadcb: status = %d\n", status));

	if (status != USBD_NORMAL_COMPLETION) {
		if (!(sc->sc_state & UCS_RXSTOP))
			printf("%s: ucomreadcb: %s\n",
			       USBDEVNAME(sc->sc_dev), usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_bulkin_pipe);
		/* XXX we should restart after some delay.
*/
		return;
	}

	usbd_get_xfer_status(xfer, NULL, (void **)&cp, &cc, NULL);
	DPRINTF(("ucomreadcb: got %d chars, tp = %p\n", cc, tp));
	if (cc == 0)
		goto resubmit;

	/* Let the hardware driver strip any device-specific framing. */
	if (sc->sc_callback->ucom_read != NULL)
		sc->sc_callback->ucom_read(sc->sc_parent, sc->sc_portno,
					   &cp, &cc);

	if (cc > sc->sc_ibufsize) {
		printf("%s: invalid receive data size, %d chars\n",
		       USBDEVNAME(sc->sc_dev), cc);
		goto resubmit;
	}
	if (cc < 1)
		goto resubmit;

	s = spltty();
	if (tp->t_state & TS_CAN_BYPASS_L_RINT) {
		/*
		 * Fast path: queue the whole buffer directly onto the raw
		 * queue, applying input flow control (RTS and/or IXOFF)
		 * when the high-water mark would be crossed.
		 */
		if (tp->t_rawq.c_cc + cc > tp->t_ihiwat
		    && (sc->sc_state & UCS_RTS_IFLOW
			|| tp->t_iflag & IXOFF)
		    && !(tp->t_state & TS_TBLOCK))
			ttyblock(tp);
		lostcc = b_to_q((char *)cp, cc, &tp->t_rawq);
		tp->t_rawcc += cc;
		ttwakeup(tp);
		/* Restart stopped output if any received char may resume it. */
		if (tp->t_state & TS_TTSTOP
		    && (tp->t_iflag & IXANY
			|| tp->t_cc[VSTART] == tp->t_cc[VSTOP])) {
			tp->t_state &= ~TS_TTSTOP;
			tp->t_lflag &= ~FLUSHO;
			ucomstart(tp);
		}
		if (lostcc > 0)
			printf("%s: lost %d chars\n", USBDEVNAME(sc->sc_dev),
			       lostcc);
	} else {
		/* Give characters to tty layer. */
		while (cc > 0) {
			DPRINTFN(7, ("ucomreadcb: char = 0x%02x\n", *cp));
			if ((*rint)(*cp, tp) == -1) {
				/* XXX what should we do? */
				printf("%s: lost %d chars\n",
				       USBDEVNAME(sc->sc_dev), cc);
				break;
			}
			cc--;
			cp++;
		}
	}
	splx(s);

resubmit:
	err = ucomstartread(sc);
	if (err) {
		printf("%s: read start failed\n", USBDEVNAME(sc->sc_dev));
		/* XXX what should we do now?
*/ } if ((sc->sc_state & UCS_RTS_IFLOW) && !ISSET(sc->sc_mcr, UMCR_RTS) && !(tp->t_state & TS_TBLOCK)) ucomctl(sc, UMCR_RTS, DMBIS); } Static void ucom_cleanup(struct ucom_softc *sc) { DPRINTF(("ucom_cleanup: closing pipes\n")); ucom_shutdown(sc); if (sc->sc_bulkin_pipe != NULL) { usbd_abort_pipe(sc->sc_bulkin_pipe); usbd_close_pipe(sc->sc_bulkin_pipe); sc->sc_bulkin_pipe = NULL; } if (sc->sc_bulkout_pipe != NULL) { usbd_abort_pipe(sc->sc_bulkout_pipe); usbd_close_pipe(sc->sc_bulkout_pipe); sc->sc_bulkout_pipe = NULL; } if (sc->sc_ixfer != NULL) { usbd_free_xfer(sc->sc_ixfer); sc->sc_ixfer = NULL; } if (sc->sc_oxfer != NULL) { usbd_free_xfer(sc->sc_oxfer); sc->sc_oxfer = NULL; } } Static void ucomstopread(struct ucom_softc *sc) { usbd_status err; DPRINTF(("ucomstopread: enter\n")); if (!(sc->sc_state & UCS_RXSTOP)) { sc->sc_state |= UCS_RXSTOP; if (sc->sc_bulkin_pipe == NULL) { DPRINTF(("ucomstopread: bulkin pipe NULL\n")); return; } err = usbd_abort_pipe(sc->sc_bulkin_pipe); if (err) { DPRINTF(("ucomstopread: err = %s\n", usbd_errstr(err))); } } DPRINTF(("ucomstopread: leave\n")); } static void disc_optim(struct tty *tp, struct termios *t, struct ucom_softc *sc) { if (!(t->c_iflag & (ICRNL | IGNCR | IMAXBEL | INLCR | ISTRIP | IXON)) && (!(t->c_iflag & BRKINT) || (t->c_iflag & IGNBRK)) && (!(t->c_iflag & PARMRK) || (t->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK)) && !(t->c_lflag & (ECHO | ICANON | IEXTEN | ISIG | PENDIN)) && linesw[tp->t_line].l_rint == ttyinput) { DPRINTF(("disc_optim: bypass l_rint\n")); tp->t_state |= TS_CAN_BYPASS_L_RINT; } else { DPRINTF(("disc_optim: can't bypass l_rint\n")); tp->t_state &= ~TS_CAN_BYPASS_L_RINT; } sc->hotchar = linesw[tp->t_line].l_hotchar; } Index: head/sys/dev/usb/uftdi.c =================================================================== --- head/sys/dev/usb/uftdi.c (revision 129878) +++ head/sys/dev/usb/uftdi.c (revision 129879) @@ -1,636 +1,637 @@ /* $NetBSD: uftdi.c,v 1.13 2002/09/23 05:51:23 simonb Exp $ */ /* * 
Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * FTDI FT8U100AX serial adapter driver */ #include #include #include #include #include +#include #include #include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include #include #include #include #ifdef USB_DEBUG static int uftdidebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, uftdi, CTLFLAG_RW, 0, "USB uftdi"); SYSCTL_INT(_hw_usb_uftdi, OID_AUTO, debug, CTLFLAG_RW, &uftdidebug, 0, "uftdi debug level"); #define DPRINTF(x) do { \ if (uftdidebug) \ logprintf x; \ } while (0) #define DPRINTFN(n, x) do { \ if (uftdidebug > (n)) \ logprintf x; \ } while (0) #else #define DPRINTF(x) #define DPRINTFN(n,x) #endif #define UFTDI_CONFIG_INDEX 0 #define UFTDI_IFACE_INDEX 0 /* * These are the maximum number of bytes transferred per frame. * The output buffer size cannot be increased due to the size encoding. */ #define UFTDIIBUFSIZE 64 #define UFTDIOBUFSIZE 64 struct uftdi_softc { struct ucom_softc sc_ucom; usbd_interface_handle sc_iface; /* interface */ enum uftdi_type sc_type; u_int sc_hdrlen; u_char sc_msr; u_char sc_lsr; u_int last_lcr; }; Static void uftdi_get_status(void *, int portno, u_char *lsr, u_char *msr); Static void uftdi_set(void *, int, int, int); Static int uftdi_param(void *, int, struct termios *); Static int uftdi_open(void *sc, int portno); Static void uftdi_read(void *sc, int portno, u_char **ptr,u_int32_t *count); Static void uftdi_write(void *sc, int portno, u_char *to, u_char *from, u_int32_t *count); Static void uftdi_break(void *sc, int portno, int onoff); struct ucom_callback uftdi_callback = { uftdi_get_status, uftdi_set, uftdi_param, NULL, uftdi_open, NULL, uftdi_read, uftdi_write, }; USB_MATCH(uftdi) { USB_MATCH_START(uftdi, uaa); if (uaa->iface != NULL) return (UMATCH_NONE); DPRINTFN(20,("uftdi: vendor=0x%x, product=0x%x\n", uaa->vendor, uaa->product)); if (uaa->vendor == USB_VENDOR_FTDI && (uaa->product == 
USB_PRODUCT_FTDI_SERIAL_8U100AX || uaa->product == USB_PRODUCT_FTDI_SERIAL_8U232AM || uaa->product == USB_PRODUCT_FTDI_CFA_631 || uaa->product == USB_PRODUCT_FTDI_CFA_632 || uaa->product == USB_PRODUCT_FTDI_CFA_633 || uaa->product == USB_PRODUCT_FTDI_CFA_634 || uaa->product == USB_PRODUCT_FTDI_USBSERIAL || uaa->product == USB_PRODUCT_FTDI_MX2_3 || uaa->product == USB_PRODUCT_FTDI_MX4_5 || uaa->product == USB_PRODUCT_FTDI_LK202 || uaa->product == USB_PRODUCT_FTDI_LK204)) return (UMATCH_VENDOR_PRODUCT); return (UMATCH_NONE); } USB_ATTACH(uftdi) { USB_ATTACH_START(uftdi, sc, uaa); usbd_device_handle dev = uaa->device; usbd_interface_handle iface; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char *devinfo; const char *devname; int i; usbd_status err; struct ucom_softc *ucom = &sc->sc_ucom; DPRINTFN(10,("\nuftdi_attach: sc=%p\n", sc)); devinfo = malloc(1024, M_USBDEV, M_WAITOK); ucom->sc_dev = self; ucom->sc_udev = dev; devname = USBDEVNAME(ucom->sc_dev); /* Move the device into the configured state. 
*/ err = usbd_set_config_index(dev, UFTDI_CONFIG_INDEX, 1); if (err) { printf("\n%s: failed to set configuration, err=%s\n", devname, usbd_errstr(err)); goto bad; } err = usbd_device2interface_handle(dev, UFTDI_IFACE_INDEX, &iface); if (err) { printf("\n%s: failed to get interface, err=%s\n", devname, usbd_errstr(err)); goto bad; } usbd_devinfo(dev, 0, devinfo); /* USB_ATTACH_SETUP;*/ printf("%s: %s\n", devname, devinfo); id = usbd_get_interface_descriptor(iface); ucom->sc_iface = iface; switch( uaa->product ){ case USB_PRODUCT_FTDI_SERIAL_8U100AX: sc->sc_type = UFTDI_TYPE_SIO; sc->sc_hdrlen = 1; break; case USB_PRODUCT_FTDI_SERIAL_8U232AM: case USB_PRODUCT_FTDI_CFA_631: case USB_PRODUCT_FTDI_CFA_632: case USB_PRODUCT_FTDI_CFA_633: case USB_PRODUCT_FTDI_CFA_634: case USB_PRODUCT_FTDI_USBSERIAL: case USB_PRODUCT_FTDI_MX2_3: case USB_PRODUCT_FTDI_MX4_5: case USB_PRODUCT_FTDI_LK202: case USB_PRODUCT_FTDI_LK204: sc->sc_type = UFTDI_TYPE_8U232AM; sc->sc_hdrlen = 0; break; default: /* Can't happen */ goto bad; } ucom->sc_bulkin_no = ucom->sc_bulkout_no = -1; for (i = 0; i < id->bNumEndpoints; i++) { int addr, dir, attr; ed = usbd_interface2endpoint_descriptor(iface, i); if (ed == NULL) { printf("%s: could not read endpoint descriptor" ": %s\n", devname, usbd_errstr(err)); goto bad; } addr = ed->bEndpointAddress; dir = UE_GET_DIR(ed->bEndpointAddress); attr = ed->bmAttributes & UE_XFERTYPE; if (dir == UE_DIR_IN && attr == UE_BULK) ucom->sc_bulkin_no = addr; else if (dir == UE_DIR_OUT && attr == UE_BULK) ucom->sc_bulkout_no = addr; else { printf("%s: unexpected endpoint\n", devname); goto bad; } } if (ucom->sc_bulkin_no == -1) { printf("%s: Could not find data bulk in\n", devname); goto bad; } if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", devname); goto bad; } ucom->sc_parent = sc; ucom->sc_portno = FTDI_PIT_SIOA; /* bulkin, bulkout set above */ ucom->sc_ibufsize = UFTDIIBUFSIZE; ucom->sc_obufsize = UFTDIOBUFSIZE - sc->sc_hdrlen; 
ucom->sc_ibufsizepad = UFTDIIBUFSIZE; ucom->sc_opkthdrlen = sc->sc_hdrlen; ucom->sc_callback = &uftdi_callback; #if 0 usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, ucom->sc_udev, USBDEV(ucom->sc_dev)); #endif DPRINTF(("uftdi: in=0x%x out=0x%x\n", ucom->sc_bulkin_no, ucom->sc_bulkout_no)); ucom_attach(&sc->sc_ucom); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; bad: DPRINTF(("uftdi_attach: ATTACH ERROR\n")); ucom->sc_dying = 1; free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } #if 0 int uftdi_activate(device_ptr_t self, enum devact act) { struct uftdi_softc *sc = (struct uftdi_softc *)self; int rv = 0; switch (act) { case DVACT_ACTIVATE: return (EOPNOTSUPP); case DVACT_DEACTIVATE: if (sc->sc_subdev != NULL) rv = config_deactivate(sc->sc_subdev); sc->sc_ucom.sc_dying = 1; break; } return (rv); } #endif #if 1 USB_DETACH(uftdi) { USB_DETACH_START(uftdi, sc); int rv = 0; DPRINTF(("uftdi_detach: sc=%p\n", sc)); sc->sc_ucom.sc_dying = 1; rv = ucom_detach(&sc->sc_ucom); return rv; } #endif Static int uftdi_open(void *vsc, int portno) { struct uftdi_softc *sc = vsc; struct ucom_softc *ucom = &sc->sc_ucom; usb_device_request_t req; usbd_status err; struct termios t; DPRINTF(("uftdi_open: sc=%p\n", sc)); if (ucom->sc_dying) return (EIO); /* Perform a full reset on the device */ req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_RESET; USETW(req.wValue, FTDI_SIO_RESET_SIO); USETW(req.wIndex, portno); USETW(req.wLength, 0); err = usbd_do_request(ucom->sc_udev, &req, NULL); if (err) return (EIO); /* Set 9600 baud, 2 stop bits, no parity, 8 bits */ t.c_ospeed = 9600; t.c_cflag = CSTOPB | CS8; (void)uftdi_param(sc, portno, &t); /* Turn on RTS/CTS flow control */ req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_SET_FLOW_CTRL; USETW(req.wValue, 0); USETW2(req.wIndex, FTDI_SIO_RTS_CTS_HS, portno); USETW(req.wLength, 0); err = usbd_do_request(ucom->sc_udev, &req, NULL); if (err) return (EIO); return (0); } Static void uftdi_read(void *vsc, int 
portno, u_char **ptr, u_int32_t *count)
{
	/*
	 * Post-process a bulk-in transfer.  Every packet from the chip
	 * starts with a two-byte status header (modem status, line
	 * status); latch status changes and strip the header so the
	 * ucom layer only sees payload bytes.
	 * NOTE(review): assumes *count >= 2 -- an FTDI bulk-in packet
	 * always carries the header, but this is not checked here.
	 */
	struct uftdi_softc *sc = vsc;
	u_char msr, lsr;

	DPRINTFN(15,("uftdi_read: sc=%p, port=%d count=%d\n",
	    sc, portno, *count));

	msr = FTDI_GET_MSR(*ptr);
	lsr = FTDI_GET_LSR(*ptr);

#ifdef USB_DEBUG
	if (*count != 2)
		DPRINTFN(10,("uftdi_read: sc=%p, port=%d count=%d data[0]="
		    "0x%02x\n", sc, portno, *count, (*ptr)[2]));
#endif

	/* Only bits covered by FTDI_LSR_MASK are considered LSR changes. */
	if (sc->sc_msr != msr ||
	    (sc->sc_lsr & FTDI_LSR_MASK) != (lsr & FTDI_LSR_MASK)) {
		DPRINTF(("uftdi_read: status change msr=0x%02x(0x%02x) "
		    "lsr=0x%02x(0x%02x)\n", msr, sc->sc_msr,
		    lsr, sc->sc_lsr));
		sc->sc_msr = msr;
		sc->sc_lsr = lsr;
		/* Propagate carrier/line state to the tty layer. */
		ucom_status_change(&sc->sc_ucom);
	}

	/* Pick up status and adjust data part. */
	*ptr += 2;
	*count -= 2;
}

/*
 * Pre-process a bulk-out transfer: for chips with a header
 * (sc_hdrlen > 0, set for the original SIO type in attach),
 * prepend the length tag byte, then copy the payload after it.
 */
Static void
uftdi_write(void *vsc, int portno, u_char *to, u_char *from, u_int32_t *count)
{
	struct uftdi_softc *sc = vsc;

	DPRINTFN(10,("uftdi_write: sc=%p, port=%d count=%u data[0]=0x%02x\n",
	    vsc, portno, *count, from[0]));

	/* Make length tag and copy data */
	if (sc->sc_hdrlen > 0)
		*to = FTDI_OUT_TAG(*count, portno);
	memcpy(to + sc->sc_hdrlen, from, *count);
	*count += sc->sc_hdrlen;
}

/*
 * Set or clear a modem control line (DTR/RTS) or the break state
 * via a vendor control request.
 */
Static void
uftdi_set(void *vsc, int portno, int reg, int onoff)
{
	struct uftdi_softc *sc = vsc;
	struct ucom_softc *ucom = vsc;
	usb_device_request_t req;
	int ctl;

	DPRINTF(("uftdi_set: sc=%p, port=%d reg=%d onoff=%d\n",
	    vsc, portno, reg, onoff));

	switch (reg) {
	case UCOM_SET_DTR:
		ctl = onoff ? FTDI_SIO_SET_DTR_HIGH : FTDI_SIO_SET_DTR_LOW;
		break;
	case UCOM_SET_RTS:
		ctl = onoff ?
FTDI_SIO_SET_RTS_HIGH : FTDI_SIO_SET_RTS_LOW; break; case UCOM_SET_BREAK: uftdi_break(sc, portno, onoff); return; default: return; } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_MODEM_CTRL; USETW(req.wValue, ctl); USETW(req.wIndex, portno); USETW(req.wLength, 0); DPRINTFN(2,("uftdi_set: reqtype=0x%02x req=0x%02x value=0x%04x " "index=0x%04x len=%d\n", req.bmRequestType, req.bRequest, UGETW(req.wValue), UGETW(req.wIndex), UGETW(req.wLength))); (void)usbd_do_request(ucom->sc_udev, &req, NULL); } Static int uftdi_param(void *vsc, int portno, struct termios *t) { struct uftdi_softc *sc = vsc; struct ucom_softc *ucom = &sc->sc_ucom; usb_device_request_t req; usbd_status err; int rate=0, data, flow; DPRINTF(("uftdi_param: sc=%p\n", sc)); if (ucom->sc_dying) return (EIO); switch (sc->sc_type) { case UFTDI_TYPE_SIO: switch (t->c_ospeed) { case 300: rate = ftdi_sio_b300; break; case 600: rate = ftdi_sio_b600; break; case 1200: rate = ftdi_sio_b1200; break; case 2400: rate = ftdi_sio_b2400; break; case 4800: rate = ftdi_sio_b4800; break; case 9600: rate = ftdi_sio_b9600; break; case 19200: rate = ftdi_sio_b19200; break; case 38400: rate = ftdi_sio_b38400; break; case 57600: rate = ftdi_sio_b57600; break; case 115200: rate = ftdi_sio_b115200; break; default: return (EINVAL); } break; case UFTDI_TYPE_8U232AM: switch(t->c_ospeed) { case 300: rate = ftdi_8u232am_b300; break; case 600: rate = ftdi_8u232am_b600; break; case 1200: rate = ftdi_8u232am_b1200; break; case 2400: rate = ftdi_8u232am_b2400; break; case 4800: rate = ftdi_8u232am_b4800; break; case 9600: rate = ftdi_8u232am_b9600; break; case 19200: rate = ftdi_8u232am_b19200; break; case 38400: rate = ftdi_8u232am_b38400; break; case 57600: rate = ftdi_8u232am_b57600; break; case 115200: rate = ftdi_8u232am_b115200; break; case 230400: rate = ftdi_8u232am_b230400; break; case 460800: rate = ftdi_8u232am_b460800; break; case 921600: rate = ftdi_8u232am_b921600; break; default: return (EINVAL); } 
break; } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_SET_BAUD_RATE; USETW(req.wValue, rate); USETW(req.wIndex, portno); USETW(req.wLength, 0); DPRINTFN(2,("uftdi_param: reqtype=0x%02x req=0x%02x value=0x%04x " "index=0x%04x len=%d\n", req.bmRequestType, req.bRequest, UGETW(req.wValue), UGETW(req.wIndex), UGETW(req.wLength))); err = usbd_do_request(ucom->sc_udev, &req, NULL); if (err) return (EIO); if (ISSET(t->c_cflag, CSTOPB)) data = FTDI_SIO_SET_DATA_STOP_BITS_2; else data = FTDI_SIO_SET_DATA_STOP_BITS_1; if (ISSET(t->c_cflag, PARENB)) { if (ISSET(t->c_cflag, PARODD)) data |= FTDI_SIO_SET_DATA_PARITY_ODD; else data |= FTDI_SIO_SET_DATA_PARITY_EVEN; } else data |= FTDI_SIO_SET_DATA_PARITY_NONE; switch (ISSET(t->c_cflag, CSIZE)) { case CS5: data |= FTDI_SIO_SET_DATA_BITS(5); break; case CS6: data |= FTDI_SIO_SET_DATA_BITS(6); break; case CS7: data |= FTDI_SIO_SET_DATA_BITS(7); break; case CS8: data |= FTDI_SIO_SET_DATA_BITS(8); break; } sc->last_lcr = data; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_SET_DATA; USETW(req.wValue, data); USETW(req.wIndex, portno); USETW(req.wLength, 0); DPRINTFN(2,("uftdi_param: reqtype=0x%02x req=0x%02x value=0x%04x " "index=0x%04x len=%d\n", req.bmRequestType, req.bRequest, UGETW(req.wValue), UGETW(req.wIndex), UGETW(req.wLength))); err = usbd_do_request(ucom->sc_udev, &req, NULL); if (err) return (EIO); if (ISSET(t->c_cflag, CRTSCTS)) { flow = FTDI_SIO_RTS_CTS_HS; USETW(req.wValue, 0); } else if (ISSET(t->c_iflag, IXON|IXOFF)) { flow = FTDI_SIO_XON_XOFF_HS; USETW2(req.wValue, t->c_cc[VSTOP], t->c_cc[VSTART]); } else { flow = FTDI_SIO_DISABLE_FLOW_CTRL; USETW(req.wValue, 0); } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_SET_FLOW_CTRL; USETW2(req.wIndex, flow, portno); USETW(req.wLength, 0); err = usbd_do_request(ucom->sc_udev, &req, NULL); if (err) return (EIO); return (0); } void uftdi_get_status(void *vsc, int portno, u_char *lsr, u_char *msr) { struct 
uftdi_softc *sc = vsc; DPRINTF(("uftdi_status: msr=0x%02x lsr=0x%02x\n", sc->sc_msr, sc->sc_lsr)); if (msr != NULL) *msr = sc->sc_msr; if (lsr != NULL) *lsr = sc->sc_lsr; } void uftdi_break(void *vsc, int portno, int onoff) { struct uftdi_softc *sc = vsc; struct ucom_softc *ucom = vsc; usb_device_request_t req; int data; DPRINTF(("uftdi_break: sc=%p, port=%d onoff=%d\n", vsc, portno, onoff)); if (onoff) { data = sc->last_lcr | FTDI_SIO_SET_BREAK; } else { data = sc->last_lcr; } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = FTDI_SIO_SET_DATA; USETW(req.wValue, data); USETW(req.wIndex, portno); USETW(req.wLength, 0); (void)usbd_do_request(ucom->sc_udev, &req, NULL); } Static device_method_t uftdi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, uftdi_match), DEVMETHOD(device_attach, uftdi_attach), DEVMETHOD(device_detach, uftdi_detach), { 0, 0 } }; Static driver_t uftdi_driver = { "ucom", uftdi_methods, sizeof (struct uftdi_softc) }; DRIVER_MODULE(uftdi, uhub, uftdi_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(uftdi, usb, 1, 1, 1); MODULE_DEPEND(uftdi, ucom,UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); Index: head/sys/dev/usb/umct.c =================================================================== --- head/sys/dev/usb/umct.c (revision 129878) +++ head/sys/dev/usb/umct.c (revision 129879) @@ -1,515 +1,516 @@ /*- * Copyright (c) 2003 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Driver for the MCT (Magic Control Technology) USB-RS232 Converter. * Based on the superb documentation from the linux mct_u232 driver by * Wolfgang Grandeggar . * This device smells a lot like the Belkin F5U103, except that it has * suffered some mild brain-damage. This driver is based off of the ubsa.c * driver from Alexander Kabaev . Merging the two together * might be useful, though the subtle differences might lead to lots of * #ifdef's. 
*/ #include #include #include #include +#include #include #include #include #include #include #include #include #include /* The UMCT advertises the standard 8250 UART registers */ #define UMCT_GET_MSR 2 /* Get Modem Status Register */ #define UMCT_GET_MSR_SIZE 1 #define UMCT_GET_LCR 6 /* Get Line Control Register */ #define UMCT_GET_LCR_SIZE 1 #define UMCT_SET_BAUD 5 /* Set the Baud Rate Divisor */ #define UMCT_SET_BAUD_SIZE 4 #define UMCT_SET_LCR 7 /* Set Line Control Register */ #define UMCT_SET_LCR_SIZE 1 #define UMCT_SET_MCR 10 /* Set Modem Control Register */ #define UMCT_SET_MCR_SIZE 1 #define UMCT_INTR_INTERVAL 100 #define UMCT_IFACE_INDEX 0 #define UMCT_CONFIG_INDEX 1 struct umct_softc { struct ucom_softc sc_ucom; int sc_iface_number; usbd_interface_handle sc_intr_iface; int sc_intr_number; usbd_pipe_handle sc_intr_pipe; u_char *sc_intr_buf; int sc_isize; uint8_t sc_lsr; uint8_t sc_msr; uint8_t sc_lcr; uint8_t sc_mcr; void *sc_swicookie; }; Static void umct_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void umct_get_status(void *, int, u_char *, u_char *); Static void umct_set(void *, int, int, int); Static int umct_param(void *, int, struct termios *); Static int umct_open(void *, int); Static void umct_close(void *, int); Static void umct_notify(void *); Static struct ucom_callback umct_callback = { umct_get_status, /* ucom_get_status */ umct_set, /* ucom_set */ umct_param, /* ucom_param */ NULL, /* ucom_ioctl */ umct_open, /* ucom_open */ umct_close, /* ucom_close */ NULL, /* ucom_read */ NULL /* ucom_write */ }; Static const struct umct_product { uint16_t vendor; uint16_t product; } umct_products[] = { { USB_VENDOR_MCT, USB_PRODUCT_MCT_USB232 }, { USB_VENDOR_MCT, USB_PRODUCT_MCT_SITECOM_USB232 }, { USB_VENDOR_MCT, USB_PRODUCT_MCT_DU_H3SP_USB232 }, { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U109 }, { 0, 0 } }; Static device_probe_t umct_match; Static device_attach_t umct_attach; Static device_detach_t umct_detach; Static device_method_t 
umct_methods[] = { DEVMETHOD(device_probe, umct_match), DEVMETHOD(device_attach, umct_attach), DEVMETHOD(device_detach, umct_detach), { 0, 0 } }; Static driver_t umct_driver = { "ucom", umct_methods, sizeof(struct umct_softc) }; DRIVER_MODULE(umct, uhub, umct_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(umct, usb, 1, 1, 1); MODULE_DEPEND(umct, ucom, UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); MODULE_VERSION(umct, 1); Static struct ithd *umct_ithd; USB_MATCH(umct) { USB_MATCH_START(umct, uaa); int i; if (uaa->iface != NULL) return (UMATCH_NONE); for (i = 0; umct_products[i].vendor != 0; i++) { if (umct_products[i].vendor == uaa->vendor && umct_products[i].product == uaa->product) { return (UMATCH_VENDOR_PRODUCT); } } return (UMATCH_NONE); } USB_ATTACH(umct) { USB_ATTACH_START(umct, sc, uaa); usbd_device_handle dev; struct ucom_softc *ucom; usb_config_descriptor_t *cdesc; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char *devinfo; const char *devname; usbd_status err; int i; dev = uaa->device; devinfo = malloc(1024, M_USBDEV, M_NOWAIT | M_ZERO); if (devinfo == NULL) return (ENOMEM); bzero(sc, sizeof(struct umct_softc)); ucom = &sc->sc_ucom; ucom->sc_dev = self; ucom->sc_udev = dev; ucom->sc_iface = uaa->iface; usbd_devinfo(dev, 0, devinfo); device_set_desc_copy(self, devinfo); devname = USBDEVNAME(ucom->sc_dev); printf("%s: %s\n", devname, devinfo); ucom->sc_bulkout_no = -1; ucom->sc_bulkin_no = -1; sc->sc_intr_number = -1; sc->sc_intr_pipe = NULL; err = usbd_set_config_index(dev, UMCT_CONFIG_INDEX, 1); if (err) { printf("%s: failed to set configuration: %s\n", devname, usbd_errstr(err)); ucom->sc_dying = 1; goto error; } cdesc = usbd_get_config_descriptor(ucom->sc_udev); if (cdesc == NULL) { printf("%s: failed to get configuration descriptor\n", devname); ucom->sc_dying = 1; goto error; } err = usbd_device2interface_handle(dev, UMCT_IFACE_INDEX, &ucom->sc_iface); if (err) { printf("%s: failed to get interface: %s\n", devname, 
usbd_errstr(err)); ucom->sc_dying = 1; goto error; } id = usbd_get_interface_descriptor(ucom->sc_iface); sc->sc_iface_number = id->bInterfaceNumber; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(ucom->sc_iface, i); if (ed == NULL) { printf("%s: no endpoint descriptor for %d\n", devname, i); ucom->sc_dying = 1; goto error; } /* * The real bulk-in endpoint is also marked as an interrupt. * The only way to differentiate it from the real interrupt * endpoint is to look at the wMaxPacketSize field. */ if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN) { if (UGETW(ed->wMaxPacketSize) == 0x2) { sc->sc_intr_number = ed->bEndpointAddress; sc->sc_isize = UGETW(ed->wMaxPacketSize); } else { ucom->sc_bulkin_no = ed->bEndpointAddress; ucom->sc_ibufsize = UGETW(ed->wMaxPacketSize); } continue; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT) { ucom->sc_bulkout_no = ed->bEndpointAddress; if (uaa->product == USB_PRODUCT_MCT_SITECOM_USB232) ucom->sc_obufsize = 16; /* device is broken */ else ucom->sc_obufsize = UGETW(ed->wMaxPacketSize); continue; } printf("%s: warning - unsupported endpoint 0x%x\n", devname, ed->bEndpointAddress); } if (sc->sc_intr_number == -1) { printf("%s: Could not fint interrupt in\n", devname); ucom->sc_dying = 1; goto error; } sc->sc_intr_iface = ucom->sc_iface; if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", devname); ucom->sc_dying = 1; goto error; } ucom->sc_parent = sc; ucom->sc_portno = UCOM_UNK_PORTNO; ucom->sc_opkthdrlen = 0; ucom->sc_callback = &umct_callback; ucom_attach(ucom); swi_add(&umct_ithd, "ucom", umct_notify, sc, SWI_TTY, 0, &sc->sc_swicookie); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; error: free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } USB_DETACH(umct) { USB_DETACH_START(umct, sc); int rv; if (sc->sc_intr_pipe != NULL) { usbd_abort_pipe(sc->sc_intr_pipe); usbd_close_pipe(sc->sc_intr_pipe); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } 
sc->sc_ucom.sc_dying = 1; ithread_remove_handler(sc->sc_swicookie); rv = ucom_detach(&sc->sc_ucom); return (rv); } Static int umct_request(struct umct_softc *sc, uint8_t request, int len, uint32_t value) { usb_device_request_t req; usbd_status err; uint8_t oval[4]; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = request; USETW(req.wValue, 0); USETW(req.wIndex, sc->sc_iface_number); USETW(req.wLength, len); USETDW(oval, value); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, oval); if (err) printf("%s: ubsa_request: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } Static void umct_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct umct_softc *sc; u_char *buf; sc = (struct umct_softc *)priv; buf = sc->sc_intr_buf; if (sc->sc_ucom.sc_dying) return; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; usbd_clear_endpoint_stall_async(sc->sc_intr_pipe); return; } sc->sc_msr = buf[0]; sc->sc_lsr = buf[1]; /* * Defer notifying the ucom layer as it doesn't like to be bothered * from an interrupt context. */ swi_sched(sc->sc_swicookie, 0); } Static void umct_notify(void *arg) { struct umct_softc *sc; sc = (struct umct_softc *)arg; if (sc->sc_ucom.sc_dying == 0) ucom_status_change(&sc->sc_ucom); } Static void umct_get_status(void *addr, int portno, u_char *lsr, u_char *msr) { struct umct_softc *sc; sc = addr; if (lsr != NULL) *lsr = sc->sc_lsr; if (msr != NULL) *msr = sc->sc_msr; return; } Static void umct_set(void *addr, int portno, int reg, int onoff) { struct umct_softc *sc; sc = addr; switch (reg) { case UCOM_SET_BREAK: sc->sc_lcr &= ~0x40; sc->sc_lcr |= (onoff) ? 0x40 : 0; umct_request(sc, UMCT_SET_LCR, UMCT_SET_LCR_SIZE, sc->sc_lcr); break; case UCOM_SET_DTR: sc->sc_mcr &= ~0x01; sc->sc_mcr |= (onoff) ? 0x01 : 0; umct_request(sc, UMCT_SET_MCR, UMCT_SET_MCR_SIZE, sc->sc_mcr); break; case UCOM_SET_RTS: sc->sc_mcr &= ~0x2; sc->sc_mcr |= (onoff) ? 
0x02 : 0; umct_request(sc, UMCT_SET_MCR, UMCT_SET_MCR_SIZE, sc->sc_mcr); break; default: break; } } Static int umct_calc_baud(u_int baud) { switch(baud) { case B300: return (0x1); case B600: return (0x2); case B1200: return (0x3); case B2400: return (0x4); case B4800: return (0x6); case B9600: return (0x8); case B19200: return (0x9); case B38400: return (0xa); case B57600: return (0xb); case 115200: return (0xc); case B0: default: break; } return (0x0); } Static int umct_param(void *addr, int portno, struct termios *ti) { struct umct_softc *sc; uint32_t value; sc = addr; value = umct_calc_baud(ti->c_ospeed); umct_request(sc, UMCT_SET_BAUD, UMCT_SET_BAUD_SIZE, value); value = sc->sc_lcr & 0x40; switch (ti->c_cflag & CSIZE) { case CS5: value |= 0x0; break; case CS6: value |= 0x1; break; case CS7: value |= 0x2; break; case CS8: value |= 0x3; break; default: value |= 0x0; break; } value |= (ti->c_cflag & CSTOPB) ? 0x4 : 0; if (ti->c_cflag & PARENB) { value |= 0x8; value |= (ti->c_cflag & PARODD) ? 0x0 : 0x10; } /* * XXX There doesn't seem to be a way to tell the device to use flow * control. 
*/ sc->sc_lcr = value; umct_request(sc, UMCT_SET_LCR, UMCT_SET_LCR_SIZE, value); return (0); } Static int umct_open(void *addr, int portno) { struct umct_softc *sc; int err; sc = addr; if (sc->sc_ucom.sc_dying) { return (ENXIO); } if (sc->sc_intr_number != -1 && sc->sc_intr_pipe == NULL) { sc->sc_intr_buf = malloc(sc->sc_isize, M_USBDEV, M_WAITOK); err = usbd_open_pipe_intr(sc->sc_intr_iface, sc->sc_intr_number, USBD_SHORT_XFER_OK, &sc->sc_intr_pipe, sc, sc->sc_intr_buf, sc->sc_isize, umct_intr, UMCT_INTR_INTERVAL); if (err) { printf("%s: cannot open interrupt pipe (addr %d)\n", USBDEVNAME(sc->sc_ucom.sc_dev), sc->sc_intr_number); free(sc->sc_intr_buf, M_USBDEV); return (EIO); } } return (0); } Static void umct_close(void *addr, int portno) { struct umct_softc *sc; int err; sc = addr; if (sc->sc_ucom.sc_dying) return; if (sc->sc_intr_pipe != NULL) { err = usbd_abort_pipe(sc->sc_intr_pipe); if (err) printf("%s: abort interrupt pipe failed: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_intr_pipe); if (err) printf("%s: close interrupt pipe failed: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } } Index: head/sys/dev/usb/umodem.c =================================================================== --- head/sys/dev/usb/umodem.c (revision 129878) +++ head/sys/dev/usb/umodem.c (revision 129879) @@ -1,821 +1,822 @@ /* $NetBSD: umodem.c,v 1.45 2002/09/23 05:51:23 simonb Exp $ */ #include __FBSDID("$FreeBSD$"); /*- * Copyright (c) 2003, M. Warner Losh . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. 
Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Comm Class spec: http://www.usb.org/developers/devclass_docs/usbccs10.pdf * http://www.usb.org/developers/devclass_docs/usbcdc11.pdf */ /* * TODO: * - Add error recovery in various places; the big problem is what * to do in a callback if there is an error. * - Implement a Call Device for modems without multiplexed commands. 
* */ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef USB_DEBUG int umodemdebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, umodem, CTLFLAG_RW, 0, "USB umodem"); SYSCTL_INT(_hw_usb_umodem, OID_AUTO, debug, CTLFLAG_RW, &umodemdebug, 0, "umodem debug level"); #define DPRINTFN(n, x) if (umodemdebug > (n)) logprintf x #else #define DPRINTFN(n, x) #endif #define DPRINTF(x) DPRINTFN(0, x) static const struct umodem_product { u_int16_t vendor; u_int16_t product; u_int8_t interface; } umodem_products[] = { /* Kyocera AH-K3001V*/ { USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_AHK3001V, 0 }, { 0, 0, 0 }, }; /* * These are the maximum number of bytes transferred per frame. * If some really high speed devices should use this driver they * may need to be increased, but this is good enough for normal modems. */ #define UMODEMIBUFSIZE 64 #define UMODEMOBUFSIZE 256 #define UMODEM_MODVER 1 /* module version */ struct umodem_softc { struct ucom_softc sc_ucom; USBBASEDEVICE sc_dev; /* base device */ usbd_device_handle sc_udev; /* USB device */ int sc_ctl_iface_no; usbd_interface_handle sc_ctl_iface; /* control interface */ int sc_data_iface_no; usbd_interface_handle sc_data_iface; /* data interface */ int sc_cm_cap; /* CM capabilities */ int sc_acm_cap; /* ACM capabilities */ int sc_cm_over_data; usb_cdc_line_state_t sc_line_state; /* current line state */ u_char sc_dtr; /* current DTR state */ u_char sc_rts; /* current RTS state */ u_char sc_opening; /* lock during open */ int sc_ctl_notify; /* Notification endpoint */ usbd_pipe_handle sc_notify_pipe; /* Notification pipe */ usb_cdc_notification_t sc_notify_buf; /* Notification structure */ u_char sc_lsr; /* Local status register */ u_char sc_msr; /* Modem status register */ }; Static void *umodem_get_desc(usbd_device_handle dev, int type, int subtype); Static usbd_status 
umodem_set_comm_feature(struct umodem_softc *sc, int feature, int state); Static usbd_status umodem_set_line_coding(struct umodem_softc *sc, usb_cdc_line_state_t *state); Static void umodem_get_caps(usbd_device_handle, int *, int *); Static void umodem_get_status(void *, int portno, u_char *lsr, u_char *msr); Static void umodem_set(void *, int, int, int); Static void umodem_dtr(struct umodem_softc *, int); Static void umodem_rts(struct umodem_softc *, int); Static void umodem_break(struct umodem_softc *, int); Static void umodem_set_line_state(struct umodem_softc *); Static int umodem_param(void *, int, struct termios *); Static int umodem_ioctl(void *, int, u_long, caddr_t, int, usb_proc_ptr ); Static int umodem_open(void *, int portno); Static void umodem_close(void *, int portno); Static void umodem_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); Static struct ucom_callback umodem_callback = { umodem_get_status, umodem_set, umodem_param, umodem_ioctl, umodem_open, umodem_close, NULL, NULL, }; Static device_probe_t umodem_match; Static device_attach_t umodem_attach; Static device_detach_t umodem_detach; Static device_method_t umodem_methods[] = { /* Device interface */ DEVMETHOD(device_probe, umodem_match), DEVMETHOD(device_attach, umodem_attach), DEVMETHOD(device_detach, umodem_detach), { 0, 0 } }; Static driver_t umodem_driver = { "ucom", umodem_methods, sizeof (struct umodem_softc) }; DRIVER_MODULE(umodem, uhub, umodem_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(umodem, usb, 1, 1, 1); MODULE_DEPEND(umodem, ucom, UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); MODULE_VERSION(umodem, UMODEM_MODVER); USB_MATCH(umodem) { USB_MATCH_START(umodem, uaa); usb_interface_descriptor_t *id; usb_device_descriptor_t *dd; int cm, acm, i, ret; if (uaa->iface == NULL) return (UMATCH_NONE); id = usbd_get_interface_descriptor(uaa->iface); dd = usbd_get_device_descriptor(uaa->device); if (id == NULL || dd == NULL) return (UMATCH_NONE); ret = UMATCH_NONE; for (i = 
0; umodem_products[i].vendor != 0; i++) { if (umodem_products[i].vendor == UGETW(dd->idVendor) && umodem_products[i].product == UGETW(dd->idProduct) && umodem_products[i].interface == id->bInterfaceNumber) { ret = UMATCH_VENDOR_PRODUCT; break; } } if (ret == UMATCH_NONE && id->bInterfaceClass == UICLASS_CDC && id->bInterfaceSubClass == UISUBCLASS_ABSTRACT_CONTROL_MODEL && id->bInterfaceProtocol == UIPROTO_CDC_AT) ret = UMATCH_IFACECLASS_IFACESUBCLASS_IFACEPROTO; if (ret == UMATCH_NONE) return (ret); umodem_get_caps(uaa->device, &cm, &acm); if (!(cm & USB_CDC_CM_DOES_CM) || !(cm & USB_CDC_CM_OVER_DATA) || !(acm & USB_CDC_ACM_HAS_LINE)) return (UMATCH_NONE); return ret; } USB_ATTACH(umodem) { USB_ATTACH_START(umodem, sc, uaa); usbd_device_handle dev = uaa->device; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; usb_cdc_cm_descriptor_t *cmd; char *devinfo = NULL; const char *devname; usbd_status err; int data_ifcno; int i; struct ucom_softc *ucom; devinfo = malloc(1024, M_USBDEV, M_WAITOK); usbd_devinfo(dev, 0, devinfo); ucom = &sc->sc_ucom; ucom->sc_dev = self; sc->sc_dev = self; device_set_desc_copy(self, devinfo); ucom->sc_udev = dev; ucom->sc_iface = uaa->iface; /*USB_ATTACH_SETUP; */ sc->sc_udev = dev; sc->sc_ctl_iface = uaa->iface; devname = USBDEVNAME(sc->sc_dev); /* XXX ? use something else ? XXX */ id = usbd_get_interface_descriptor(sc->sc_ctl_iface); printf("%s: %s, iclass %d/%d\n", devname, devinfo, id->bInterfaceClass, id->bInterfaceSubClass); sc->sc_ctl_iface_no = id->bInterfaceNumber; umodem_get_caps(dev, &sc->sc_cm_cap, &sc->sc_acm_cap); /* Get the data interface no. */ cmd = umodem_get_desc(dev, UDESC_CS_INTERFACE, UDESCSUB_CDC_CM); if (cmd == NULL) { printf("%s: no CM descriptor\n", devname); goto bad; } sc->sc_data_iface_no = data_ifcno = cmd->bDataInterface; printf("%s: data interface %d, has %sCM over data, has %sbreak\n", devname, data_ifcno, sc->sc_cm_cap & USB_CDC_CM_OVER_DATA ? "" : "no ", sc->sc_acm_cap & USB_CDC_ACM_HAS_BREAK ? 
"" : "no "); /* Get the data interface too. */ for (i = 0; i < uaa->nifaces; i++) { if (uaa->ifaces[i] != NULL) { id = usbd_get_interface_descriptor(uaa->ifaces[i]); if (id != NULL && id->bInterfaceNumber == data_ifcno) { sc->sc_data_iface = uaa->ifaces[i]; uaa->ifaces[i] = NULL; } } } if (sc->sc_data_iface == NULL) { printf("%s: no data interface\n", devname); goto bad; } ucom->sc_iface = sc->sc_data_iface; /* * Find the bulk endpoints. * Iterate over all endpoints in the data interface and take note. */ ucom->sc_bulkin_no = ucom->sc_bulkout_no = -1; id = usbd_get_interface_descriptor(sc->sc_data_iface); for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_data_iface, i); if (ed == NULL) { printf("%s: no endpoint descriptor for %d\n", devname, i); goto bad; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkin_no = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkout_no = ed->bEndpointAddress; } } if (ucom->sc_bulkin_no == -1) { printf("%s: Could not find data bulk in\n", devname); goto bad; } if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", devname); goto bad; } if (usbd_get_quirks(sc->sc_udev)->uq_flags & UQ_ASSUME_CM_OVER_DATA) { DPRINTF(("Quirk says to assume CM over data\n")); sc->sc_cm_over_data = 1; } else { if (sc->sc_cm_cap & USB_CDC_CM_OVER_DATA) { if (sc->sc_acm_cap & USB_CDC_ACM_HAS_FEATURE) err = umodem_set_comm_feature(sc, UCDC_ABSTRACT_STATE, UCDC_DATA_MULTIPLEXED); else err = 0; if (err) { printf("%s: could not set data multiplex mode\n", devname); goto bad; } sc->sc_cm_over_data = 1; } } /* * The standard allows for notification messages (to indicate things * like a modem hangup) to come in via an interrupt endpoint * off of the control interface. 
Iterate over the endpoints on * the control interface and see if there are any interrupt * endpoints; if there are, then register it. */ sc->sc_ctl_notify = -1; sc->sc_notify_pipe = NULL; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_ctl_iface, i); if (ed == NULL) continue; if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) { printf("%s: status change notification available\n", devname); sc->sc_ctl_notify = ed->bEndpointAddress; } } sc->sc_dtr = -1; ucom->sc_parent = sc; ucom->sc_portno = UCOM_UNK_PORTNO; /* bulkin, bulkout set above */ ucom->sc_ibufsize = UMODEMIBUFSIZE; ucom->sc_obufsize = UMODEMOBUFSIZE; ucom->sc_ibufsizepad = UMODEMIBUFSIZE; ucom->sc_opkthdrlen = 0; ucom->sc_callback = &umodem_callback; ucom_attach(&sc->sc_ucom); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; bad: ucom->sc_dying = 1; free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } Static int umodem_open(void *addr, int portno) { struct umodem_softc *sc = addr; int err; DPRINTF(("umodem_open: sc=%p\n", sc)); if (sc->sc_ctl_notify != -1 && sc->sc_notify_pipe == NULL) { err = usbd_open_pipe_intr(sc->sc_ctl_iface, sc->sc_ctl_notify, USBD_SHORT_XFER_OK, &sc->sc_notify_pipe, sc, &sc->sc_notify_buf, sizeof(sc->sc_notify_buf), umodem_intr, USBD_DEFAULT_INTERVAL); if (err) { DPRINTF(("Failed to establish notify pipe: %s\n", usbd_errstr(err))); return EIO; } } return 0; } Static void umodem_close(void *addr, int portno) { struct umodem_softc *sc = addr; int err; DPRINTF(("umodem_close: sc=%p\n", sc)); if (sc->sc_notify_pipe != NULL) { err = usbd_abort_pipe(sc->sc_notify_pipe); if (err) printf("%s: abort notify pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_notify_pipe); if (err) printf("%s: close notify pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_notify_pipe = NULL; } } Static void umodem_intr(usbd_xfer_handle xfer, 
usbd_private_handle priv, usbd_status status)
{
        struct umodem_softc *sc = priv;
        u_char mstatus;

        /* Device is being detached; drop the notification. */
        if (sc->sc_ucom.sc_dying)
                return;

        if (status != USBD_NORMAL_COMPLETION) {
                if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
                        return;
                printf("%s: abnormal status: %s\n",
                    USBDEVNAME(sc->sc_dev), usbd_errstr(status));
                return;
        }

        /* Only CDC class notifications are expected on this pipe. */
        if (sc->sc_notify_buf.bmRequestType != UCDC_NOTIFICATION) {
                DPRINTF(("%s: unknown message type (%02x) on notify pipe\n",
                    USBDEVNAME(sc->sc_dev),
                    sc->sc_notify_buf.bmRequestType));
                return;
        }

        switch (sc->sc_notify_buf.bNotification) {
        case UCDC_N_SERIAL_STATE:
                /*
                 * Set the serial state in the ucom driver based on
                 * the bits from the notify message.
                 */
                if (UGETW(sc->sc_notify_buf.wLength) != 2) {
                        printf("%s: Invalid notification length! (%d)\n",
                            USBDEVNAME(sc->sc_dev),
                            UGETW(sc->sc_notify_buf.wLength));
                        break;
                }
                DPRINTF(("%s: notify bytes = %02x%02x\n",
                    USBDEVNAME(sc->sc_dev),
                    sc->sc_notify_buf.data[0],
                    sc->sc_notify_buf.data[1]));
                /* Currently, lsr is always zero. */
                sc->sc_lsr = sc->sc_msr = 0;
                mstatus = sc->sc_notify_buf.data[0];

                if (ISSET(mstatus, UCDC_N_SERIAL_RI))
                        sc->sc_msr |= UMSR_RI;
                if (ISSET(mstatus, UCDC_N_SERIAL_DSR))
                        sc->sc_msr |= UMSR_DSR;
                if (ISSET(mstatus, UCDC_N_SERIAL_DCD))
                        sc->sc_msr |= UMSR_DCD;
                ucom_status_change(&sc->sc_ucom);
                break;
        default:
                DPRINTF(("%s: unknown notify message: %02x\n",
                    USBDEVNAME(sc->sc_dev),
                    sc->sc_notify_buf.bNotification));
                break;
        }
}

/*
 * Read the CDC Call Management (cm) and Abstract Control Model (acm)
 * capability bitmaps from the class-specific descriptors.  Both are
 * zeroed first, so a missing descriptor reports no capabilities.
 */
void
umodem_get_caps(usbd_device_handle dev, int *cm, int *acm)
{
        usb_cdc_cm_descriptor_t *cmd;
        usb_cdc_acm_descriptor_t *cad;

        *cm = *acm = 0;

        cmd = umodem_get_desc(dev, UDESC_CS_INTERFACE, UDESCSUB_CDC_CM);
        if (cmd == NULL) {
                DPRINTF(("umodem_get_desc: no CM desc\n"));
                return;
        }
        *cm = cmd->bmCapabilities;

        cad = umodem_get_desc(dev, UDESC_CS_INTERFACE, UDESCSUB_CDC_ACM);
        if (cad == NULL) {
                DPRINTF(("umodem_get_desc: no ACM desc\n"));
                return;
        }
        *acm = cad->bmCapabilities;
}

/* ucom callback: report the cached line/modem status registers. */
void
umodem_get_status(void *addr, int portno, u_char *lsr, u_char *msr)
{
        struct umodem_softc *sc =
addr;

        DPRINTF(("umodem_get_status:\n"));

        if (lsr != NULL)
                *lsr = sc->sc_lsr;
        if (msr != NULL)
                *msr = sc->sc_msr;
}

/*
 * ucom callback: translate termios settings into a CDC SET_LINE_CODING
 * request (rate, stop bits, parity, character size).  Returns ENOTTY
 * when the device rejects the request.
 */
int
umodem_param(void *addr, int portno, struct termios *t)
{
        struct umodem_softc *sc = addr;
        usbd_status err;
        usb_cdc_line_state_t ls;

        DPRINTF(("umodem_param: sc=%p\n", sc));

        USETDW(ls.dwDTERate, t->c_ospeed);
        if (ISSET(t->c_cflag, CSTOPB))
                ls.bCharFormat = UCDC_STOP_BIT_2;
        else
                ls.bCharFormat = UCDC_STOP_BIT_1;
        if (ISSET(t->c_cflag, PARENB)) {
                if (ISSET(t->c_cflag, PARODD))
                        ls.bParityType = UCDC_PARITY_ODD;
                else
                        ls.bParityType = UCDC_PARITY_EVEN;
        } else
                ls.bParityType = UCDC_PARITY_NONE;
        switch (ISSET(t->c_cflag, CSIZE)) {
        case CS5:
                ls.bDataBits = 5;
                break;
        case CS6:
                ls.bDataBits = 6;
                break;
        case CS7:
                ls.bDataBits = 7;
                break;
        case CS8:
                ls.bDataBits = 8;
                break;
        }

        err = umodem_set_line_coding(sc, &ls);
        if (err) {
                DPRINTF(("umodem_param: err=%s\n", usbd_errstr(err)));
                return (ENOTTY);
        }
        return (0);
}

/*
 * ucom callback: driver-specific ioctls.  Only the CM-over-data get/set
 * requests are recognized; USB_SET_CM_OVER_DATA is currently a no-op
 * (see the XXX below).
 */
int
umodem_ioctl(void *addr, int portno, u_long cmd, caddr_t data, int flag,
    usb_proc_ptr p)
{
        struct umodem_softc *sc = addr;
        int error = 0;

        if (sc->sc_ucom.sc_dying)
                return (EIO);

        DPRINTF(("umodemioctl: cmd=0x%08lx\n", cmd));

        switch (cmd) {
        case USB_GET_CM_OVER_DATA:
                *(int *)data = sc->sc_cm_over_data;
                break;
        case USB_SET_CM_OVER_DATA:
                if (*(int *)data != sc->sc_cm_over_data) {
                        /* XXX change it */
                }
                break;
        default:
                DPRINTF(("umodemioctl: unknown\n"));
                error = ENOTTY;
                break;
        }

        return (error);
}

/* Set DTR; the state is cached so an unchanged value sends no request. */
void
umodem_dtr(struct umodem_softc *sc, int onoff)
{
        DPRINTF(("umodem_modem: onoff=%d\n", onoff));

        if (sc->sc_dtr == onoff)
                return;
        sc->sc_dtr = onoff;

        umodem_set_line_state(sc);
}

/* Set RTS; the state is cached so an unchanged value sends no request. */
void
umodem_rts(struct umodem_softc *sc, int onoff)
{
        DPRINTF(("umodem_modem: onoff=%d\n", onoff));

        if (sc->sc_rts == onoff)
                return;
        sc->sc_rts = onoff;

        umodem_set_line_state(sc);
}

/*
 * Push the cached DTR/RTS state to the device with a CDC
 * SET_CONTROL_LINE_STATE request.  Errors are ignored.
 */
void
umodem_set_line_state(struct umodem_softc *sc)
{
        usb_device_request_t req;
        int ls;

        ls = (sc->sc_dtr ? UCDC_LINE_DTR : 0) |
             (sc->sc_rts ? UCDC_LINE_RTS : 0);
        req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
        req.bRequest = UCDC_SET_CONTROL_LINE_STATE;
        USETW(req.wValue, ls);
        USETW(req.wIndex, sc->sc_ctl_iface_no);
        USETW(req.wLength, 0);

        (void)usbd_do_request(sc->sc_udev, &req, 0);
}

/*
 * Send a CDC SEND_BREAK request.  A no-op when the ACM descriptor did
 * not advertise break support.
 */
void
umodem_break(struct umodem_softc *sc, int onoff)
{
        usb_device_request_t req;

        DPRINTF(("umodem_break: onoff=%d\n", onoff));

        if (!(sc->sc_acm_cap & USB_CDC_ACM_HAS_BREAK))
                return;

        req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
        req.bRequest = UCDC_SEND_BREAK;
        USETW(req.wValue, onoff ? UCDC_BREAK_ON : UCDC_BREAK_OFF);
        USETW(req.wIndex, sc->sc_ctl_iface_no);
        USETW(req.wLength, 0);

        (void)usbd_do_request(sc->sc_udev, &req, 0);
}

/* ucom callback: dispatch DTR/RTS/break requests to the helpers above. */
void
umodem_set(void *addr, int portno, int reg, int onoff)
{
        struct umodem_softc *sc = addr;

        switch (reg) {
        case UCOM_SET_DTR:
                umodem_dtr(sc, onoff);
                break;
        case UCOM_SET_RTS:
                umodem_rts(sc, onoff);
                break;
        case UCOM_SET_BREAK:
                umodem_break(sc, onoff);
                break;
        default:
                break;
        }
}

/*
 * Issue a CDC SET_LINE_CODING request.  The last state sent is cached
 * in sc_line_state and an identical request is skipped.
 */
usbd_status
umodem_set_line_coding(struct umodem_softc *sc, usb_cdc_line_state_t *state)
{
        usb_device_request_t req;
        usbd_status err;

        DPRINTF(("umodem_set_line_coding: rate=%d fmt=%d parity=%d bits=%d\n",
            UGETDW(state->dwDTERate), state->bCharFormat, state->bParityType,
            state->bDataBits));

        if (memcmp(state, &sc->sc_line_state, UCDC_LINE_STATE_LENGTH) == 0) {
                DPRINTF(("umodem_set_line_coding: already set\n"));
                return (USBD_NORMAL_COMPLETION);
        }

        req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
        req.bRequest = UCDC_SET_LINE_CODING;
        USETW(req.wValue, 0);
        USETW(req.wIndex, sc->sc_ctl_iface_no);
        USETW(req.wLength, UCDC_LINE_STATE_LENGTH);
        err = usbd_do_request(sc->sc_udev, &req, state);
        if (err) {
                DPRINTF(("umodem_set_line_coding: failed, err=%s\n",
                    usbd_errstr(err)));
                return (err);
        }

        sc->sc_line_state = *state;

        return (USBD_NORMAL_COMPLETION);
}

/*
 * Walk the raw configuration descriptor looking for a class-specific
 * descriptor of the given type/subtype; returns 0 (NULL) when none is
 * found.
 */
void *
umodem_get_desc(usbd_device_handle dev, int type, int subtype)
{
        usb_descriptor_t *desc;
        usb_config_descriptor_t *cd = usbd_get_config_descriptor(dev);
        uByte *p = (uByte *)cd;
        uByte *end = p
+ UGETW(cd->wTotalLength); while (p < end) { desc = (usb_descriptor_t *)p; if (desc->bDescriptorType == type && desc->bDescriptorSubtype == subtype) return (desc); p += desc->bLength; } return (0); } usbd_status umodem_set_comm_feature(struct umodem_softc *sc, int feature, int state) { usb_device_request_t req; usbd_status err; usb_cdc_abstract_state_t ast; DPRINTF(("umodem_set_comm_feature: feature=%d state=%d\n", feature, state)); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UCDC_SET_COMM_FEATURE; USETW(req.wValue, feature); USETW(req.wIndex, sc->sc_ctl_iface_no); USETW(req.wLength, UCDC_ABSTRACT_STATE_LENGTH); USETW(ast.wState, state); err = usbd_do_request(sc->sc_udev, &req, &ast); if (err) { DPRINTF(("umodem_set_comm_feature: feature=%d, err=%s\n", feature, usbd_errstr(err))); return (err); } return (USBD_NORMAL_COMPLETION); } USB_DETACH(umodem) { USB_DETACH_START(umodem, sc); int rv = 0; DPRINTF(("umodem_detach: sc=%p\n", sc)); if (sc->sc_notify_pipe != NULL) { usbd_abort_pipe(sc->sc_notify_pipe); usbd_close_pipe(sc->sc_notify_pipe); sc->sc_notify_pipe = NULL; } sc->sc_ucom.sc_dying = 1; rv = ucom_detach(&sc->sc_ucom); return (rv); } Index: head/sys/dev/usb/uplcom.c =================================================================== --- head/sys/dev/usb/uplcom.c (revision 129878) +++ head/sys/dev/usb/uplcom.c (revision 129879) @@ -1,846 +1,847 @@ /* $NetBSD: uplcom.c,v 1.21 2001/11/13 06:24:56 lukem Exp $ */ /*- * Copyright (c) 2001-2002, Shunsuke Akiyama . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Ichiro FUKUHARA (ichiro@ichiro.org). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. 
Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Simple datasheet * http://www.prolific.com.tw/download/DataSheet/pl2303_ds11.PDF * http://www.nisseisg.co.jp/jyouhou/_cp/@gif/2303.pdf * (english) * */ #include "opt_uplcom.h" #include #include #include +#include #include #include #include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw_usb, OID_AUTO, uplcom, CTLFLAG_RW, 0, "USB uplcom"); #ifdef USB_DEBUG static int uplcomdebug = 0; SYSCTL_INT(_hw_usb_uplcom, OID_AUTO, debug, CTLFLAG_RW, &uplcomdebug, 0, "uplcom debug level"); #define DPRINTFN(n, x) do { \ if (uplcomdebug > (n)) \ logprintf x; \ } while (0) #else #define DPRINTFN(n, x) #endif #define DPRINTF(x) DPRINTFN(0, x) #define UPLCOM_MODVER 1 /* module version */ #define UPLCOM_CONFIG_INDEX 0 #define UPLCOM_IFACE_INDEX 0 #define UPLCOM_SECOND_IFACE_INDEX 1 #ifndef UPLCOM_INTR_INTERVAL #define UPLCOM_INTR_INTERVAL 100 /* ms */ 
#endif #define UPLCOM_SET_REQUEST 0x01 #define UPLCOM_SET_CRTSCTS 0x41 #define RSAQ_STATUS_DSR 0x02 #define RSAQ_STATUS_DCD 0x01 struct uplcom_softc { struct ucom_softc sc_ucom; int sc_iface_number; /* interface number */ usbd_interface_handle sc_intr_iface; /* interrupt interface */ int sc_intr_number; /* interrupt number */ usbd_pipe_handle sc_intr_pipe; /* interrupt pipe */ u_char *sc_intr_buf; /* interrupt buffer */ int sc_isize; usb_cdc_line_state_t sc_line_state; /* current line state */ u_char sc_dtr; /* current DTR state */ u_char sc_rts; /* current RTS state */ u_char sc_status; u_char sc_lsr; /* Local status register */ u_char sc_msr; /* uplcom status register */ }; /* * These are the maximum number of bytes transferred per frame. * The output buffer size cannot be increased due to the size encoding. */ #define UPLCOMIBUFSIZE 256 #define UPLCOMOBUFSIZE 256 Static usbd_status uplcom_reset(struct uplcom_softc *); Static usbd_status uplcom_set_line_coding(struct uplcom_softc *, usb_cdc_line_state_t *); Static usbd_status uplcom_set_crtscts(struct uplcom_softc *); Static void uplcom_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void uplcom_set(void *, int, int, int); Static void uplcom_dtr(struct uplcom_softc *, int); Static void uplcom_rts(struct uplcom_softc *, int); Static void uplcom_break(struct uplcom_softc *, int); Static void uplcom_set_line_state(struct uplcom_softc *); Static void uplcom_get_status(void *, int, u_char *, u_char *); #if TODO Static int uplcom_ioctl(void *, int, u_long, caddr_t, int, usb_proc_ptr); #endif Static int uplcom_param(void *, int, struct termios *); Static int uplcom_open(void *, int); Static void uplcom_close(void *, int); struct ucom_callback uplcom_callback = { uplcom_get_status, uplcom_set, uplcom_param, NULL, /* uplcom_ioctl, TODO */ uplcom_open, uplcom_close, NULL, NULL }; static const struct uplcom_product { uint16_t vendor; uint16_t product; } uplcom_products [] = { /* I/O DATA USB-RSAQ */ { 
USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBRSAQ }, /* I/O DATA USB-RSAQ2 */ { USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_RSAQ2 }, /* PLANEX USB-RS232 URS-03 */ { USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC232A }, /* IOGEAR/ATEN UC-232A */ { USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2303 }, /* TDK USB-PHS Adapter UHA6400 */ { USB_VENDOR_TDK, USB_PRODUCT_TDK_UHA6400 }, /* RATOC REX-USB60 */ { USB_VENDOR_RATOC, USB_PRODUCT_RATOC_REXUSB60 }, /* ELECOM UC-SGT */ { USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_UCSGT }, /* SOURCENEXT KeikaiDenwa 8 */ { USB_VENDOR_SOURCENEXT, USB_PRODUCT_SOURCENEXT_KEIKAI8 }, /* SOURCENEXT KeikaiDenwa 8 with charger */ { USB_VENDOR_SOURCENEXT, USB_PRODUCT_SOURCENEXT_KEIKAI8_CHG }, /* HAL Corporation Crossam2+USB */ { USB_VENDOR_HAL, USB_PRODUCT_HAL_IMR001 }, { 0, 0 } }; Static device_probe_t uplcom_match; Static device_attach_t uplcom_attach; Static device_detach_t uplcom_detach; Static device_method_t uplcom_methods[] = { /* Device interface */ DEVMETHOD(device_probe, uplcom_match), DEVMETHOD(device_attach, uplcom_attach), DEVMETHOD(device_detach, uplcom_detach), { 0, 0 } }; Static driver_t uplcom_driver = { "ucom", uplcom_methods, sizeof (struct uplcom_softc) }; DRIVER_MODULE(uplcom, uhub, uplcom_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(uplcom, usb, 1, 1, 1); MODULE_DEPEND(uplcom, ucom, UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); MODULE_VERSION(uplcom, UPLCOM_MODVER); static int uplcominterval = UPLCOM_INTR_INTERVAL; static int sysctl_hw_usb_uplcom_interval(SYSCTL_HANDLER_ARGS) { int err, val; val = uplcominterval; err = sysctl_handle_int(oidp, &val, sizeof(val), req); if (err != 0 || req->newptr == NULL) return (err); if (0 < val && val <= 1000) uplcominterval = val; else err = EINVAL; return (err); } SYSCTL_PROC(_hw_usb_uplcom, OID_AUTO, interval, CTLTYPE_INT | CTLFLAG_RW, 0, sizeof(int), sysctl_hw_usb_uplcom_interval, "I", "uplcom interrpt pipe interval"); USB_MATCH(uplcom) { USB_MATCH_START(uplcom, uaa); int i; if (uaa->iface != NULL) 
return (UMATCH_NONE); for (i = 0; uplcom_products[i].vendor != 0; i++) { if (uplcom_products[i].vendor == uaa->vendor && uplcom_products[i].product == uaa->product) { return (UMATCH_VENDOR_PRODUCT); } } return (UMATCH_NONE); } USB_ATTACH(uplcom) { USB_ATTACH_START(uplcom, sc, uaa); usbd_device_handle dev = uaa->device; struct ucom_softc *ucom; usb_config_descriptor_t *cdesc; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char *devinfo; const char *devname; usbd_status err; int i; devinfo = malloc(1024, M_USBDEV, M_WAITOK); ucom = &sc->sc_ucom; bzero(sc, sizeof (struct uplcom_softc)); usbd_devinfo(dev, 0, devinfo); /* USB_ATTACH_SETUP; */ ucom->sc_dev = self; device_set_desc_copy(self, devinfo); /* USB_ATTACH_SETUP; */ ucom->sc_udev = dev; ucom->sc_iface = uaa->iface; devname = USBDEVNAME(ucom->sc_dev); printf("%s: %s\n", devname, devinfo); DPRINTF(("uplcom attach: sc = %p\n", sc)); /* initialize endpoints */ ucom->sc_bulkin_no = ucom->sc_bulkout_no = -1; sc->sc_intr_number = -1; sc->sc_intr_pipe = NULL; /* Move the device into the configured state. 
*/ err = usbd_set_config_index(dev, UPLCOM_CONFIG_INDEX, 1); if (err) { printf("%s: failed to set configuration: %s\n", devname, usbd_errstr(err)); ucom->sc_dying = 1; goto error; } /* get the config descriptor */ cdesc = usbd_get_config_descriptor(ucom->sc_udev); if (cdesc == NULL) { printf("%s: failed to get configuration descriptor\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } /* get the (first/common) interface */ err = usbd_device2interface_handle(dev, UPLCOM_IFACE_INDEX, &ucom->sc_iface); if (err) { printf("%s: failed to get interface: %s\n", devname, usbd_errstr(err)); ucom->sc_dying = 1; goto error; } /* Find the interrupt endpoints */ id = usbd_get_interface_descriptor(ucom->sc_iface); sc->sc_iface_number = id->bInterfaceNumber; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(ucom->sc_iface, i); if (ed == NULL) { printf("%s: no endpoint descriptor for %d\n", USBDEVNAME(ucom->sc_dev), i); ucom->sc_dying = 1; goto error; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->sc_intr_number = ed->bEndpointAddress; sc->sc_isize = UGETW(ed->wMaxPacketSize); } } if (sc->sc_intr_number == -1) { printf("%s: Could not find interrupt in\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } /* keep interface for interrupt */ sc->sc_intr_iface = ucom->sc_iface; /* * USB-RSAQ1 has two interface * * USB-RSAQ1 | USB-RSAQ2 * -----------------+----------------- * Interface 0 |Interface 0 * Interrupt(0x81) | Interrupt(0x81) * -----------------+ BulkIN(0x02) * Interface 1 | BulkOUT(0x83) * BulkIN(0x02) | * BulkOUT(0x83) | */ if (cdesc->bNumInterface == 2) { err = usbd_device2interface_handle(dev, UPLCOM_SECOND_IFACE_INDEX, &ucom->sc_iface); if (err) { printf("%s: failed to get second interface: %s\n", devname, usbd_errstr(err)); ucom->sc_dying = 1; goto error; } } /* Find the bulk{in,out} endpoints */ id = usbd_get_interface_descriptor(ucom->sc_iface); 
sc->sc_iface_number = id->bInterfaceNumber; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(ucom->sc_iface, i); if (ed == NULL) { printf("%s: no endpoint descriptor for %d\n", USBDEVNAME(ucom->sc_dev), i); ucom->sc_dying = 1; goto error; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkin_no = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkout_no = ed->bEndpointAddress; } } if (ucom->sc_bulkin_no == -1) { printf("%s: Could not find data bulk in\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", USBDEVNAME(ucom->sc_dev)); ucom->sc_dying = 1; goto error; } sc->sc_dtr = sc->sc_rts = -1; ucom->sc_parent = sc; ucom->sc_portno = UCOM_UNK_PORTNO; /* bulkin, bulkout set above */ ucom->sc_ibufsize = UPLCOMIBUFSIZE; ucom->sc_obufsize = UPLCOMOBUFSIZE; ucom->sc_ibufsizepad = UPLCOMIBUFSIZE; ucom->sc_opkthdrlen = 0; ucom->sc_callback = &uplcom_callback; err = uplcom_reset(sc); if (err) { printf("%s: reset failed: %s\n", USBDEVNAME(ucom->sc_dev), usbd_errstr(err)); ucom->sc_dying = 1; goto error; } DPRINTF(("uplcom: in = 0x%x, out = 0x%x, intr = 0x%x\n", ucom->sc_bulkin_no, ucom->sc_bulkout_no, sc->sc_intr_number)); ucom_attach(&sc->sc_ucom); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; error: free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } USB_DETACH(uplcom) { USB_DETACH_START(uplcom, sc); int rv = 0; DPRINTF(("uplcom_detach: sc = %p\n", sc)); if (sc->sc_intr_pipe != NULL) { usbd_abort_pipe(sc->sc_intr_pipe); usbd_close_pipe(sc->sc_intr_pipe); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } sc->sc_ucom.sc_dying = 1; rv = ucom_detach(&sc->sc_ucom); return (rv); } Static usbd_status uplcom_reset(struct uplcom_softc *sc) { usb_device_request_t req; usbd_status err; 
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
        req.bRequest = UPLCOM_SET_REQUEST;
        USETW(req.wValue, 0);
        USETW(req.wIndex, sc->sc_iface_number);
        USETW(req.wLength, 0);

        err = usbd_do_request(sc->sc_ucom.sc_udev, &req, 0);
        if (err) {
                printf("%s: uplcom_reset: %s\n",
                    USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err));
                return (EIO);
        }

        return (0);
}

/*
 * Push the cached DTR/RTS state to the device with a CDC
 * SET_CONTROL_LINE_STATE request; failures are only logged.
 */
Static void
uplcom_set_line_state(struct uplcom_softc *sc)
{
        usb_device_request_t req;
        int ls;
        usbd_status err;

        ls = (sc->sc_dtr ? UCDC_LINE_DTR : 0) |
             (sc->sc_rts ? UCDC_LINE_RTS : 0);
        req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
        req.bRequest = UCDC_SET_CONTROL_LINE_STATE;
        USETW(req.wValue, ls);
        USETW(req.wIndex, sc->sc_iface_number);
        USETW(req.wLength, 0);

        err = usbd_do_request(sc->sc_ucom.sc_udev, &req, 0);
        if (err)
                printf("%s: uplcom_set_line_status: %s\n",
                    USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err));
}

/* ucom callback: dispatch DTR/RTS/break requests to the helpers below. */
Static void
uplcom_set(void *addr, int portno, int reg, int onoff)
{
        struct uplcom_softc *sc = addr;

        switch (reg) {
        case UCOM_SET_DTR:
                uplcom_dtr(sc, onoff);
                break;
        case UCOM_SET_RTS:
                uplcom_rts(sc, onoff);
                break;
        case UCOM_SET_BREAK:
                uplcom_break(sc, onoff);
                break;
        default:
                break;
        }
}

/* Set DTR; the state is cached so an unchanged value sends no request. */
Static void
uplcom_dtr(struct uplcom_softc *sc, int onoff)
{
        DPRINTF(("uplcom_dtr: onoff = %d\n", onoff));

        if (sc->sc_dtr == onoff)
                return;
        sc->sc_dtr = onoff;

        uplcom_set_line_state(sc);
}

/* Set RTS; the state is cached so an unchanged value sends no request. */
Static void
uplcom_rts(struct uplcom_softc *sc, int onoff)
{
        DPRINTF(("uplcom_rts: onoff = %d\n", onoff));

        if (sc->sc_rts == onoff)
                return;
        sc->sc_rts = onoff;

        uplcom_set_line_state(sc);
}

/* Send a CDC SEND_BREAK request (break on/off); failures only logged. */
Static void
uplcom_break(struct uplcom_softc *sc, int onoff)
{
        usb_device_request_t req;
        usbd_status err;

        DPRINTF(("uplcom_break: onoff = %d\n", onoff));

        req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
        req.bRequest = UCDC_SEND_BREAK;
        USETW(req.wValue, onoff ? UCDC_BREAK_ON : UCDC_BREAK_OFF);
        USETW(req.wIndex, sc->sc_iface_number);
        USETW(req.wLength, 0);

        err = usbd_do_request(sc->sc_ucom.sc_udev, &req, 0);
        if (err)
                printf("%s: uplcom_break: %s\n",
                    USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err));
}

/*
 * Enable hardware flow control via the PL2303 vendor-specific
 * SET_REQUEST with wIndex UPLCOM_SET_CRTSCTS.
 */
Static usbd_status
uplcom_set_crtscts(struct uplcom_softc *sc)
{
        usb_device_request_t req;
        usbd_status err;

        DPRINTF(("uplcom_set_crtscts: on\n"));

        req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
        req.bRequest = UPLCOM_SET_REQUEST;
        USETW(req.wValue, 0);
        USETW(req.wIndex, UPLCOM_SET_CRTSCTS);
        USETW(req.wLength, 0);

        err = usbd_do_request(sc->sc_ucom.sc_udev, &req, 0);
        if (err) {
                printf("%s: uplcom_set_crtscts: %s\n",
                    USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err));
                return (err);
        }

        return (USBD_NORMAL_COMPLETION);
}

/*
 * Issue a CDC SET_LINE_CODING request.  The last state sent is cached
 * in sc_line_state and an identical request is skipped.
 */
Static usbd_status
uplcom_set_line_coding(struct uplcom_softc *sc, usb_cdc_line_state_t *state)
{
        usb_device_request_t req;
        usbd_status err;

        DPRINTF((
            "uplcom_set_line_coding: rate = %d, fmt = %d, parity = %d bits = %d\n",
            UGETDW(state->dwDTERate), state->bCharFormat, state->bParityType,
            state->bDataBits));

        if (memcmp(state, &sc->sc_line_state, UCDC_LINE_STATE_LENGTH) == 0) {
                DPRINTF(("uplcom_set_line_coding: already set\n"));
                return (USBD_NORMAL_COMPLETION);
        }

        req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
        req.bRequest = UCDC_SET_LINE_CODING;
        USETW(req.wValue, 0);
        USETW(req.wIndex, sc->sc_iface_number);
        USETW(req.wLength, UCDC_LINE_STATE_LENGTH);

        err = usbd_do_request(sc->sc_ucom.sc_udev, &req, state);
        if (err) {
                printf("%s: uplcom_set_line_coding: %s\n",
                    USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err));
                return (err);
        }

        sc->sc_line_state = *state;

        return (USBD_NORMAL_COMPLETION);
}

/*
 * ucom callback: translate termios settings into a CDC SET_LINE_CODING
 * request; CRTSCTS additionally enables the vendor flow-control mode.
 */
Static int
uplcom_param(void *addr, int portno, struct termios *t)
{
        struct uplcom_softc *sc = addr;
        usbd_status err;
        usb_cdc_line_state_t ls;

        DPRINTF(("uplcom_param: sc = %p\n", sc));

        USETDW(ls.dwDTERate, t->c_ospeed);
        if (ISSET(t->c_cflag, CSTOPB))
                ls.bCharFormat = UCDC_STOP_BIT_2;
        else
                ls.bCharFormat = UCDC_STOP_BIT_1;
        if
(ISSET(t->c_cflag, PARENB)) {
                if (ISSET(t->c_cflag, PARODD))
                        ls.bParityType = UCDC_PARITY_ODD;
                else
                        ls.bParityType = UCDC_PARITY_EVEN;
        } else
                ls.bParityType = UCDC_PARITY_NONE;
        switch (ISSET(t->c_cflag, CSIZE)) {
        case CS5:
                ls.bDataBits = 5;
                break;
        case CS6:
                ls.bDataBits = 6;
                break;
        case CS7:
                ls.bDataBits = 7;
                break;
        case CS8:
                ls.bDataBits = 8;
                break;
        }

        err = uplcom_set_line_coding(sc, &ls);
        if (err)
                return (EIO);

        if (ISSET(t->c_cflag, CRTSCTS)) {
                err = uplcom_set_crtscts(sc);
                if (err)
                        return (EIO);
        }

        return (0);
}

/*
 * ucom callback: open the port.  Allocates the interrupt buffer and
 * opens the status interrupt pipe (polled every uplcominterval ms)
 * when the device has one.
 */
Static int
uplcom_open(void *addr, int portno)
{
        struct uplcom_softc *sc = addr;
        int err;

        if (sc->sc_ucom.sc_dying)
                return (ENXIO);

        DPRINTF(("uplcom_open: sc = %p\n", sc));

        if (sc->sc_intr_number != -1 && sc->sc_intr_pipe == NULL) {
                sc->sc_status = 0; /* clear status bit */
                sc->sc_intr_buf = malloc(sc->sc_isize, M_USBDEV, M_WAITOK);
                err = usbd_open_pipe_intr(sc->sc_intr_iface,
                    sc->sc_intr_number, USBD_SHORT_XFER_OK,
                    &sc->sc_intr_pipe, sc, sc->sc_intr_buf, sc->sc_isize,
                    uplcom_intr, uplcominterval);
                if (err) {
                        printf("%s: cannot open interrupt pipe (addr %d)\n",
                            USBDEVNAME(sc->sc_ucom.sc_dev),
                            sc->sc_intr_number);
                        return (EIO);
                }
        }

        return (0);
}

/*
 * ucom callback: close the port.  Aborts and closes the interrupt pipe
 * and frees its buffer; errors are reported but not fatal.
 */
Static void
uplcom_close(void *addr, int portno)
{
        struct uplcom_softc *sc = addr;
        int err;

        if (sc->sc_ucom.sc_dying)
                return;

        DPRINTF(("uplcom_close: close\n"));

        if (sc->sc_intr_pipe != NULL) {
                err = usbd_abort_pipe(sc->sc_intr_pipe);
                if (err)
                        printf("%s: abort interrupt pipe failed: %s\n",
                            USBDEVNAME(sc->sc_ucom.sc_dev),
                            usbd_errstr(err));
                err = usbd_close_pipe(sc->sc_intr_pipe);
                if (err)
                        printf("%s: close interrupt pipe failed: %s\n",
                            USBDEVNAME(sc->sc_ucom.sc_dev),
                            usbd_errstr(err));
                free(sc->sc_intr_buf, M_USBDEV);
                sc->sc_intr_pipe = NULL;
        }
}

/*
 * Interrupt pipe callback: decode the device status byte (buf[8]) into
 * modem-status bits and notify ucom of the change.
 */
Static void
uplcom_intr(usbd_xfer_handle xfer, usbd_private_handle priv,
    usbd_status status)
{
        struct uplcom_softc *sc = priv;
        u_char *buf = sc->sc_intr_buf;
        u_char pstatus;

        if (sc->sc_ucom.sc_dying)
                return;

        if (status != USBD_NORMAL_COMPLETION) {
                if (status ==
USBD_NOT_STARTED || status == USBD_CANCELLED) return; DPRINTF(("%s: uplcom_intr: abnormal status: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(status))); usbd_clear_endpoint_stall_async(sc->sc_intr_pipe); return; } DPRINTF(("%s: uplcom status = %02x\n", USBDEVNAME(sc->sc_ucom.sc_dev), buf[8])); sc->sc_lsr = sc->sc_msr = 0; pstatus = buf[8]; if (ISSET(pstatus, RSAQ_STATUS_DSR)) sc->sc_msr |= UMSR_DSR; if (ISSET(pstatus, RSAQ_STATUS_DCD)) sc->sc_msr |= UMSR_DCD; ucom_status_change(&sc->sc_ucom); } Static void uplcom_get_status(void *addr, int portno, u_char *lsr, u_char *msr) { struct uplcom_softc *sc = addr; DPRINTF(("uplcom_get_status:\n")); if (lsr != NULL) *lsr = sc->sc_lsr; if (msr != NULL) *msr = sc->sc_msr; } #if TODO Static int uplcom_ioctl(void *addr, int portno, u_long cmd, caddr_t data, int flag, usb_proc_ptr p) { struct uplcom_softc *sc = addr; int error = 0; if (sc->sc_ucom.sc_dying) return (EIO); DPRINTF(("uplcom_ioctl: cmd = 0x%08lx\n", cmd)); switch (cmd) { case TIOCNOTTY: case TIOCMGET: case TIOCMSET: case USB_GET_CM_OVER_DATA: case USB_SET_CM_OVER_DATA: break; default: DPRINTF(("uplcom_ioctl: unknown\n")); error = ENOTTY; break; } return (error); } #endif Index: head/sys/dev/usb/usbdi_util.c =================================================================== --- head/sys/dev/usb/usbdi_util.c (revision 129878) +++ head/sys/dev/usb/usbdi_util.c (revision 129879) @@ -1,507 +1,508 @@ /* $NetBSD: usbdi_util.c,v 1.36 2001/11/13 06:24:57 lukem Exp $ */ /* * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #elif defined(__FreeBSD__) #include #endif #include #include #include #include #ifdef USB_DEBUG #define DPRINTF(x) if (usbdebug) logprintf x #define DPRINTFN(n,x) if (usbdebug>(n)) logprintf x extern int usbdebug; #else #define DPRINTF(x) #define DPRINTFN(n,x) #endif usbd_status usbd_get_desc(usbd_device_handle dev, int type, int index, int len, void *desc) { usb_device_request_t req; DPRINTFN(3,("usbd_get_desc: type=%d, index=%d, len=%d\n", type, index, len)); req.bmRequestType = UT_READ_DEVICE; req.bRequest = UR_GET_DESCRIPTOR; USETW2(req.wValue, type, index); USETW(req.wIndex, 0); USETW(req.wLength, len); return (usbd_do_request(dev, &req, desc)); } usbd_status usbd_get_config_desc(usbd_device_handle dev, int confidx, usb_config_descriptor_t *d) { usbd_status err; DPRINTFN(3,("usbd_get_config_desc: confidx=%d\n", confidx)); err = usbd_get_desc(dev, UDESC_CONFIG, confidx, USB_CONFIG_DESCRIPTOR_SIZE, d); if (err) return (err); if (d->bDescriptorType != UDESC_CONFIG) { DPRINTFN(-1,("usbd_get_config_desc: confidx=%d, bad desc " "len=%d type=%d\n", confidx, d->bLength, d->bDescriptorType)); return (USBD_INVAL); } return (USBD_NORMAL_COMPLETION); } usbd_status usbd_get_config_desc_full(usbd_device_handle dev, int conf, void *d, int size) { DPRINTFN(3,("usbd_get_config_desc_full: conf=%d\n", conf)); return (usbd_get_desc(dev, UDESC_CONFIG, conf, size, d)); } usbd_status usbd_get_device_desc(usbd_device_handle dev, usb_device_descriptor_t *d) { DPRINTFN(3,("usbd_get_device_desc:\n")); return (usbd_get_desc(dev, UDESC_DEVICE, 0, USB_DEVICE_DESCRIPTOR_SIZE, d)); } usbd_status usbd_get_device_status(usbd_device_handle dev, usb_status_t *st) { usb_device_request_t req; req.bmRequestType = UT_READ_DEVICE; req.bRequest = UR_GET_STATUS; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(usb_status_t)); return 
(usbd_do_request(dev, &req, st)); } usbd_status usbd_get_hub_status(usbd_device_handle dev, usb_hub_status_t *st) { usb_device_request_t req; req.bmRequestType = UT_READ_CLASS_DEVICE; req.bRequest = UR_GET_STATUS; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(usb_hub_status_t)); return (usbd_do_request(dev, &req, st)); } usbd_status usbd_set_address(usbd_device_handle dev, int addr) { usb_device_request_t req; req.bmRequestType = UT_WRITE_DEVICE; req.bRequest = UR_SET_ADDRESS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, 0); return usbd_do_request(dev, &req, 0); } usbd_status usbd_get_port_status(usbd_device_handle dev, int port, usb_port_status_t *ps) { usb_device_request_t req; req.bmRequestType = UT_READ_CLASS_OTHER; req.bRequest = UR_GET_STATUS; USETW(req.wValue, 0); USETW(req.wIndex, port); USETW(req.wLength, sizeof *ps); return (usbd_do_request(dev, &req, ps)); } usbd_status usbd_clear_hub_feature(usbd_device_handle dev, int sel) { usb_device_request_t req; req.bmRequestType = UT_WRITE_CLASS_DEVICE; req.bRequest = UR_CLEAR_FEATURE; USETW(req.wValue, sel); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (usbd_do_request(dev, &req, 0)); } usbd_status usbd_set_hub_feature(usbd_device_handle dev, int sel) { usb_device_request_t req; req.bmRequestType = UT_WRITE_CLASS_DEVICE; req.bRequest = UR_SET_FEATURE; USETW(req.wValue, sel); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (usbd_do_request(dev, &req, 0)); } usbd_status usbd_clear_port_feature(usbd_device_handle dev, int port, int sel) { usb_device_request_t req; req.bmRequestType = UT_WRITE_CLASS_OTHER; req.bRequest = UR_CLEAR_FEATURE; USETW(req.wValue, sel); USETW(req.wIndex, port); USETW(req.wLength, 0); return (usbd_do_request(dev, &req, 0)); } usbd_status usbd_set_port_feature(usbd_device_handle dev, int port, int sel) { usb_device_request_t req; req.bmRequestType = UT_WRITE_CLASS_OTHER; req.bRequest = UR_SET_FEATURE; USETW(req.wValue, sel); 
USETW(req.wIndex, port); USETW(req.wLength, 0); return (usbd_do_request(dev, &req, 0)); } usbd_status usbd_set_protocol(usbd_interface_handle iface, int report) { usb_interface_descriptor_t *id = usbd_get_interface_descriptor(iface); usbd_device_handle dev; usb_device_request_t req; DPRINTFN(4, ("usbd_set_protocol: iface=%p, report=%d, endpt=%d\n", iface, report, id->bInterfaceNumber)); if (id == NULL) return (USBD_IOERROR); usbd_interface2device_handle(iface, &dev); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_SET_PROTOCOL; USETW(req.wValue, report); USETW(req.wIndex, id->bInterfaceNumber); USETW(req.wLength, 0); return (usbd_do_request(dev, &req, 0)); } usbd_status usbd_set_report(usbd_interface_handle iface, int type, int id, void *data, int len) { usb_interface_descriptor_t *ifd = usbd_get_interface_descriptor(iface); usbd_device_handle dev; usb_device_request_t req; DPRINTFN(4, ("usbd_set_report: len=%d\n", len)); if (ifd == NULL) return (USBD_IOERROR); usbd_interface2device_handle(iface, &dev); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_SET_REPORT; USETW2(req.wValue, type, id); USETW(req.wIndex, ifd->bInterfaceNumber); USETW(req.wLength, len); return (usbd_do_request(dev, &req, data)); } usbd_status usbd_set_report_async(usbd_interface_handle iface, int type, int id, void *data, int len) { usb_interface_descriptor_t *ifd = usbd_get_interface_descriptor(iface); usbd_device_handle dev; usb_device_request_t req; DPRINTFN(4, ("usbd_set_report_async: len=%d\n", len)); if (ifd == NULL) return (USBD_IOERROR); usbd_interface2device_handle(iface, &dev); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_SET_REPORT; USETW2(req.wValue, type, id); USETW(req.wIndex, ifd->bInterfaceNumber); USETW(req.wLength, len); return (usbd_do_request_async(dev, &req, data)); } usbd_status usbd_get_report(usbd_interface_handle iface, int type, int id, void *data, int len) { usb_interface_descriptor_t *ifd = 
usbd_get_interface_descriptor(iface); usbd_device_handle dev; usb_device_request_t req; DPRINTFN(4, ("usbd_set_report: len=%d\n", len)); if (id == 0) return (USBD_IOERROR); usbd_interface2device_handle(iface, &dev); req.bmRequestType = UT_READ_CLASS_INTERFACE; req.bRequest = UR_GET_REPORT; USETW2(req.wValue, type, id); USETW(req.wIndex, ifd->bInterfaceNumber); USETW(req.wLength, len); return (usbd_do_request(dev, &req, data)); } usbd_status usbd_set_idle(usbd_interface_handle iface, int duration, int id) { usb_interface_descriptor_t *ifd = usbd_get_interface_descriptor(iface); usbd_device_handle dev; usb_device_request_t req; DPRINTFN(4, ("usbd_set_idle: %d %d\n", duration, id)); if (ifd == NULL) return (USBD_IOERROR); usbd_interface2device_handle(iface, &dev); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UR_SET_IDLE; USETW2(req.wValue, duration, id); USETW(req.wIndex, ifd->bInterfaceNumber); USETW(req.wLength, 0); return (usbd_do_request(dev, &req, 0)); } usbd_status usbd_get_report_descriptor(usbd_device_handle dev, int ifcno, int size, void *d) { usb_device_request_t req; req.bmRequestType = UT_READ_INTERFACE; req.bRequest = UR_GET_DESCRIPTOR; USETW2(req.wValue, UDESC_REPORT, 0); /* report id should be 0 */ USETW(req.wIndex, ifcno); USETW(req.wLength, size); return (usbd_do_request(dev, &req, d)); } usb_hid_descriptor_t * usbd_get_hid_descriptor(usbd_interface_handle ifc) { usb_interface_descriptor_t *idesc = usbd_get_interface_descriptor(ifc); usbd_device_handle dev; usb_config_descriptor_t *cdesc; usb_hid_descriptor_t *hd; char *p, *end; if (idesc == NULL) return (0); usbd_interface2device_handle(ifc, &dev); cdesc = usbd_get_config_descriptor(dev); p = (char *)idesc + idesc->bLength; end = (char *)cdesc + UGETW(cdesc->wTotalLength); for (; p < end; p += hd->bLength) { hd = (usb_hid_descriptor_t *)p; if (p + hd->bLength <= end && hd->bDescriptorType == UDESC_HID) return (hd); if (hd->bDescriptorType == UDESC_INTERFACE) break; } return (0); } 
/*
 * Fetch an interface's HID report descriptor into a freshly allocated
 * buffer.  On success *descp and *sizep describe the buffer; ownership
 * passes to the caller, who must free(*descp, mem).  On a failed
 * transfer the buffer is freed and *descp reset to NULL.  Allocation is
 * M_NOWAIT, so USBD_NOMEM is a possible result.
 */
usbd_status
usbd_read_report_desc(usbd_interface_handle ifc, void **descp, int *sizep,
	usb_malloc_type mem)
{
	usb_interface_descriptor_t *id;
	usb_hid_descriptor_t *hid;
	usbd_device_handle dev;
	usbd_status err;

	usbd_interface2device_handle(ifc, &dev);
	id = usbd_get_interface_descriptor(ifc);
	if (id == NULL)
		return (USBD_INVAL);
	hid = usbd_get_hid_descriptor(ifc);
	if (hid == NULL)
		return (USBD_IOERROR);
	/* Size comes from the HID descriptor's first class-descriptor entry. */
	*sizep = UGETW(hid->descrs[0].wDescriptorLength);
	*descp = malloc(*sizep, mem, M_NOWAIT);
	if (*descp == NULL)
		return (USBD_NOMEM);
	err = usbd_get_report_descriptor(dev, id->bInterfaceNumber,
	    *sizep, *descp);
	if (err) {
		/* Don't leak the buffer when the control transfer fails. */
		free(*descp, mem);
		*descp = NULL;
		return (err);
	}
	return (USBD_NORMAL_COMPLETION);
}

/* Read the device's current configuration value (GET_CONFIGURATION). */
usbd_status
usbd_get_config(usbd_device_handle dev, u_int8_t *conf)
{
	usb_device_request_t req;

	req.bmRequestType = UT_READ_DEVICE;
	req.bRequest = UR_GET_CONFIG;
	USETW(req.wValue, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, 1);
	return (usbd_do_request(dev, &req, conf));
}

/* Transfer-done callback: wake the thread sleeping in usbd_bulk_transfer(). */
Static void usbd_bulk_transfer_cb(usbd_xfer_handle xfer,
	usbd_private_handle priv, usbd_status status);
Static void
usbd_bulk_transfer_cb(usbd_xfer_handle xfer, usbd_private_handle priv,
	usbd_status status)
{
	wakeup(xfer);
}

/*
 * Run a bulk transfer synchronously: submit the xfer, then tsleep()
 * until the completion callback wakes us.  splusb() keeps the callback
 * from firing before we are asleep.  An interrupted sleep aborts the
 * pipe and returns USBD_INTERRUPTED; a transfer error clears the
 * endpoint stall before the error is returned (tail of this function
 * continues on the next span).
 */
usbd_status
usbd_bulk_transfer(usbd_xfer_handle xfer, usbd_pipe_handle pipe,
	u_int16_t flags, u_int32_t timeout, void *buf, u_int32_t *size,
	char *lbl)
{
	usbd_status err;
	int s, error;

	usbd_setup_xfer(xfer, pipe, 0, buf, *size, flags, timeout,
	    usbd_bulk_transfer_cb);
	DPRINTFN(1, ("usbd_bulk_transfer: start transfer %d bytes\n", *size));
	s = splusb();		/* don't want callback until tsleep() */
	err = usbd_transfer(xfer);
	if (err != USBD_IN_PROGRESS) {
		splx(s);
		return (err);
	}
	error = tsleep(xfer, PZERO | PCATCH, lbl, 0);
	splx(s);
	if (error) {
		DPRINTF(("usbd_bulk_transfer: tsleep=%d\n", error));
		usbd_abort_pipe(pipe);
		return (USBD_INTERRUPTED);
	}
	usbd_get_xfer_status(xfer, NULL, NULL, size, &err);
	DPRINTFN(1,("usbd_bulk_transfer: transferred %d\n", *size));
	if (err) {
DPRINTF(("usbd_bulk_transfer: error=%d\n", err)); usbd_clear_endpoint_stall(pipe); } return (err); } Static void usbd_intr_transfer_cb(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status); Static void usbd_intr_transfer_cb(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { wakeup(xfer); } usbd_status usbd_intr_transfer(usbd_xfer_handle xfer, usbd_pipe_handle pipe, u_int16_t flags, u_int32_t timeout, void *buf, u_int32_t *size, char *lbl) { usbd_status err; int s, error; usbd_setup_xfer(xfer, pipe, 0, buf, *size, flags, timeout, usbd_intr_transfer_cb); DPRINTFN(1, ("usbd_intr_transfer: start transfer %d bytes\n", *size)); s = splusb(); /* don't want callback until tsleep() */ err = usbd_transfer(xfer); if (err != USBD_IN_PROGRESS) { splx(s); return (err); } error = tsleep(xfer, PZERO | PCATCH, lbl, 0); splx(s); if (error) { DPRINTF(("usbd_intr_transfer: tsleep=%d\n", error)); usbd_abort_pipe(pipe); return (USBD_INTERRUPTED); } usbd_get_xfer_status(xfer, NULL, NULL, size, &err); DPRINTFN(1,("usbd_intr_transfer: transferred %d\n", *size)); if (err) { DPRINTF(("usbd_intr_transfer: error=%d\n", err)); usbd_clear_endpoint_stall(pipe); } return (err); } void usb_detach_wait(device_ptr_t dv) { DPRINTF(("usb_detach_wait: waiting for %s\n", USBDEVPTRNAME(dv))); if (tsleep(dv, PZERO, "usbdet", hz * 60)) printf("usb_detach_wait: %s didn't detach\n", USBDEVPTRNAME(dv)); DPRINTF(("usb_detach_wait: %s done\n", USBDEVPTRNAME(dv))); } void usb_detach_wakeup(device_ptr_t dv) { DPRINTF(("usb_detach_wakeup: for %s\n", USBDEVPTRNAME(dv))); wakeup(dv); } Index: head/sys/dev/usb/uvisor.c =================================================================== --- head/sys/dev/usb/uvisor.c (revision 129878) +++ head/sys/dev/usb/uvisor.c (revision 129879) @@ -1,587 +1,588 @@ /* $NetBSD: uvisor.c,v 1.9 2001/01/23 14:04:14 augustss Exp $ */ /* $FreeBSD$ */ /* Also already merged from NetBSD: * $NetBSD: uvisor.c,v 1.12 2001/11/13 06:24:57 lukem Exp $ * $NetBSD: 
uvisor.c,v 1.13 2002/02/11 15:11:49 augustss Exp $ * $NetBSD: uvisor.c,v 1.14 2002/02/27 23:00:03 augustss Exp $ * $NetBSD: uvisor.c,v 1.15 2002/06/16 15:01:31 augustss Exp $ * $NetBSD: uvisor.c,v 1.16 2002/07/11 21:14:36 augustss Exp $ * $NetBSD: uvisor.c,v 1.17 2002/08/13 11:38:15 augustss Exp $ * $NetBSD: uvisor.c,v 1.18 2003/02/05 00:50:14 augustss Exp $ * $NetBSD: uvisor.c,v 1.19 2003/02/07 18:12:37 augustss Exp $ * $NetBSD: uvisor.c,v 1.20 2003/04/11 01:30:10 simonb Exp $ */ /* * Copyright (c) 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Handspring Visor (Palmpilot compatible PDA) driver */ #include #include #include #if defined(__NetBSD__) || defined(__OpenBSD__) #include #elif defined(__FreeBSD__) +#include #include #endif #include #include #include #include #include #include #include #include #include #ifdef USB_DEBUG #define DPRINTF(x) if (uvisordebug) printf x #define DPRINTFN(n,x) if (uvisordebug>(n)) printf x int uvisordebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, uvisor, CTLFLAG_RW, 0, "USB uvisor"); SYSCTL_INT(_hw_usb_uvisor, OID_AUTO, debug, CTLFLAG_RW, &uvisordebug, 0, "uvisor debug level"); #else #define DPRINTF(x) #define DPRINTFN(n,x) #endif #define UVISOR_CONFIG_INDEX 0 #define UVISOR_IFACE_INDEX 0 #define UVISOR_MODVER 1 /* From the Linux driver */ /* * UVISOR_REQUEST_BYTES_AVAILABLE asks the visor for the number of bytes that * are available to be transfered to the host for the specified endpoint. * Currently this is not used, and always returns 0x0001 */ #define UVISOR_REQUEST_BYTES_AVAILABLE 0x01 /* * UVISOR_CLOSE_NOTIFICATION is set to the device to notify it that the host * is now closing the pipe. An empty packet is sent in response. */ #define UVISOR_CLOSE_NOTIFICATION 0x02 /* * UVISOR_GET_CONNECTION_INFORMATION is sent by the host during enumeration to * get the endpoints used by the connection. 
*/ #define UVISOR_GET_CONNECTION_INFORMATION 0x03 /* * UVISOR_GET_CONNECTION_INFORMATION returns data in the following format */ #define UVISOR_MAX_CONN 8 struct uvisor_connection_info { uWord num_ports; struct { uByte port_function_id; uByte port; } connections[UVISOR_MAX_CONN]; }; #define UVISOR_CONNECTION_INFO_SIZE 18 /* struct uvisor_connection_info.connection[x].port defines: */ #define UVISOR_ENDPOINT_1 0x01 #define UVISOR_ENDPOINT_2 0x02 /* struct uvisor_connection_info.connection[x].port_function_id defines: */ #define UVISOR_FUNCTION_GENERIC 0x00 #define UVISOR_FUNCTION_DEBUGGER 0x01 #define UVISOR_FUNCTION_HOTSYNC 0x02 #define UVISOR_FUNCTION_CONSOLE 0x03 #define UVISOR_FUNCTION_REMOTE_FILE_SYS 0x04 /* * Unknown PalmOS stuff. */ #define UVISOR_GET_PALM_INFORMATION 0x04 #define UVISOR_GET_PALM_INFORMATION_LEN 0x14 /* * Crank down UVISORBUFSIZE from 1024 to 64 to avoid a problem where * the Palm device and the USB host controller deadlock. The USB host * controller is expecting an early-end-of-transmission packet with 0 * data, and the Palm doesn't send one because it's already * communicated the amount of data it's going to send in a header * (which ucom/uvisor are oblivious to). This is the problem that has * been known on the pilot-link lists as the "[Free]BSD USB problem", * but not understood. 
*/ #define UVISORIBUFSIZE 64 #define UVISOROBUFSIZE 1024 struct uvisor_softc { struct ucom_softc sc_ucom; u_int16_t sc_flags; }; Static usbd_status uvisor_init(struct uvisor_softc *); Static usbd_status clie_3_5_init(struct uvisor_softc *); Static void uvisor_close(void *, int); struct ucom_callback uvisor_callback = { NULL, NULL, NULL, NULL, NULL, uvisor_close, NULL, NULL, }; Static device_probe_t uvisor_match; Static device_attach_t uvisor_attach; Static device_detach_t uvisor_detach; Static device_method_t uvisor_methods[] = { /* Device interface */ DEVMETHOD(device_probe, uvisor_match), DEVMETHOD(device_attach, uvisor_attach), DEVMETHOD(device_detach, uvisor_detach), { 0, 0 } }; Static driver_t uvisor_driver = { "ucom", uvisor_methods, sizeof (struct uvisor_softc) }; DRIVER_MODULE(uvisor, uhub, uvisor_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(uvisor, usb, 1, 1, 1); MODULE_DEPEND(uvisor, ucom, UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); MODULE_VERSION(uvisor, UVISOR_MODVER); struct uvisor_type { struct usb_devno uv_dev; u_int16_t uv_flags; #define PALM4 0x0001 }; static const struct uvisor_type uvisor_devs[] = { {{ USB_VENDOR_HANDSPRING, USB_PRODUCT_HANDSPRING_VISOR }, 0 }, {{ USB_VENDOR_HANDSPRING, USB_PRODUCT_HANDSPRING_TREO }, PALM4 }, {{ USB_VENDOR_HANDSPRING, USB_PRODUCT_HANDSPRING_TREO600 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_M500 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_M505 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_M515 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_I705 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_M125 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_M130 }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_TUNGSTEN_Z }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_TUNGSTEN_T }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_ZIRE }, PALM4 }, {{ USB_VENDOR_PALM, USB_PRODUCT_PALM_ZIRE31 }, PALM4 }, {{ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_40 }, 0 }, {{ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_41 }, 
PALM4 }, {{ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_S360 }, PALM4 }, {{ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_NX60 }, PALM4 }, {{ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_35 }, 0 }, /* {{ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_25 }, PALM4 },*/ }; #define uvisor_lookup(v, p) ((const struct uvisor_type *)usb_lookup(uvisor_devs, v, p)) USB_MATCH(uvisor) { USB_MATCH_START(uvisor, uaa); if (uaa->iface != NULL) return (UMATCH_NONE); DPRINTFN(20,("uvisor: vendor=0x%x, product=0x%x\n", uaa->vendor, uaa->product)); return (uvisor_lookup(uaa->vendor, uaa->product) != NULL ? UMATCH_VENDOR_PRODUCT : UMATCH_NONE); } USB_ATTACH(uvisor) { USB_ATTACH_START(uvisor, sc, uaa); usbd_device_handle dev = uaa->device; usbd_interface_handle iface; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char *devinfo; const char *devname; int i; usbd_status err; struct ucom_softc *ucom; devinfo = malloc(1024, M_USBDEV, M_WAITOK); ucom = &sc->sc_ucom; bzero(sc, sizeof (struct uvisor_softc)); usbd_devinfo(dev, 0, devinfo); ucom->sc_dev = self; device_set_desc_copy(self, devinfo); ucom->sc_udev = dev; ucom->sc_iface = uaa->iface; devname = USBDEVNAME(ucom->sc_dev); printf("%s: %s\n", devname, devinfo); DPRINTFN(10,("\nuvisor_attach: sc=%p\n", sc)); /* Move the device into the configured state. 
*/ err = usbd_set_config_index(dev, UVISOR_CONFIG_INDEX, 1); if (err) { printf("\n%s: failed to set configuration, err=%s\n", devname, usbd_errstr(err)); goto bad; } err = usbd_device2interface_handle(dev, UVISOR_IFACE_INDEX, &iface); if (err) { printf("\n%s: failed to get interface, err=%s\n", devname, usbd_errstr(err)); goto bad; } printf("%s: %s\n", devname, devinfo); sc->sc_flags = uvisor_lookup(uaa->vendor, uaa->product)->uv_flags; id = usbd_get_interface_descriptor(iface); ucom->sc_udev = dev; ucom->sc_iface = iface; ucom->sc_bulkin_no = ucom->sc_bulkout_no = -1; for (i = 0; i < id->bNumEndpoints; i++) { int addr, dir, attr; ed = usbd_interface2endpoint_descriptor(iface, i); if (ed == NULL) { printf("%s: could not read endpoint descriptor" ": %s\n", devname, usbd_errstr(err)); goto bad; } addr = ed->bEndpointAddress; dir = UE_GET_DIR(ed->bEndpointAddress); attr = ed->bmAttributes & UE_XFERTYPE; if (dir == UE_DIR_IN && attr == UE_BULK) ucom->sc_bulkin_no = addr; else if (dir == UE_DIR_OUT && attr == UE_BULK) ucom->sc_bulkout_no = addr; else { printf("%s: unexpected endpoint\n", devname); goto bad; } } if (ucom->sc_bulkin_no == -1) { printf("%s: Could not find data bulk in\n", USBDEVNAME(ucom->sc_dev)); goto bad; } if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", USBDEVNAME(ucom->sc_dev)); goto bad; } ucom->sc_parent = sc; ucom->sc_portno = UCOM_UNK_PORTNO; /* bulkin, bulkout set above */ ucom->sc_ibufsize = UVISORIBUFSIZE; ucom->sc_obufsize = UVISOROBUFSIZE; ucom->sc_ibufsizepad = UVISORIBUFSIZE; ucom->sc_opkthdrlen = 0; ucom->sc_callback = &uvisor_callback; if (uaa->vendor == USB_VENDOR_SONY && uaa->product == USB_PRODUCT_SONY_CLIE_35) err = clie_3_5_init(sc); else err = uvisor_init(sc); if (err) { printf("%s: init failed, %s\n", USBDEVNAME(ucom->sc_dev), usbd_errstr(err)); goto bad; } usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, ucom->sc_udev, USBDEV(ucom->sc_dev)); DPRINTF(("uvisor: in=0x%x out=0x%x\n", ucom->sc_bulkin_no, 
ucom->sc_bulkout_no)); ucom_attach(&sc->sc_ucom); USB_ATTACH_SUCCESS_RETURN; bad: DPRINTF(("uvisor_attach: ATTACH ERROR\n")); ucom->sc_dying = 1; USB_ATTACH_ERROR_RETURN; } #if 0 int uvisor_activate(device_ptr_t self, enum devact act) { struct uvisor_softc *sc = (struct uvisor_softc *)self; int rv = 0; switch (act) { case DVACT_ACTIVATE: return (EOPNOTSUPP); break; case DVACT_DEACTIVATE: if (sc->sc_subdev != NULL) rv = config_deactivate(sc->sc_subdev); sc->sc_dying = 1; break; } return (rv); } #endif USB_DETACH(uvisor) { USB_DETACH_START(uvisor, sc); int rv = 0; DPRINTF(("uvisor_detach: sc=%p\n", sc)); sc->sc_ucom.sc_dying = 1; rv = ucom_detach(&sc->sc_ucom); usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_ucom.sc_udev, USBDEV(sc->sc_ucom.sc_dev)); return (rv); } usbd_status uvisor_init(struct uvisor_softc *sc) { usbd_status err; usb_device_request_t req; struct uvisor_connection_info coninfo; int actlen; uWord avail; char buffer[256]; DPRINTF(("uvisor_init: getting connection info\n")); req.bmRequestType = UT_READ_VENDOR_ENDPOINT; req.bRequest = UVISOR_GET_CONNECTION_INFORMATION; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, UVISOR_CONNECTION_INFO_SIZE); err = usbd_do_request_flags(sc->sc_ucom.sc_udev, &req, &coninfo, USBD_SHORT_XFER_OK, &actlen, USBD_DEFAULT_TIMEOUT); if (err) return (err); #ifdef USB_DEBUG { int i, np; char *string; np = UGETW(coninfo.num_ports); printf("%s: Number of ports: %d\n", USBDEVNAME(sc->sc_ucom.sc_dev), np); for (i = 0; i < np; ++i) { switch (coninfo.connections[i].port_function_id) { case UVISOR_FUNCTION_GENERIC: string = "Generic"; break; case UVISOR_FUNCTION_DEBUGGER: string = "Debugger"; break; case UVISOR_FUNCTION_HOTSYNC: string = "HotSync"; break; case UVISOR_FUNCTION_REMOTE_FILE_SYS: string = "Remote File System"; break; default: string = "unknown"; break; } printf("%s: port %d, is for %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), coninfo.connections[i].port, string); } } #endif if (sc->sc_flags & PALM4) { /* Palm 
OS 4.0 Hack */ req.bmRequestType = UT_READ_VENDOR_ENDPOINT; req.bRequest = UVISOR_GET_PALM_INFORMATION; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, UVISOR_GET_PALM_INFORMATION_LEN); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, buffer); if (err) return (err); req.bmRequestType = UT_READ_VENDOR_ENDPOINT; req.bRequest = UVISOR_GET_PALM_INFORMATION; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, UVISOR_GET_PALM_INFORMATION_LEN); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, buffer); if (err) return (err); } DPRINTF(("uvisor_init: getting available bytes\n")); req.bmRequestType = UT_READ_VENDOR_ENDPOINT; req.bRequest = UVISOR_REQUEST_BYTES_AVAILABLE; USETW(req.wValue, 0); USETW(req.wIndex, 5); USETW(req.wLength, sizeof avail); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, &avail); if (err) return (err); DPRINTF(("uvisor_init: avail=%d\n", UGETW(avail))); DPRINTF(("uvisor_init: done\n")); return (err); } usbd_status clie_3_5_init(struct uvisor_softc *sc) { usbd_status err; usb_device_request_t req; char buffer[256]; /* * Note that PEG-300 series devices expect the following two calls. 
*/ /* get the config number */ DPRINTF(("clie_3_5_init: getting config info\n")); req.bmRequestType = UT_READ; req.bRequest = UR_GET_CONFIG; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 1); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, buffer); if (err) return (err); /* get the interface number */ DPRINTF(("clie_3_5_init: get the interface number\n")); req.bmRequestType = UT_READ_DEVICE; req.bRequest = UR_GET_INTERFACE; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 1); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, buffer); if (err) return (err); #ifdef USB_DEBUG { struct uvisor_connection_info coninfo; int i, np; char *string; np = UGETW(coninfo.num_ports); DPRINTF(("%s: Number of ports: %d\n", USBDEVNAME(sc->sc_ucom.sc_dev), np)); for (i = 0; i < np; ++i) { switch (coninfo.connections[i].port_function_id) { case UVISOR_FUNCTION_GENERIC: string = "Generic"; break; case UVISOR_FUNCTION_DEBUGGER: string = "Debugger"; break; case UVISOR_FUNCTION_HOTSYNC: string = "HotSync"; break; case UVISOR_FUNCTION_REMOTE_FILE_SYS: string = "Remote File System"; break; default: string = "unknown"; break; } DPRINTF(("%s: port %d, is for %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), coninfo.connections[i].port, string)); } } #endif DPRINTF(("clie_3_5_init: done\n")); return (err); } void uvisor_close(void *addr, int portno) { struct uvisor_softc *sc = addr; usb_device_request_t req; struct uvisor_connection_info coninfo; /* XXX ? */ int actlen; if (sc->sc_ucom.sc_dying) return; req.bmRequestType = UT_READ_VENDOR_ENDPOINT; /* XXX read? 
*/ req.bRequest = UVISOR_CLOSE_NOTIFICATION; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, UVISOR_CONNECTION_INFO_SIZE); (void)usbd_do_request_flags(sc->sc_ucom.sc_udev, &req, &coninfo, USBD_SHORT_XFER_OK, &actlen, USBD_DEFAULT_TIMEOUT); } Index: head/sys/dev/usb/uvscom.c =================================================================== --- head/sys/dev/usb/uvscom.c (revision 129878) +++ head/sys/dev/usb/uvscom.c (revision 129879) @@ -1,946 +1,947 @@ /* $NetBSD: usb/uvscom.c,v 1.1 2002/03/19 15:08:42 augustss Exp $ */ /*- * Copyright (c) 2001-2002, Shunsuke Akiyama . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * uvscom: SUNTAC Slipper U VS-10U driver. 
* Slipper U is a PC card to USB converter for data communication card * adapter. It supports DDI Pocket's Air H" C@rd, C@rd H" 64, NTT's P-in, * P-in m@ater and various data communication card adapters. */ #include "opt_uvscom.h" #include #include #include #include +#include #include #include #include #include #if defined(__FreeBSD__) #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #else #include #include #endif #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw_usb, OID_AUTO, uvscom, CTLFLAG_RW, 0, "USB uvscom"); #ifdef USB_DEBUG static int uvscomdebug = 0; SYSCTL_INT(_hw_usb_uvscom, OID_AUTO, debug, CTLFLAG_RW, &uvscomdebug, 0, "uvscom debug level"); #define DPRINTFN(n, x) do { \ if (uvscomdebug > (n)) \ logprintf x; \ } while (0) #else #define DPRINTFN(n, x) #endif #define DPRINTF(x) DPRINTFN(0, x) #define UVSCOM_MODVER 1 /* module version */ #define UVSCOM_CONFIG_INDEX 0 #define UVSCOM_IFACE_INDEX 0 #ifndef UVSCOM_INTR_INTERVAL #define UVSCOM_INTR_INTERVAL 100 /* mS */ #endif #define UVSCOM_UNIT_WAIT 5 /* Request */ #define UVSCOM_SET_SPEED 0x10 #define UVSCOM_LINE_CTL 0x11 #define UVSCOM_SET_PARAM 0x12 #define UVSCOM_READ_STATUS 0xd0 #define UVSCOM_SHUTDOWN 0xe0 /* UVSCOM_SET_SPEED parameters */ #define UVSCOM_SPEED_150BPS 0x00 #define UVSCOM_SPEED_300BPS 0x01 #define UVSCOM_SPEED_600BPS 0x02 #define UVSCOM_SPEED_1200BPS 0x03 #define UVSCOM_SPEED_2400BPS 0x04 #define UVSCOM_SPEED_4800BPS 0x05 #define UVSCOM_SPEED_9600BPS 0x06 #define UVSCOM_SPEED_19200BPS 0x07 #define UVSCOM_SPEED_38400BPS 0x08 #define UVSCOM_SPEED_57600BPS 0x09 #define UVSCOM_SPEED_115200BPS 0x0a /* UVSCOM_LINE_CTL parameters */ #define UVSCOM_BREAK 0x40 #define UVSCOM_RTS 0x02 #define UVSCOM_DTR 0x01 #define UVSCOM_LINE_INIT 0x08 /* UVSCOM_SET_PARAM parameters */ #define UVSCOM_DATA_MASK 0x03 #define UVSCOM_DATA_BIT_8 0x03 #define UVSCOM_DATA_BIT_7 0x02 #define UVSCOM_DATA_BIT_6 0x01 #define 
UVSCOM_DATA_BIT_5 0x00 #define UVSCOM_STOP_MASK 0x04 #define UVSCOM_STOP_BIT_2 0x04 #define UVSCOM_STOP_BIT_1 0x00 #define UVSCOM_PARITY_MASK 0x18 #define UVSCOM_PARITY_EVEN 0x18 #if 0 #define UVSCOM_PARITY_UNK 0x10 #endif #define UVSCOM_PARITY_ODD 0x08 #define UVSCOM_PARITY_NONE 0x00 /* Status bits */ #define UVSCOM_TXRDY 0x04 #define UVSCOM_RXRDY 0x01 #define UVSCOM_DCD 0x08 #define UVSCOM_NOCARD 0x04 #define UVSCOM_DSR 0x02 #define UVSCOM_CTS 0x01 #define UVSCOM_USTAT_MASK (UVSCOM_NOCARD | UVSCOM_DSR | UVSCOM_CTS) struct uvscom_softc { struct ucom_softc sc_ucom; int sc_iface_number;/* interface number */ usbd_interface_handle sc_intr_iface; /* interrupt interface */ int sc_intr_number; /* interrupt number */ usbd_pipe_handle sc_intr_pipe; /* interrupt pipe */ u_char *sc_intr_buf; /* interrupt buffer */ int sc_isize; u_char sc_dtr; /* current DTR state */ u_char sc_rts; /* current RTS state */ u_char sc_lsr; /* Local status register */ u_char sc_msr; /* uvscom status register */ uint16_t sc_lcr; /* Line control */ u_char sc_usr; /* unit status */ }; /* * These are the maximum number of bytes transferred per frame. * The output buffer size cannot be increased due to the size encoding. 
*/ #define UVSCOMIBUFSIZE 512 #define UVSCOMOBUFSIZE 64 #ifndef UVSCOM_DEFAULT_OPKTSIZE #define UVSCOM_DEFAULT_OPKTSIZE 8 #endif Static usbd_status uvscom_shutdown(struct uvscom_softc *); Static usbd_status uvscom_reset(struct uvscom_softc *); Static usbd_status uvscom_set_line_coding(struct uvscom_softc *, uint16_t, uint16_t); Static usbd_status uvscom_set_line(struct uvscom_softc *, uint16_t); Static usbd_status uvscom_set_crtscts(struct uvscom_softc *); Static void uvscom_get_status(void *, int, u_char *, u_char *); Static void uvscom_dtr(struct uvscom_softc *, int); Static void uvscom_rts(struct uvscom_softc *, int); Static void uvscom_break(struct uvscom_softc *, int); Static void uvscom_set(void *, int, int, int); Static void uvscom_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); #if TODO Static int uvscom_ioctl(void *, int, u_long, caddr_t, int, usb_proc_ptr); #endif Static int uvscom_param(void *, int, struct termios *); Static int uvscom_open(void *, int); Static void uvscom_close(void *, int); struct ucom_callback uvscom_callback = { uvscom_get_status, uvscom_set, uvscom_param, NULL, /* uvscom_ioctl, TODO */ uvscom_open, uvscom_close, NULL, NULL }; static const struct usb_devno uvscom_devs [] = { /* SUNTAC U-Cable type A4 */ { USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_AS144L4 }, /* SUNTAC U-Cable type D2 */ { USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_DS96L }, /* SUNTAC Ir-Trinity */ { USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_IS96U }, /* SUNTAC U-Cable type P1 */ { USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_PS64P1 }, /* SUNTAC Slipper U */ { USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_VS10U }, }; #define uvscom_lookup(v, p) usb_lookup(uvscom_devs, v, p) Static device_probe_t uvscom_match; Static device_attach_t uvscom_attach; Static device_detach_t uvscom_detach; Static device_method_t uvscom_methods[] = { /* Device interface */ DEVMETHOD(device_probe, uvscom_match), DEVMETHOD(device_attach, uvscom_attach), DEVMETHOD(device_detach, uvscom_detach), { 0, 0 } }; Static 
driver_t uvscom_driver = { "ucom", uvscom_methods, sizeof (struct uvscom_softc) }; DRIVER_MODULE(uvscom, uhub, uvscom_driver, ucom_devclass, usbd_driver_load, 0); MODULE_DEPEND(uvscom, usb, 1, 1, 1); MODULE_DEPEND(uvscom, ucom, UCOM_MINVER, UCOM_PREFVER, UCOM_MAXVER); MODULE_VERSION(uvscom, UVSCOM_MODVER); static int uvscomobufsiz = UVSCOM_DEFAULT_OPKTSIZE; static int uvscominterval = UVSCOM_INTR_INTERVAL; static int sysctl_hw_usb_uvscom_opktsize(SYSCTL_HANDLER_ARGS) { int err, val; val = uvscomobufsiz; err = sysctl_handle_int(oidp, &val, sizeof(val), req); if (err != 0 || req->newptr == NULL) return (err); if (0 < val && val <= UVSCOMOBUFSIZE) uvscomobufsiz = val; else err = EINVAL; return (err); } static int sysctl_hw_usb_uvscom_interval(SYSCTL_HANDLER_ARGS) { int err, val; val = uvscominterval; err = sysctl_handle_int(oidp, &val, sizeof(val), req); if (err != 0 || req->newptr == NULL) return (err); if (0 < val && val <= 1000) uvscominterval = val; else err = EINVAL; return (err); } SYSCTL_PROC(_hw_usb_uvscom, OID_AUTO, opktsize, CTLTYPE_INT | CTLFLAG_RW, 0, sizeof(int), sysctl_hw_usb_uvscom_opktsize, "I", "uvscom output packet size"); SYSCTL_PROC(_hw_usb_uvscom, OID_AUTO, interval, CTLTYPE_INT | CTLFLAG_RW, 0, sizeof(int), sysctl_hw_usb_uvscom_interval, "I", "uvscom interrpt pipe interval"); USB_MATCH(uvscom) { USB_MATCH_START(uvscom, uaa); if (uaa->iface != NULL) return (UMATCH_NONE); return (uvscom_lookup(uaa->vendor, uaa->product) != NULL ? 
UMATCH_VENDOR_PRODUCT : UMATCH_NONE); } USB_ATTACH(uvscom) { USB_ATTACH_START(uvscom, sc, uaa); usbd_device_handle dev = uaa->device; struct ucom_softc *ucom; usb_config_descriptor_t *cdesc; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char *devinfo; const char *devname; usbd_status err; int i; devinfo = malloc(1024, M_USBDEV, M_WAITOK); ucom = &sc->sc_ucom; bzero(sc, sizeof (struct uvscom_softc)); usbd_devinfo(dev, 0, devinfo); /* USB_ATTACH_SETUP; */ ucom->sc_dev = self; device_set_desc_copy(self, devinfo); /* USB_ATTACH_SETUP; */ ucom->sc_udev = dev; ucom->sc_iface = uaa->iface; devname = USBDEVNAME(ucom->sc_dev); printf("%s: %s\n", devname, devinfo); DPRINTF(("uvscom attach: sc = %p\n", sc)); /* initialize endpoints */ ucom->sc_bulkin_no = ucom->sc_bulkout_no = -1; sc->sc_intr_number = -1; sc->sc_intr_pipe = NULL; /* Move the device into the configured state. */ err = usbd_set_config_index(dev, UVSCOM_CONFIG_INDEX, 1); if (err) { printf("%s: failed to set configuration, err=%s\n", devname, usbd_errstr(err)); goto error; } /* get the config descriptor */ cdesc = usbd_get_config_descriptor(ucom->sc_udev); if (cdesc == NULL) { printf("%s: failed to get configuration descriptor\n", USBDEVNAME(ucom->sc_dev)); goto error; } /* get the common interface */ err = usbd_device2interface_handle(dev, UVSCOM_IFACE_INDEX, &ucom->sc_iface); if (err) { printf("%s: failed to get interface, err=%s\n", devname, usbd_errstr(err)); goto error; } id = usbd_get_interface_descriptor(ucom->sc_iface); sc->sc_iface_number = id->bInterfaceNumber; /* Find endpoints */ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(ucom->sc_iface, i); if (ed == NULL) { printf("%s: no endpoint descriptor for %d\n", USBDEVNAME(ucom->sc_dev), i); goto error; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkin_no = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT 
&& UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { ucom->sc_bulkout_no = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->sc_intr_number = ed->bEndpointAddress; sc->sc_isize = UGETW(ed->wMaxPacketSize); } } if (ucom->sc_bulkin_no == -1) { printf("%s: Could not find data bulk in\n", USBDEVNAME(ucom->sc_dev)); goto error; } if (ucom->sc_bulkout_no == -1) { printf("%s: Could not find data bulk out\n", USBDEVNAME(ucom->sc_dev)); goto error; } if (sc->sc_intr_number == -1) { printf("%s: Could not find interrupt in\n", USBDEVNAME(ucom->sc_dev)); goto error; } sc->sc_dtr = sc->sc_rts = 0; sc->sc_lcr = UVSCOM_LINE_INIT; ucom->sc_parent = sc; ucom->sc_portno = UCOM_UNK_PORTNO; /* bulkin, bulkout set above */ ucom->sc_ibufsize = UVSCOMIBUFSIZE; ucom->sc_obufsize = uvscomobufsiz; ucom->sc_ibufsizepad = UVSCOMIBUFSIZE; ucom->sc_opkthdrlen = 0; ucom->sc_callback = &uvscom_callback; err = uvscom_reset(sc); if (err) { printf("%s: reset failed, %s\n", USBDEVNAME(ucom->sc_dev), usbd_errstr(err)); goto error; } DPRINTF(("uvscom: in = 0x%x out = 0x%x intr = 0x%x\n", ucom->sc_bulkin_no, ucom->sc_bulkout_no, sc->sc_intr_number)); ucom_attach(&sc->sc_ucom); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; error: ucom->sc_dying = 1; free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } USB_DETACH(uvscom) { USB_DETACH_START(uvscom, sc); int rv = 0; DPRINTF(("uvscom_detach: sc = %p\n", sc)); sc->sc_ucom.sc_dying = 1; if (sc->sc_intr_pipe != NULL) { usbd_abort_pipe(sc->sc_intr_pipe); usbd_close_pipe(sc->sc_intr_pipe); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } rv = ucom_detach(&sc->sc_ucom); return (rv); } Static usbd_status uvscom_readstat(struct uvscom_softc *sc) { usb_device_request_t req; usbd_status err; uint16_t r; DPRINTF(("%s: send readstat\n", USBDEVNAME(sc->sc_ucom.sc_dev))); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UVSCOM_READ_STATUS; USETW(req.wValue, 0); 
USETW(req.wIndex, 0); USETW(req.wLength, 2); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, &r); if (err) { printf("%s: uvscom_readstat: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } DPRINTF(("%s: uvscom_readstat: r = %d\n", USBDEVNAME(sc->sc_ucom.sc_dev), r)); return (USBD_NORMAL_COMPLETION); } Static usbd_status uvscom_shutdown(struct uvscom_softc *sc) { usb_device_request_t req; usbd_status err; DPRINTF(("%s: send shutdown\n", USBDEVNAME(sc->sc_ucom.sc_dev))); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UVSCOM_SHUTDOWN; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, NULL); if (err) { printf("%s: uvscom_shutdown: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } return (USBD_NORMAL_COMPLETION); } Static usbd_status uvscom_reset(struct uvscom_softc *sc) { DPRINTF(("%s: uvscom_reset\n", USBDEVNAME(sc->sc_ucom.sc_dev))); return (USBD_NORMAL_COMPLETION); } Static usbd_status uvscom_set_crtscts(struct uvscom_softc *sc) { DPRINTF(("%s: uvscom_set_crtscts\n", USBDEVNAME(sc->sc_ucom.sc_dev))); return (USBD_NORMAL_COMPLETION); } Static usbd_status uvscom_set_line(struct uvscom_softc *sc, uint16_t line) { usb_device_request_t req; usbd_status err; DPRINTF(("%s: uvscom_set_line: %04x\n", USBDEVNAME(sc->sc_ucom.sc_dev), line)); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UVSCOM_LINE_CTL; USETW(req.wValue, line); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, NULL); if (err) { printf("%s: uvscom_set_line: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } return (USBD_NORMAL_COMPLETION); } Static usbd_status uvscom_set_line_coding(struct uvscom_softc *sc, uint16_t lsp, uint16_t ls) { usb_device_request_t req; usbd_status err; DPRINTF(("%s: uvscom_set_line_coding: %02x %02x\n", USBDEVNAME(sc->sc_ucom.sc_dev), lsp, ls)); req.bmRequestType = 
UT_WRITE_VENDOR_DEVICE; req.bRequest = UVSCOM_SET_SPEED; USETW(req.wValue, lsp); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, NULL); if (err) { printf("%s: uvscom_set_line_coding: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UVSCOM_SET_PARAM; USETW(req.wValue, ls); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request(sc->sc_ucom.sc_udev, &req, NULL); if (err) { printf("%s: uvscom_set_line_coding: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); return (err); } return (USBD_NORMAL_COMPLETION); } Static void uvscom_dtr(struct uvscom_softc *sc, int onoff) { DPRINTF(("%s: uvscom_dtr: onoff = %d\n", USBDEVNAME(sc->sc_ucom.sc_dev), onoff)); if (sc->sc_dtr == onoff) return; /* no change */ sc->sc_dtr = onoff; if (onoff) SET(sc->sc_lcr, UVSCOM_DTR); else CLR(sc->sc_lcr, UVSCOM_DTR); uvscom_set_line(sc, sc->sc_lcr); } Static void uvscom_rts(struct uvscom_softc *sc, int onoff) { DPRINTF(("%s: uvscom_rts: onoff = %d\n", USBDEVNAME(sc->sc_ucom.sc_dev), onoff)); if (sc->sc_rts == onoff) return; /* no change */ sc->sc_rts = onoff; if (onoff) SET(sc->sc_lcr, UVSCOM_RTS); else CLR(sc->sc_lcr, UVSCOM_RTS); uvscom_set_line(sc, sc->sc_lcr); } Static void uvscom_break(struct uvscom_softc *sc, int onoff) { DPRINTF(("%s: uvscom_break: onoff = %d\n", USBDEVNAME(sc->sc_ucom.sc_dev), onoff)); if (onoff) uvscom_set_line(sc, SET(sc->sc_lcr, UVSCOM_BREAK)); } Static void uvscom_set(void *addr, int portno, int reg, int onoff) { struct uvscom_softc *sc = addr; switch (reg) { case UCOM_SET_DTR: uvscom_dtr(sc, onoff); break; case UCOM_SET_RTS: uvscom_rts(sc, onoff); break; case UCOM_SET_BREAK: uvscom_break(sc, onoff); break; default: break; } } Static int uvscom_param(void *addr, int portno, struct termios *t) { struct uvscom_softc *sc = addr; usbd_status err; uint16_t lsp; uint16_t ls; DPRINTF(("%s: uvscom_param: sc = %p\n", 
USBDEVNAME(sc->sc_ucom.sc_dev), sc)); ls = 0; switch (t->c_ospeed) { case B150: lsp = UVSCOM_SPEED_150BPS; break; case B300: lsp = UVSCOM_SPEED_300BPS; break; case B600: lsp = UVSCOM_SPEED_600BPS; break; case B1200: lsp = UVSCOM_SPEED_1200BPS; break; case B2400: lsp = UVSCOM_SPEED_2400BPS; break; case B4800: lsp = UVSCOM_SPEED_4800BPS; break; case B9600: lsp = UVSCOM_SPEED_9600BPS; break; case B19200: lsp = UVSCOM_SPEED_19200BPS; break; case B38400: lsp = UVSCOM_SPEED_38400BPS; break; case B57600: lsp = UVSCOM_SPEED_57600BPS; break; case B115200: lsp = UVSCOM_SPEED_115200BPS; break; default: return (EIO); } if (ISSET(t->c_cflag, CSTOPB)) SET(ls, UVSCOM_STOP_BIT_2); else SET(ls, UVSCOM_STOP_BIT_1); if (ISSET(t->c_cflag, PARENB)) { if (ISSET(t->c_cflag, PARODD)) SET(ls, UVSCOM_PARITY_ODD); else SET(ls, UVSCOM_PARITY_EVEN); } else SET(ls, UVSCOM_PARITY_NONE); switch (ISSET(t->c_cflag, CSIZE)) { case CS5: SET(ls, UVSCOM_DATA_BIT_5); break; case CS6: SET(ls, UVSCOM_DATA_BIT_6); break; case CS7: SET(ls, UVSCOM_DATA_BIT_7); break; case CS8: SET(ls, UVSCOM_DATA_BIT_8); break; default: return (EIO); } err = uvscom_set_line_coding(sc, lsp, ls); if (err) return (EIO); if (ISSET(t->c_cflag, CRTSCTS)) { err = uvscom_set_crtscts(sc); if (err) return (EIO); } return (0); } Static int uvscom_open(void *addr, int portno) { struct uvscom_softc *sc = addr; int err; int i; if (sc->sc_ucom.sc_dying) return (ENXIO); DPRINTF(("uvscom_open: sc = %p\n", sc)); /* change output packet size */ sc->sc_ucom.sc_obufsize = uvscomobufsiz; if (sc->sc_intr_number != -1 && sc->sc_intr_pipe == NULL) { DPRINTF(("uvscom_open: open interrupt pipe.\n")); sc->sc_usr = 0; /* clear unit status */ err = uvscom_readstat(sc); if (err) { DPRINTF(("%s: uvscom_open: readstat faild\n", USBDEVNAME(sc->sc_ucom.sc_dev))); return (ENXIO); } sc->sc_intr_buf = malloc(sc->sc_isize, M_USBDEV, M_WAITOK); err = usbd_open_pipe_intr(sc->sc_ucom.sc_iface, sc->sc_intr_number, USBD_SHORT_XFER_OK, &sc->sc_intr_pipe, sc, 
sc->sc_intr_buf, sc->sc_isize, uvscom_intr, uvscominterval); if (err) { printf("%s: cannot open interrupt pipe (addr %d)\n", USBDEVNAME(sc->sc_ucom.sc_dev), sc->sc_intr_number); return (ENXIO); } } else { DPRINTF(("uvscom_open: did not open interrupt pipe.\n")); } if ((sc->sc_usr & UVSCOM_USTAT_MASK) == 0) { /* unit is not ready */ for (i = UVSCOM_UNIT_WAIT; i > 0; --i) { tsleep(&err, TTIPRI, "uvsop", hz); /* XXX */ if (ISSET(sc->sc_usr, UVSCOM_USTAT_MASK)) break; } if (i == 0) { DPRINTF(("%s: unit is not ready\n", USBDEVNAME(sc->sc_ucom.sc_dev))); return (ENXIO); } /* check PC card was inserted */ if (ISSET(sc->sc_usr, UVSCOM_NOCARD)) { DPRINTF(("%s: no card\n", USBDEVNAME(sc->sc_ucom.sc_dev))); return (ENXIO); } } return (0); } Static void uvscom_close(void *addr, int portno) { struct uvscom_softc *sc = addr; int err; if (sc->sc_ucom.sc_dying) return; DPRINTF(("uvscom_close: close\n")); uvscom_shutdown(sc); if (sc->sc_intr_pipe != NULL) { err = usbd_abort_pipe(sc->sc_intr_pipe); if (err) printf("%s: abort interrupt pipe failed: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_intr_pipe); if (err) printf("%s: close interrupt pipe failed: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(err)); free(sc->sc_intr_buf, M_USBDEV); sc->sc_intr_pipe = NULL; } } Static void uvscom_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct uvscom_softc *sc = priv; u_char *buf = sc->sc_intr_buf; u_char pstatus; if (sc->sc_ucom.sc_dying) return; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; printf("%s: uvscom_intr: abnormal status: %s\n", USBDEVNAME(sc->sc_ucom.sc_dev), usbd_errstr(status)); usbd_clear_endpoint_stall_async(sc->sc_intr_pipe); return; } DPRINTFN(2, ("%s: uvscom status = %02x %02x\n", USBDEVNAME(sc->sc_ucom.sc_dev), buf[0], buf[1])); sc->sc_lsr = sc->sc_msr = 0; sc->sc_usr = buf[1]; pstatus = buf[0]; if (ISSET(pstatus, UVSCOM_TXRDY)) 
SET(sc->sc_lsr, ULSR_TXRDY); if (ISSET(pstatus, UVSCOM_RXRDY)) SET(sc->sc_lsr, ULSR_RXRDY); pstatus = buf[1]; if (ISSET(pstatus, UVSCOM_CTS)) SET(sc->sc_msr, UMSR_CTS); if (ISSET(pstatus, UVSCOM_DSR)) SET(sc->sc_msr, UMSR_DSR); if (ISSET(pstatus, UVSCOM_DCD)) SET(sc->sc_msr, UMSR_DCD); ucom_status_change(&sc->sc_ucom); } Static void uvscom_get_status(void *addr, int portno, u_char *lsr, u_char *msr) { struct uvscom_softc *sc = addr; if (lsr != NULL) *lsr = sc->sc_lsr; if (msr != NULL) *msr = sc->sc_msr; } #if TODO Static int uvscom_ioctl(void *addr, int portno, u_long cmd, caddr_t data, int flag, usb_proc_ptr p) { struct uvscom_softc *sc = addr; int error = 0; if (sc->sc_ucom.sc_dying) return (EIO); DPRINTF(("uvscom_ioctl: cmd = 0x%08lx\n", cmd)); switch (cmd) { case TIOCNOTTY: case TIOCMGET: case TIOCMSET: break; default: DPRINTF(("uvscom_ioctl: unknown\n")); error = ENOTTY; break; } return (error); } #endif Index: head/sys/dev/utopia/utopia.c =================================================================== --- head/sys/dev/utopia/utopia.c (revision 129878) +++ head/sys/dev/utopia/utopia.c (revision 129879) @@ -1,1563 +1,1564 @@ /* * Copyright (c) 2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #define READREGS(UTOPIA, REG, VALP, NP) \ (UTOPIA)->methods->readregs((UTOPIA)->ifatm, REG, VALP, NP) #define WRITEREG(UTOPIA, REG, MASK, VAL) \ (UTOPIA)->methods->writereg((UTOPIA)->ifatm, REG, MASK, VAL) /* * Global list of all registered interfaces */ static struct mtx utopia_list_mtx; static LIST_HEAD(, utopia) utopia_list = LIST_HEAD_INITIALIZER(utopia_list); #define UTP_RLOCK_LIST() mtx_lock(&utopia_list_mtx) #define UTP_RUNLOCK_LIST() mtx_unlock(&utopia_list_mtx) #define UTP_WLOCK_LIST() mtx_lock(&utopia_list_mtx) #define UTP_WUNLOCK_LIST() mtx_unlock(&utopia_list_mtx) #define UTP_LOCK(UTP) mtx_lock((UTP)->lock) #define UTP_UNLOCK(UTP) mtx_unlock((UTP)->lock) #define UTP_LOCK_ASSERT(UTP) mtx_assert((UTP)->lock, MA_OWNED) static struct proc *utopia_kproc; static void utopia_dump(struct utopia *) __unused; /* * Statistics update inlines */ static uint32_t utp_update(struct utopia *utp, u_int reg, u_int nreg, uint32_t mask) { int err; u_int n; uint8_t regs[4]; uint32_t val; n 
= nreg; if ((err = READREGS(utp, reg, regs, &n)) != 0) { #ifdef DIAGNOSTIC printf("%s: register read error %s(%u,%u): %d\n", __func__, utp->chip->name, reg, nreg, err); #endif return (0); } if (n < nreg) { #ifdef DIAGNOSTIC printf("%s: got only %u regs %s(%u,%u): %d\n", __func__, n, utp->chip->name, reg, nreg, err); #endif return (0); } val = 0; for (n = nreg; n > 0; n--) { val <<= 8; val |= regs[n - 1]; } return (val & mask); } #define UPDATE8(UTP, REG) utp_update(UTP, REG, 1, 0xff) #define UPDATE12(UTP, REG) utp_update(UTP, REG, 2, 0xfff) #define UPDATE16(UTP, REG) utp_update(UTP, REG, 2, 0xffff) #define UPDATE19(UTP, REG) utp_update(UTP, REG, 3, 0x7ffff) #define UPDATE20(UTP, REG) utp_update(UTP, REG, 3, 0xfffff) #define UPDATE21(UTP, REG) utp_update(UTP, REG, 3, 0x1fffff) /* * Debugging - dump all registers. */ static void utopia_dump(struct utopia *utp) { uint8_t regs[256]; u_int n = 256, i; int err; if ((err = READREGS(utp, SUNI_REGO_MRESET, regs, &n)) != 0) { printf("SUNI read error %d\n", err); return; } for (i = 0; i < n; i++) { if (i % 16 == 0) printf("%02x:", i); if (i % 16 == 8) printf(" "); printf(" %02x", regs[i]); if (i % 16 == 15) printf("\n"); } if (i % 16 != 0) printf("\n"); } /* * Update the carrier status */ static void utopia_check_carrier(struct utopia *utp, u_int carr_ok) { int old; old = utp->carrier; if (carr_ok) { /* carrier */ utp->carrier = UTP_CARR_OK; if (old != UTP_CARR_OK) { if_printf(&utp->ifatm->ifnet, "carrier detected\n"); ATMEV_SEND_IFSTATE_CHANGED(utp->ifatm, 1); } } else { /* no carrier */ utp->carrier = UTP_CARR_LOST; if (old == UTP_CARR_OK) { if_printf(&utp->ifatm->ifnet, "carrier lost\n"); ATMEV_SEND_IFSTATE_CHANGED(utp->ifatm, 0); } } } static int utopia_update_carrier_default(struct utopia *utp) { int err; uint8_t reg; u_int n = 1; if ((err = READREGS(utp, SUNI_REGO_RSOPSIS, ®, &n)) != 0) { utp->carrier = UTP_CARR_UNKNOWN; return (err); } utopia_check_carrier(utp, !(reg & SUNI_REGM_RSOPSIS_LOSV)); return (0); } /* * 
enable/disable scrambling */ static int utopia_set_noscramb_default(struct utopia *utp, int noscramb) { int err; if (noscramb) { err = WRITEREG(utp, SUNI_REGO_TACPCTRL, SUNI_REGM_TACPCTRL_DSCR, SUNI_REGM_TACPCTRL_DSCR); if (err) return (err); err = WRITEREG(utp, SUNI_REGO_RACPCTRL, SUNI_REGM_RACPCTRL_DDSCR, SUNI_REGM_RACPCTRL_DDSCR); if (err) return (err); utp->state |= UTP_ST_NOSCRAMB; } else { err = WRITEREG(utp, SUNI_REGO_TACPCTRL, SUNI_REGM_TACPCTRL_DSCR, 0); if (err) return (err); err = WRITEREG(utp, SUNI_REGO_RACPCTRL, SUNI_REGM_RACPCTRL_DDSCR, 0); if (err) return (err); utp->state &= ~UTP_ST_NOSCRAMB; } return (0); } /* * set SONET/SDH mode */ static int utopia_set_sdh_default(struct utopia *utp, int sdh) { int err; if (sdh) err = WRITEREG(utp, SUNI_REGO_TPOPAPTR + 1, SUNI_REGM_TPOPAPTR_S, SUNI_REGM_SDH << SUNI_REGS_TPOPAPTR_S); else err = WRITEREG(utp, SUNI_REGO_TPOPAPTR + 1, SUNI_REGM_TPOPAPTR_S, SUNI_REGM_SONET << SUNI_REGS_TPOPAPTR_S); if (err != 0) return (err); utp->state &= ~UTP_ST_SDH; if (sdh) utp->state |= UTP_ST_SDH; return (0); } /* * set idle/unassigned cells */ static int utopia_set_unass_default(struct utopia *utp, int unass) { int err; if (unass) err = WRITEREG(utp, SUNI_REGO_TACPIDLEH, 0xff, (0 << SUNI_REGS_TACPIDLEH_CLP)); else err = WRITEREG(utp, SUNI_REGO_TACPIDLEH, 0xff, (1 << SUNI_REGS_TACPIDLEH_CLP)); if (err != 0) return (err); utp->state &= ~UTP_ST_UNASS; if (unass) utp->state |= UTP_ST_UNASS; return (0); } /* * Set loopback mode for the Lite */ static int utopia_set_loopback_lite(struct utopia *utp, u_int mode) { int err; uint32_t val; u_int nmode; val = 0; nmode = mode; if (mode & UTP_LOOP_TIME) { nmode &= ~UTP_LOOP_TIME; val |= SUNI_REGM_MCTRL_LOOPT; } if (mode & UTP_LOOP_DIAG) { nmode &= ~UTP_LOOP_DIAG; val |= SUNI_REGM_MCTRL_DLE; } if (mode & UTP_LOOP_LINE) { nmode &= ~UTP_LOOP_LINE; if (val & SUNI_REGM_MCTRL_DLE) return (EINVAL); val |= SUNI_REGM_MCTRL_LLE; } if (nmode != 0) return (EINVAL); err = WRITEREG(utp, SUNI_REGO_MCTRL, 
SUNI_REGM_MCTRL_LLE | SUNI_REGM_MCTRL_DLE | SUNI_REGM_MCTRL_LOOPT, val); if (err) return (err); utp->loopback = mode; return (0); } /* * Set loopback mode for the Ultra */ static int utopia_set_loopback_ultra(struct utopia *utp, u_int mode) { int err; uint32_t val; u_int nmode; val = 0; nmode = mode; if (mode & UTP_LOOP_TIME) { nmode &= ~UTP_LOOP_TIME; val |= SUNI_REGM_MCTRL_LOOPT; } if (mode & UTP_LOOP_DIAG) { nmode &= ~UTP_LOOP_DIAG; if (val & SUNI_REGM_MCTRL_LOOPT) return (EINVAL); val |= SUNI_REGM_MCTRL_SDLE; } if (mode & UTP_LOOP_LINE) { nmode &= ~UTP_LOOP_LINE; if (val & (SUNI_REGM_MCTRL_LOOPT | SUNI_REGM_MCTRL_SDLE)) return (EINVAL); val |= SUNI_REGM_MCTRL_LLE; } if (mode & UTP_LOOP_PARAL) { nmode &= ~UTP_LOOP_PARAL; val |= SUNI_REGM_MCTRL_PDLE; } if (mode & UTP_LOOP_TWIST) { nmode &= ~UTP_LOOP_TWIST; val |= SUNI_REGM_MCTRL_TPLE; } if (nmode != 0) return (EINVAL); err = WRITEREG(utp, SUNI_REGO_MCTRL, SUNI_REGM_MCTRL_LLE | SUNI_REGM_MCTRL_SDLE | SUNI_REGM_MCTRL_LOOPT | SUNI_REGM_MCTRL_PDLE | SUNI_REGM_MCTRL_TPLE, val); if (err) return (err); utp->loopback = mode; return (0); } /* * Set loopback mode for the Ultra */ static int utopia_set_loopback_622(struct utopia *utp, u_int mode) { int err; uint32_t val; uint8_t config; int smode; u_int nmode; u_int n = 1; val = 0; nmode = mode; if (mode & UTP_LOOP_PATH) { nmode &= ~UTP_LOOP_PATH; val |= SUNI_REGM_MCTRLM_DPLE; } err = READREGS(utp, SUNI_REGO_MCONFIG, &config, &n); if (err != 0) return (err); smode = ((config & SUNI_REGM_MCONFIG_TMODE_622) == SUNI_REGM_MCONFIG_TMODE_STS1_BIT && (config & SUNI_REGM_MCONFIG_RMODE_622) == SUNI_REGM_MCONFIG_RMODE_STS1_BIT); if (mode & UTP_LOOP_TIME) { if (!smode) return (EINVAL); nmode &= ~UTP_LOOP_TIME; val |= SUNI_REGM_MCTRLM_LOOPT; } if (mode & UTP_LOOP_DIAG) { nmode &= ~UTP_LOOP_DIAG; if (val & SUNI_REGM_MCTRLM_LOOPT) return (EINVAL); val |= SUNI_REGM_MCTRLM_DLE; } if (mode & UTP_LOOP_LINE) { nmode &= ~UTP_LOOP_LINE; if (val & (SUNI_REGM_MCTRLM_LOOPT | SUNI_REGM_MCTRLM_DLE)) 
return (EINVAL); val |= SUNI_REGM_MCTRLM_LLE; } if (nmode != 0) return (EINVAL); err = WRITEREG(utp, SUNI_REGO_MCTRLM, SUNI_REGM_MCTRLM_LLE | SUNI_REGM_MCTRLM_DLE | SUNI_REGM_MCTRLM_DPLE | SUNI_REGM_MCTRL_LOOPT, val); if (err) return (err); utp->loopback = mode; return (0); } /* * Set the SUNI chip to reflect the current state in utopia. * Assume, that the chip has been reset. */ static int utopia_set_chip(struct utopia *utp) { int err = 0; /* set sonet/sdh */ err |= utopia_set_sdh(utp, utp->state & UTP_ST_SDH); /* unassigned or idle cells */ err |= utopia_set_unass(utp, utp->state & UTP_ST_UNASS); err |= WRITEREG(utp, SUNI_REGO_TACPIDLEP, 0xff, 0x6a); /* loopback */ err |= utopia_set_loopback(utp, utp->loopback); /* update carrier state */ err |= utopia_update_carrier(utp); /* enable interrupts on LOS */ err |= WRITEREG(utp, SUNI_REGO_RSOPCIE, SUNI_REGM_RSOPCIE_LOSE, SUNI_REGM_RSOPCIE_LOSE); return (err ? EIO : 0); } /* * Reset the SUNI chip to reflect the current state of utopia. */ static int utopia_reset_default(struct utopia *utp) { int err = 0; if (!(utp->flags & UTP_FL_NORESET)) { err |= WRITEREG(utp, SUNI_REGO_MRESET, SUNI_REGM_MRESET_RESET, SUNI_REGM_MRESET_RESET); err |= WRITEREG(utp, SUNI_REGO_MRESET, SUNI_REGM_MRESET_RESET, 0); } /* disable test mode */ err |= WRITEREG(utp, SUNI_REGO_MTEST, 0xff, 0x00); err |= utopia_set_chip(utp); return (err ? EIO : 0); } /* * Reset the SUNI chip to reflect the current state of utopia. */ static int utopia_reset_622(struct utopia *utp) { int err = 0; if (!(utp->flags & UTP_FL_NORESET)) { err |= WRITEREG(utp, SUNI_REGO_MRESET, SUNI_REGM_MRESET_RESET, SUNI_REGM_MRESET_RESET); err |= WRITEREG(utp, SUNI_REGO_MRESET, SUNI_REGM_MRESET_RESET, 0); } /* disable test mode */ err |= WRITEREG(utp, SUNI_REGO_MTEST, 0xff, SUNI_REGM_MTEST_DS27_53_622); err |= utopia_set_chip(utp); return (err ? 
EIO : 0); } /* * Handle interrupt on lite chip */ static void utopia_intr_default(struct utopia *utp) { uint8_t regs[SUNI_REGO_MTEST]; u_int n = SUNI_REGO_MTEST; int err; /* Read all registers. This acks the interrupts */ if ((err = READREGS(utp, SUNI_REGO_MRESET, regs, &n)) != 0) { printf("SUNI read error %d\n", err); return; } if (n <= SUNI_REGO_RSOPSIS) { printf("%s: could not read RSOPSIS", __func__); return; } /* check for LOSI (loss of signal) */ if ((regs[SUNI_REGO_MISTATUS] & SUNI_REGM_MISTATUS_RSOPI) && (regs[SUNI_REGO_RSOPSIS] & SUNI_REGM_RSOPSIS_LOSI)) utopia_check_carrier(utp, !(regs[SUNI_REGO_RSOPSIS] & SUNI_REGM_RSOPSIS_LOSV)); } /* * Update statistics from a SUNI/LITE or SUNI/ULTRA */ static void suni_lite_update_stats(struct utopia *utp) { int err; /* write to the master if we can */ if (!(utp->flags & UTP_FL_NORESET)) { err = WRITEREG(utp, SUNI_REGO_MRESET, 0, 0); } else { err = WRITEREG(utp, SUNI_REGO_RSOP_BIP8, 0, 0); err |= WRITEREG(utp, SUNI_REGO_RLOPBIP8_24, 0, 0); err |= WRITEREG(utp, SUNI_REGO_RPOPBIP8, 0, 0); err |= WRITEREG(utp, SUNI_REGO_RACPCHCS, 0, 0); err |= WRITEREG(utp, SUNI_REGO_TACPCNT, 0, 0); } if (err) { #ifdef DIAGNOSTIC printf("%s: register write error %s: %d\n", __func__, utp->chip->name, err); #endif return; } DELAY(8); utp->stats.rx_sbip += UPDATE16(utp, SUNI_REGO_RSOP_BIP8); utp->stats.rx_lbip += UPDATE20(utp, SUNI_REGO_RLOPBIP8_24); utp->stats.rx_lfebe += UPDATE20(utp, SUNI_REGO_RLOPFEBE); utp->stats.rx_pbip += UPDATE16(utp, SUNI_REGO_RPOPBIP8); utp->stats.rx_pfebe += UPDATE16(utp, SUNI_REGO_RPOPFEBE); utp->stats.rx_corr += UPDATE8(utp, SUNI_REGO_RACPCHCS); utp->stats.rx_uncorr += UPDATE8(utp, SUNI_REGO_RACPUHCS); utp->stats.rx_cells += UPDATE19(utp, SUNI_REGO_RACPCNT); utp->stats.tx_cells += UPDATE19(utp, SUNI_REGO_TACPCNT); } /* * Update statistics from a SUNI/622 */ static void suni_622_update_stats(struct utopia *utp) { int err; /* write to the master if we can */ if (!(utp->flags & UTP_FL_NORESET)) { err = 
WRITEREG(utp, SUNI_REGO_MRESET, 0, 0); } else { err = WRITEREG(utp, SUNI_REGO_RSOP_BIP8, 0, 0); err |= WRITEREG(utp, SUNI_REGO_RLOPBIP8_24, 0, 0); err |= WRITEREG(utp, SUNI_REGO_RPOPBIP8, 0, 0); err |= WRITEREG(utp, SUNI_REGO_RACPCHCS, 0, 0); err |= WRITEREG(utp, SUNI_REGO_TACPCNT, 0, 0); } if (err) { #ifdef DIAGNOSTIC printf("%s: register write error %s: %d\n", __func__, utp->chip->name, err); #endif return; } DELAY(8); utp->stats.rx_sbip += UPDATE16(utp, SUNI_REGO_RSOP_BIP8); utp->stats.rx_lbip += UPDATE20(utp, SUNI_REGO_RLOPBIP8_24); utp->stats.rx_lfebe += UPDATE20(utp, SUNI_REGO_RLOPFEBE); utp->stats.rx_pbip += UPDATE16(utp, SUNI_REGO_RPOPBIP8); utp->stats.rx_pfebe += UPDATE16(utp, SUNI_REGO_RPOPFEBE); utp->stats.rx_corr += UPDATE12(utp, SUNI_REGO_RACPCHCS_622); utp->stats.rx_uncorr += UPDATE12(utp, SUNI_REGO_RACPUHCS_622); utp->stats.rx_cells += UPDATE21(utp, SUNI_REGO_RACPCNT_622); utp->stats.tx_cells += UPDATE21(utp, SUNI_REGO_TACPCNT); } static const struct utopia_chip chip_622 = { UTP_TYPE_SUNI_622, "Suni/622 (PMC-5355)", 256, utopia_reset_622, utopia_set_sdh_default, utopia_set_unass_default, utopia_set_noscramb_default, utopia_update_carrier_default, utopia_set_loopback_622, utopia_intr_default, suni_622_update_stats, }; static const struct utopia_chip chip_lite = { UTP_TYPE_SUNI_LITE, "Suni/Lite (PMC-5346)", 256, utopia_reset_default, utopia_set_sdh_default, utopia_set_unass_default, utopia_set_noscramb_default, utopia_update_carrier_default, utopia_set_loopback_lite, utopia_intr_default, suni_lite_update_stats, }; static const struct utopia_chip chip_ultra = { UTP_TYPE_SUNI_ULTRA, "Suni/Ultra (PMC-5350)", 256, utopia_reset_default, utopia_set_sdh_default, utopia_set_unass_default, utopia_set_noscramb_default, utopia_update_carrier_default, utopia_set_loopback_ultra, utopia_intr_default, suni_lite_update_stats, }; /* * Reset IDT77105. There is really no way to reset this thing by acessing * the registers. Load the registers with default values. 
*/ static int idt77105_reset(struct utopia *utp) { int err = 0; u_int n; uint8_t val[2]; err |= WRITEREG(utp, IDTPHY_REGO_MCR, 0xff, IDTPHY_REGM_MCR_DRIC | IDTPHY_REGM_MCR_EI); n = 1; err |= READREGS(utp, IDTPHY_REGO_ISTAT, val, &n); err |= WRITEREG(utp, IDTPHY_REGO_DIAG, 0xff, 0); err |= WRITEREG(utp, IDTPHY_REGO_LHEC, 0xff, 0); err |= WRITEREG(utp, IDTPHY_REGO_CNTS, 0xff, IDTPHY_REGM_CNTS_SEC); n = 2; err |= READREGS(utp, IDTPHY_REGO_CNT, val, &n); err |= WRITEREG(utp, IDTPHY_REGO_CNTS, 0xff, IDTPHY_REGM_CNTS_TX); n = 2; err |= READREGS(utp, IDTPHY_REGO_CNT, val, &n); err |= WRITEREG(utp, IDTPHY_REGO_CNTS, 0xff, IDTPHY_REGM_CNTS_RX); n = 2; err |= READREGS(utp, IDTPHY_REGO_CNT, val, &n); err |= WRITEREG(utp, IDTPHY_REGO_CNTS, 0xff, IDTPHY_REGM_CNTS_HECE); n = 2; err |= READREGS(utp, IDTPHY_REGO_CNT, val, &n); err |= WRITEREG(utp, IDTPHY_REGO_MCR, IDTPHY_REGM_MCR_DREC, IDTPHY_REGM_MCR_DREC); err |= WRITEREG(utp, IDTPHY_REGO_DIAG, IDTPHY_REGM_DIAG_RFLUSH, IDTPHY_REGM_DIAG_RFLUSH); /* loopback */ err |= utopia_set_loopback(utp, utp->loopback); /* update carrier state */ err |= utopia_update_carrier(utp); return (err ? 
EIO : 0); } static int unknown_inval(struct utopia *utp, int what __unused) { return (EINVAL); } static int idt77105_update_carrier(struct utopia *utp) { int err; uint8_t reg; u_int n = 1; if ((err = READREGS(utp, IDTPHY_REGO_ISTAT, ®, &n)) != 0) { utp->carrier = UTP_CARR_UNKNOWN; return (err); } utopia_check_carrier(utp, reg & IDTPHY_REGM_ISTAT_GOOD); return (0); } static int idt77105_set_loopback(struct utopia *utp, u_int mode) { int err; switch (mode) { case UTP_LOOP_NONE: err = WRITEREG(utp, IDTPHY_REGO_DIAG, IDTPHY_REGM_DIAG_LOOP, IDTPHY_REGM_DIAG_LOOP_NONE); break; case UTP_LOOP_DIAG: err = WRITEREG(utp, IDTPHY_REGO_DIAG, IDTPHY_REGM_DIAG_LOOP, IDTPHY_REGM_DIAG_LOOP_PHY); break; case UTP_LOOP_LINE: err = WRITEREG(utp, IDTPHY_REGO_DIAG, IDTPHY_REGM_DIAG_LOOP, IDTPHY_REGM_DIAG_LOOP_LINE); break; default: return (EINVAL); } if (err) return (err); utp->loopback = mode; return (0); } /* * Handle interrupt on IDT77105 chip */ static void idt77105_intr(struct utopia *utp) { uint8_t reg; u_int n = 1; int err; /* Interrupt status and ack the interrupt */ if ((err = READREGS(utp, IDTPHY_REGO_ISTAT, ®, &n)) != 0) { printf("IDT77105 read error %d\n", err); return; } /* check for signal condition */ utopia_check_carrier(utp, reg & IDTPHY_REGM_ISTAT_GOOD); } static void idt77105_update_stats(struct utopia *utp) { int err = 0; uint8_t regs[2]; u_int n; #ifdef DIAGNOSTIC #define UDIAG(F,A,B) printf(F, A, B) #else #define UDIAG(F,A,B) do { } while (0) #endif #define UPD(FIELD, CODE, N, MASK) \ err = WRITEREG(utp, IDTPHY_REGO_CNTS, 0xff, CODE); \ if (err != 0) { \ UDIAG("%s: cannot write CNTS: %d\n", __func__, err); \ return; \ } \ n = N; \ err = READREGS(utp, IDTPHY_REGO_CNT, regs, &n); \ if (err != 0) { \ UDIAG("%s: cannot read CNT: %d\n", __func__, err); \ return; \ } \ if (n != N) { \ UDIAG("%s: got only %u registers\n", __func__, n); \ return; \ } \ if (N == 1) \ utp->stats.FIELD += (regs[0] & MASK); \ else \ utp->stats.FIELD += (regs[0] | (regs[1] << 8)) & MASK; 
UPD(rx_symerr, IDTPHY_REGM_CNTS_SEC, 1, 0xff); UPD(tx_cells, IDTPHY_REGM_CNTS_TX, 2, 0xffff); UPD(rx_cells, IDTPHY_REGM_CNTS_RX, 2, 0xffff); UPD(rx_uncorr, IDTPHY_REGM_CNTS_HECE, 1, 0x1f); #undef UDIAG #undef UPD } static const struct utopia_chip chip_idt77105 = { UTP_TYPE_IDT77105, "IDT77105", 7, idt77105_reset, unknown_inval, unknown_inval, unknown_inval, idt77105_update_carrier, idt77105_set_loopback, idt77105_intr, idt77105_update_stats, }; /* * Update the carrier status */ static int idt77155_update_carrier(struct utopia *utp) { int err; uint8_t reg; u_int n = 1; if ((err = READREGS(utp, IDTPHY_REGO_RSOS, ®, &n)) != 0) { utp->carrier = UTP_CARR_UNKNOWN; return (err); } utopia_check_carrier(utp, !(reg & IDTPHY_REGM_RSOS_LOS)); return (0); } /* * Handle interrupt on IDT77155 chip */ static void idt77155_intr(struct utopia *utp) { uint8_t reg; u_int n = 1; int err; if ((err = READREGS(utp, IDTPHY_REGO_RSOS, ®, &n)) != 0) { printf("IDT77105 read error %d\n", err); return; } utopia_check_carrier(utp, !(reg & IDTPHY_REGM_RSOS_LOS)); } /* * set SONET/SDH mode */ static int idt77155_set_sdh(struct utopia *utp, int sdh) { int err; if (sdh) err = WRITEREG(utp, IDTPHY_REGO_PTRM, IDTPHY_REGM_PTRM_SS, IDTPHY_REGM_PTRM_SDH); else err = WRITEREG(utp, IDTPHY_REGO_PTRM, IDTPHY_REGM_PTRM_SS, IDTPHY_REGM_PTRM_SONET); if (err != 0) return (err); utp->state &= ~UTP_ST_SDH; if (sdh) utp->state |= UTP_ST_SDH; return (0); } /* * set idle/unassigned cells */ static int idt77155_set_unass(struct utopia *utp, int unass) { int err; if (unass) err = WRITEREG(utp, IDTPHY_REGO_TCHP, 0xff, 0); else err = WRITEREG(utp, IDTPHY_REGO_TCHP, 0xff, 1); if (err != 0) return (err); utp->state &= ~UTP_ST_UNASS; if (unass) utp->state |= UTP_ST_UNASS; return (0); } /* * enable/disable scrambling */ static int idt77155_set_noscramb(struct utopia *utp, int noscramb) { int err; if (noscramb) { err = WRITEREG(utp, IDTPHY_REGO_TCC, IDTPHY_REGM_TCC_DSCR, IDTPHY_REGM_TCC_DSCR); if (err) return (err); err = 
WRITEREG(utp, IDTPHY_REGO_RCC, IDTPHY_REGM_RCC_DSCR, IDTPHY_REGM_RCC_DSCR); if (err) return (err); utp->state |= UTP_ST_NOSCRAMB; } else { err = WRITEREG(utp, IDTPHY_REGO_TCC, IDTPHY_REGM_TCC_DSCR, 0); if (err) return (err); err = WRITEREG(utp, IDTPHY_REGO_RCC, IDTPHY_REGM_RCC_DSCR, 0); if (err) return (err); utp->state &= ~UTP_ST_NOSCRAMB; } return (0); } /* * Set loopback mode for the 77155 */ static int idt77155_set_loopback(struct utopia *utp, u_int mode) { int err; uint32_t val; u_int nmode; val = 0; nmode = mode; if (mode & UTP_LOOP_TIME) { nmode &= ~UTP_LOOP_TIME; val |= IDTPHY_REGM_MCTL_TLOOP; } if (mode & UTP_LOOP_DIAG) { nmode &= ~UTP_LOOP_DIAG; val |= IDTPHY_REGM_MCTL_DLOOP; } if (mode & UTP_LOOP_LINE) { nmode &= ~UTP_LOOP_LINE; val |= IDTPHY_REGM_MCTL_LLOOP; } if (nmode != 0) return (EINVAL); err = WRITEREG(utp, IDTPHY_REGO_MCTL, IDTPHY_REGM_MCTL_TLOOP | IDTPHY_REGM_MCTL_DLOOP | IDTPHY_REGM_MCTL_LLOOP, val); if (err) return (err); utp->loopback = mode; return (0); } /* * Set the chip to reflect the current state in utopia. * Assume, that the chip has been reset. */ static int idt77155_set_chip(struct utopia *utp) { int err = 0; /* set sonet/sdh */ err |= idt77155_set_sdh(utp, utp->state & UTP_ST_SDH); /* unassigned or idle cells */ err |= idt77155_set_unass(utp, utp->state & UTP_ST_UNASS); /* loopback */ err |= idt77155_set_loopback(utp, utp->loopback); /* update carrier state */ err |= idt77155_update_carrier(utp); /* enable interrupts on LOS */ err |= WRITEREG(utp, IDTPHY_REGO_INT, IDTPHY_REGM_INT_RXSOHI, IDTPHY_REGM_INT_RXSOHI); err |= WRITEREG(utp, IDTPHY_REGO_RSOC, IDTPHY_REGM_RSOC_LOSI, IDTPHY_REGM_RSOC_LOSI); return (err ? EIO : 0); } /* * Reset the chip to reflect the current state of utopia. 
*/ static int idt77155_reset(struct utopia *utp) { int err = 0; if (!(utp->flags & UTP_FL_NORESET)) { err |= WRITEREG(utp, IDTPHY_REGO_MRID, IDTPHY_REGM_MRID_RESET, IDTPHY_REGM_MRID_RESET); err |= WRITEREG(utp, IDTPHY_REGO_MRID, IDTPHY_REGM_MRID_RESET, 0); } err |= idt77155_set_chip(utp); return (err ? EIO : 0); } /* * Update statistics from a IDT77155 * This appears to be the same as for the Suni/Lite and Ultra. IDT however * makes no assessment about the transfer time. Assume 7us. */ static void idt77155_update_stats(struct utopia *utp) { int err; /* write to the master if we can */ if (!(utp->flags & UTP_FL_NORESET)) { err = WRITEREG(utp, IDTPHY_REGO_MRID, 0, 0); } else { err = WRITEREG(utp, IDTPHY_REGO_BIPC, 0, 0); err |= WRITEREG(utp, IDTPHY_REGO_B2EC, 0, 0); err |= WRITEREG(utp, IDTPHY_REGO_B3EC, 0, 0); err |= WRITEREG(utp, IDTPHY_REGO_CEC, 0, 0); err |= WRITEREG(utp, IDTPHY_REGO_TXCNT, 0, 0); } if (err) { #ifdef DIAGNOSTIC printf("%s: register write error %s: %d\n", __func__, utp->chip->name, err); #endif return; } DELAY(8); utp->stats.rx_sbip += UPDATE16(utp, IDTPHY_REGO_BIPC); utp->stats.rx_lbip += UPDATE20(utp, IDTPHY_REGO_B2EC); utp->stats.rx_lfebe += UPDATE20(utp, IDTPHY_REGO_FEBEC); utp->stats.rx_pbip += UPDATE16(utp, IDTPHY_REGO_B3EC); utp->stats.rx_pfebe += UPDATE16(utp, IDTPHY_REGO_PFEBEC); utp->stats.rx_corr += UPDATE8(utp, IDTPHY_REGO_CEC); utp->stats.rx_uncorr += UPDATE8(utp, IDTPHY_REGO_UEC); utp->stats.rx_cells += UPDATE19(utp, IDTPHY_REGO_RCCNT); utp->stats.tx_cells += UPDATE19(utp, IDTPHY_REGO_TXCNT); } static const struct utopia_chip chip_idt77155 = { UTP_TYPE_IDT77155, "IDT77155", 0x80, idt77155_reset, idt77155_set_sdh, idt77155_set_unass, idt77155_set_noscramb, idt77155_update_carrier, idt77155_set_loopback, idt77155_intr, idt77155_update_stats, }; static int unknown_reset(struct utopia *utp __unused) { return (EIO); } static int unknown_update_carrier(struct utopia *utp) { utp->carrier = UTP_CARR_UNKNOWN; return (0); } static int 
unknown_set_loopback(struct utopia *utp __unused, u_int mode __unused) { return (EINVAL); } static void unknown_intr(struct utopia *utp __unused) { } static void unknown_update_stats(struct utopia *utp __unused) { } static const struct utopia_chip chip_unknown = { UTP_TYPE_UNKNOWN, "unknown", 0, unknown_reset, unknown_inval, unknown_inval, unknown_inval, unknown_update_carrier, unknown_set_loopback, unknown_intr, unknown_update_stats, }; /* * Callbacks for the ifmedia infrastructure. */ static int utopia_media_change(struct ifnet *ifp) { struct ifatm *ifatm = (struct ifatm *)ifp->if_softc; struct utopia *utp = ifatm->phy; int error = 0; UTP_LOCK(utp); if (utp->chip->type != UTP_TYPE_UNKNOWN && utp->state & UTP_ST_ACTIVE) { if (utp->media->ifm_media & IFM_ATM_SDH) { if (!(utp->state & UTP_ST_SDH)) error = utopia_set_sdh(utp, 1); } else { if (utp->state & UTP_ST_SDH) error = utopia_set_sdh(utp, 0); } if (utp->media->ifm_media & IFM_ATM_UNASSIGNED) { if (!(utp->state & UTP_ST_UNASS)) error = utopia_set_unass(utp, 1); } else { if (utp->state & UTP_ST_UNASS) error = utopia_set_unass(utp, 0); } if (utp->media->ifm_media & IFM_ATM_NOSCRAMB) { if (!(utp->state & UTP_ST_NOSCRAMB)) error = utopia_set_noscramb(utp, 1); } else { if (utp->state & UTP_ST_NOSCRAMB) error = utopia_set_noscramb(utp, 0); } } else error = EIO; UTP_UNLOCK(utp); return (error); } /* * Look at the carrier status. 
*/ static void utopia_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct utopia *utp = ((struct ifatm *)ifp->if_softc)->phy; UTP_LOCK(utp); if (utp->chip->type != UTP_TYPE_UNKNOWN && utp->state & UTP_ST_ACTIVE) { ifmr->ifm_active = IFM_ATM | utp->ifatm->mib.media; switch (utp->carrier) { case UTP_CARR_OK: ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; break; case UTP_CARR_LOST: ifmr->ifm_status = IFM_AVALID; break; default: ifmr->ifm_status = 0; break; } if (utp->state & UTP_ST_SDH) { ifmr->ifm_active |= IFM_ATM_SDH; ifmr->ifm_current |= IFM_ATM_SDH; } if (utp->state & UTP_ST_UNASS) { ifmr->ifm_active |= IFM_ATM_UNASSIGNED; ifmr->ifm_current |= IFM_ATM_UNASSIGNED; } if (utp->state & UTP_ST_NOSCRAMB) { ifmr->ifm_active |= IFM_ATM_NOSCRAMB; ifmr->ifm_current |= IFM_ATM_NOSCRAMB; } } else { ifmr->ifm_active = 0; ifmr->ifm_status = 0; } UTP_UNLOCK(utp); } /* * Initialize media from the mib */ void utopia_init_media(struct utopia *utp) { ifmedia_removeall(utp->media); ifmedia_add(utp->media, IFM_ATM | utp->ifatm->mib.media, 0, NULL); ifmedia_set(utp->media, IFM_ATM | utp->ifatm->mib.media); } /* * Reset all media */ void utopia_reset_media(struct utopia *utp) { ifmedia_removeall(utp->media); } /* * This is called by the driver as soon as the SUNI registers are accessible. * This may be either in the attach routine or the init routine of the driver. */ int utopia_start(struct utopia *utp) { uint8_t reg; int err; u_int n = 1; if ((err = READREGS(utp, SUNI_REGO_MRESET, ®, &n)) != 0) return (err); switch (reg & SUNI_REGM_MRESET_TYPE) { case SUNI_REGM_MRESET_TYPE_622: utp->chip = &chip_622; break; case SUNI_REGM_MRESET_TYPE_LITE: /* this may be either a SUNI LITE or a IDT77155 * * Read register 0x70. 
The SUNI doesn't have it */ n = 1; if ((err = READREGS(utp, IDTPHY_REGO_RBER, ®, &n)) != 0) return (err); if ((reg & ~IDTPHY_REGM_RBER_RESV) == (IDTPHY_REGM_RBER_FAIL | IDTPHY_REGM_RBER_WARN)) utp->chip = &chip_idt77155; else utp->chip = &chip_lite; break; case SUNI_REGM_MRESET_TYPE_ULTRA: utp->chip = &chip_ultra; break; default: if (reg == (IDTPHY_REGM_MCR_DRIC | IDTPHY_REGM_MCR_EI)) utp->chip = &chip_idt77105; else { if_printf(&utp->ifatm->ifnet, "unknown ATM-PHY chip %#x\n", reg); utp->chip = &chip_unknown; } break; } utp->state |= UTP_ST_ACTIVE; return (0); } /* * Stop the chip */ void utopia_stop(struct utopia *utp) { utp->state &= ~UTP_ST_ACTIVE; } /* * Handle the sysctls */ static int utopia_sysctl_regs(SYSCTL_HANDLER_ARGS) { struct utopia *utp = (struct utopia *)arg1; int error; u_int n; uint8_t *val; uint8_t new[3]; if ((n = utp->chip->nregs) == 0) return (EIO); val = malloc(sizeof(uint8_t) * n, M_TEMP, M_WAITOK); UTP_LOCK(utp); error = READREGS(utp, 0, val, &n); UTP_UNLOCK(utp); if (error) { free(val, M_TEMP); return (error); } error = SYSCTL_OUT(req, val, sizeof(uint8_t) * n); free(val, M_TEMP); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, new, sizeof(new)); if (error) return (error); UTP_LOCK(utp); error = WRITEREG(utp, new[0], new[1], new[2]); UTP_UNLOCK(utp); return (error); } static int utopia_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct utopia *utp = (struct utopia *)arg1; void *val; int error; val = malloc(sizeof(utp->stats), M_TEMP, M_WAITOK); UTP_LOCK(utp); bcopy(&utp->stats, val, sizeof(utp->stats)); if (req->newptr != NULL) bzero((char *)&utp->stats + sizeof(utp->stats.version), sizeof(utp->stats) - sizeof(utp->stats.version)); UTP_UNLOCK(utp); error = SYSCTL_OUT(req, val, sizeof(utp->stats)); free(val, M_TEMP); if (error && req->newptr != NULL) bcopy(val, &utp->stats, sizeof(utp->stats)); /* ignore actual new value */ return (error); } /* * Handle the loopback sysctl */ static int 
utopia_sysctl_loopback(SYSCTL_HANDLER_ARGS) { struct utopia *utp = (struct utopia *)arg1; int error; u_int loopback; error = SYSCTL_OUT(req, &utp->loopback, sizeof(u_int)); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, &loopback, sizeof(u_int)); if (error) return (error); UTP_LOCK(utp); error = utopia_set_loopback(utp, loopback); UTP_UNLOCK(utp); return (error); } /* * Handle the type sysctl */ static int utopia_sysctl_type(SYSCTL_HANDLER_ARGS) { struct utopia *utp = (struct utopia *)arg1; return (SYSCTL_OUT(req, &utp->chip->type, sizeof(utp->chip->type))); } /* * Handle the name sysctl */ static int utopia_sysctl_name(SYSCTL_HANDLER_ARGS) { struct utopia *utp = (struct utopia *)arg1; return (SYSCTL_OUT(req, utp->chip->name, strlen(utp->chip->name) + 1)); } /* * Initialize the state. This is called from the drivers attach * function. The mutex must be already initialized. */ int utopia_attach(struct utopia *utp, struct ifatm *ifatm, struct ifmedia *media, struct mtx *lock, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *children, const struct utopia_methods *m) { bzero(utp, sizeof(*utp)); utp->ifatm = ifatm; utp->methods = m; utp->media = media; utp->lock = lock; utp->chip = &chip_unknown; utp->stats.version = 1; ifmedia_init(media, IFM_ATM_SDH | IFM_ATM_UNASSIGNED | IFM_ATM_NOSCRAMB, utopia_media_change, utopia_media_status); if (SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_regs", CTLFLAG_RW | CTLTYPE_OPAQUE, utp, 0, utopia_sysctl_regs, "S", "phy registers") == NULL) return (-1); if (SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_loopback", CTLFLAG_RW | CTLTYPE_UINT, utp, 0, utopia_sysctl_loopback, "IU", "phy loopback mode") == NULL) return (-1); if (SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_type", CTLFLAG_RD | CTLTYPE_UINT, utp, 0, utopia_sysctl_type, "IU", "phy type") == NULL) return (-1); if (SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_name", CTLFLAG_RD | CTLTYPE_STRING, utp, 0, utopia_sysctl_name, "A", "phy name") == 
NULL) return (-1); if (SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_stats", CTLFLAG_RW | CTLTYPE_OPAQUE, utp, 0, utopia_sysctl_stats, "S", "phy statistics") == NULL) return (-1); if (SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "phy_state", CTLFLAG_RD, &utp->state, 0, "phy state") == NULL) return (-1); if (SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "phy_carrier", CTLFLAG_RD, &utp->carrier, 0, "phy carrier") == NULL) return (-1); UTP_WLOCK_LIST(); LIST_INSERT_HEAD(&utopia_list, utp, link); UTP_WUNLOCK_LIST(); utp->state |= UTP_ST_ATTACHED; return (0); } /* * Detach. We set a flag here, wakeup the daemon and let him do it. * Here we need the lock for synchronisation with the daemon. */ void utopia_detach(struct utopia *utp) { UTP_LOCK_ASSERT(utp); if (utp->state & UTP_ST_ATTACHED) { utp->state |= UTP_ST_DETACH; while (utp->state & UTP_ST_DETACH) { wakeup(&utopia_list); msleep(utp, utp->lock, PZERO, "utopia_detach", hz); } } } /* * The carrier state kernel proc for those adapters that do not interrupt. * * We assume, that utopia_attach can safely add a new utopia while we are going * through the list without disturbing us (we lock the list while getting * the address of the first element, adding is always done at the head). * Removing is entirely handled here. 
*/ static void utopia_daemon(void *arg __unused) { struct utopia *utp, *next; UTP_RLOCK_LIST(); while (utopia_kproc != NULL) { utp = LIST_FIRST(&utopia_list); UTP_RUNLOCK_LIST(); while (utp != NULL) { mtx_lock(&Giant); /* XXX depend on MPSAFE */ UTP_LOCK(utp); next = LIST_NEXT(utp, link); if (utp->state & UTP_ST_DETACH) { LIST_REMOVE(utp, link); utp->state &= ~UTP_ST_DETACH; wakeup_one(utp); } else if (utp->state & UTP_ST_ACTIVE) { if (utp->flags & UTP_FL_POLL_CARRIER) utopia_update_carrier(utp); utopia_update_stats(utp); } UTP_UNLOCK(utp); mtx_unlock(&Giant); /* XXX depend on MPSAFE */ utp = next; } UTP_RLOCK_LIST(); msleep(&utopia_list, &utopia_list_mtx, PZERO, "*idle*", hz); } wakeup_one(&utopia_list); UTP_RUNLOCK_LIST(); kthread_exit(0); } /* * Module initialisation */ static int utopia_mod_init(module_t mod, int what, void *arg) { int err; struct proc *kp; switch (what) { case MOD_LOAD: mtx_init(&utopia_list_mtx, "utopia list mutex", NULL, MTX_DEF); err = kthread_create(utopia_daemon, NULL, &utopia_kproc, RFHIGHPID, 0, "utopia"); if (err != 0) { printf("cannot created utopia thread %d\n", err); return (err); } break; case MOD_UNLOAD: UTP_WLOCK_LIST(); if ((kp = utopia_kproc) != NULL) { utopia_kproc = NULL; wakeup_one(&utopia_list); PROC_LOCK(kp); UTP_WUNLOCK_LIST(); msleep(kp, &kp->p_mtx, PWAIT, "utopia_destroy", 0); PROC_UNLOCK(kp); } else UTP_WUNLOCK_LIST(); mtx_destroy(&utopia_list_mtx); break; } return (0); } static moduledata_t utopia_mod = { "utopia", utopia_mod_init, 0 }; DECLARE_MODULE(utopia, utopia_mod, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(utopia, 1); Index: head/sys/dev/vx/if_vx_pci.c =================================================================== --- head/sys/dev/vx/if_vx_pci.c (revision 129878) +++ head/sys/dev/vx/if_vx_pci.c (revision 129879) @@ -1,181 +1,182 @@ /* * Copyright (C) 1996 Naoki Hamada * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include static void vx_pci_shutdown(device_t); static int vx_pci_probe(device_t); static int vx_pci_attach(device_t); static device_method_t vx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vx_pci_probe), DEVMETHOD(device_attach, vx_pci_attach), DEVMETHOD(device_shutdown, vx_pci_shutdown), { 0, 0 } }; static driver_t vx_driver = { "vx", vx_methods, sizeof(struct vx_softc) }; static devclass_t vx_devclass; DRIVER_MODULE(vx, pci, vx_driver, vx_devclass, 0, 0); MODULE_DEPEND(vx, pci, 1, 1, 1); MODULE_DEPEND(vx, ether, 1, 1, 1); static void vx_pci_shutdown( device_t dev) { struct vx_softc *sc; sc = device_get_softc(dev); vxstop(sc); return; } static int vx_pci_probe( device_t dev) { u_int32_t device_id; device_id = pci_read_config(dev, PCIR_DEVVENDOR, 4); if(device_id == 0x590010b7ul) { device_set_desc(dev, "3COM 3C590 Etherlink III PCI"); return(0); } if(device_id == 0x595010b7ul || device_id == 0x595110b7ul || device_id == 0x595210b7ul) { device_set_desc(dev, "3COM 3C595 Etherlink III PCI"); return(0); } /* * The (Fast) Etherlink XL adapters are now supported by * the xl driver, which uses bus master DMA and is much * faster. (And which also supports the 3c905B. 
*/ #ifdef VORTEX_ETHERLINK_XL if(device_id == 0x900010b7ul || device_id == 0x900110b7ul) { device_set_desc(dev, "3COM 3C900 Etherlink XL PCI"); return(0); } if(device_id == 0x905010b7ul || device_id == 0x905110b7ul) { device_set_desc(dev, "3COM 3C905 Etherlink XL PCI"); return(0); } #endif return (ENXIO); } static int vx_pci_attach( device_t dev) { struct vx_softc *sc; int rid; sc = device_get_softc(dev); rid = PCIR_BAR(0); sc->vx_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->vx_res == NULL) goto bad; sc->bst = rman_get_bustag(sc->vx_res); sc->bsh = rman_get_bushandle(sc->vx_res); rid = 0; sc->vx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->vx_irq == NULL) goto bad; if (bus_setup_intr(dev, sc->vx_irq, INTR_TYPE_NET, vxintr, sc, &sc->vx_intrhand)) goto bad; if (vxattach(dev) == 0) { goto bad; } /* defect check for 3C590 */ if ((pci_read_config(dev, PCIR_DEVVENDOR, 4) >> 16) == 0x5900) { GO_WINDOW(0); if (vxbusyeeprom(sc)) goto bad; CSR_WRITE_2(sc, VX_W0_EEPROM_COMMAND, EEPROM_CMD_RD | EEPROM_SOFTINFO2); if (vxbusyeeprom(sc)) goto bad; if (!(CSR_READ_2(sc, VX_W0_EEPROM_DATA) & NO_RX_OVN_ANOMALY)) { printf("Warning! Defective early revision adapter!\n"); } } return(0); bad: if (sc->vx_intrhand != NULL) bus_teardown_intr(dev, sc->vx_irq, sc->vx_intrhand); if (sc->vx_res != NULL) bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->vx_res); if (sc->vx_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vx_irq); return(ENXIO); } Index: head/sys/dev/wl/if_wl.c =================================================================== --- head/sys/dev/wl/if_wl.c (revision 129878) +++ head/sys/dev/wl/if_wl.c (revision 129879) @@ -1,2644 +1,2645 @@ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain all copyright * notices, this list of conditions and the following disclaimer. * 2. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * if_wl.c - original MACH, then BSDI ISA wavelan driver * ported to mach by Anders Klemets * to BSDI by Robert Morris * to FreeBSD by Jim Binkley * to FreeBSD 2.2+ by Michael Smith * * 2.2 update: * Changed interface to match 2.1-2.2 differences. * Implement IRQ selection logic in wlprobe() * Implement PSA updating. * Pruned heading comments for relevance. * Ripped out all the 'interface counters' cruft. * Cut the missing-interrupt timer back to 100ms. * 2.2.1 update: * now supports all multicast mode (mrouted will work), * but unfortunately must do that by going into promiscuous mode * NWID sysctl added so that normally promiscuous mode is NWID-specific * but can be made NWID-inspecific * 7/14/97 jrb * * Work done: * Ported to FreeBSD, got promiscuous mode working with bpfs, * and rewired timer routine. The i82586 will hang occasionally on output * and the watchdog timer will kick it if so and log an entry. * 2 second timeout there. Apparently the chip loses an interrupt. 
* Code borrowed from if_ie.c for watchdog timer. * * The wavelan card is a 2mbit radio modem that emulates ethernet; * i.e., it uses MAC addresses. This should not be a surprise since * it uses an ethernet controller as a major hw item. * It can broadcast, unicast or apparently multicast in a base cell * using an omni-directional antennae that is * about 800 feet around the base cell barring walls and metal. * With directional antennae, it can be used point to point over a mile * or so apparently (haven't tried that). * * There are ISA and pcmcia versions (not supported by this code). * The ISA card has an Intel 82586 lan controller on it. It consists * of 2 pieces of hw, the lan controller (intel) and a radio-modem. * The latter has an extra set of controller registers that has nothing * to do with the i82586 and allows setting and monitoring of radio * signal strength, etc. There is a nvram area called the PSA that * contains a number of setup variables including the IRQ and so-called * NWID or Network ID. The NWID must be set the same for all radio * cards to communicate (unless you are using the ATT/NCR roaming feature * with their access points. There is no support for that here. Roaming * involves a link-layer beacon sent out from the access points. End * stations monitor the signal strength and only use the strongest * access point). This driver assumes that the base ISA port, IRQ, * and NWID are first set in nvram via the dos-side "instconf.exe" utility * supplied with the card. This driver takes the ISA port from * the kernel configuration setup, and then determines the IRQ either * from the kernel config (if an explicit IRQ is set) or from the * PSA on the card if not. * The hw also magically just uses the IRQ set in the nvram. * The NWID is used magically as well by the radio-modem * to determine which packets to keep or throw out. * * sample config: * * device wl0 at isa? port 0x300 net irq ? * * Ifdefs: * 1. WLDEBUG. 
(off) - if turned on enables IFF_DEBUG set via ifconfig debug * 2. MULTICAST (on) - turned on and works up to and including mrouted * 3. WLCACHE (off) - define to turn on a signal strength * (and other metric) cache that is indexed by sender MAC address. * Apps can read this out to learn the remote signal strength of a * sender. Note that it has a switch so that it only stores * broadcast/multicast senders but it could be set to store unicast * too only. Size is hardwired in if_wl_wavelan.h * * one further note: promiscuous mode is a curious thing. In this driver, * promiscuous mode apparently CAN catch ALL packets and ignore the NWID * setting. This is probably more useful in a sense (for snoopers) if * you are interested in all traffic as opposed to if you are interested * in just your own. There is a driver specific sysctl to turn promiscuous * from just promiscuous to wildly promiscuous... * * This driver also knows how to load the synthesizers in the 2.4 Gz * ISA Half-card, Product number 847647476 (USA/FCC IEEE Channel set). * This product consists of a "mothercard" that contains the 82586, * NVRAM that holds the PSA, and the ISA-buss interface custom ASIC. * The radio transceiver is a "daughtercard" called the WaveMODEM which * connects to the mothercard through two single-inline connectors: a * 20-pin connector provides DC-power and modem signals, and a 3-pin * connector which exports the antenna connection. The code herein * loads the receive and transmit synthesizers and the corresponding * transmitter output power value from an EEPROM controlled through * additional registers via the MMC. The EEPROM address selected * are those whose values are preset by the DOS utility programs * provided with the product, and this provides compatible operation * with the DOS Packet Driver software. A future modification will * add the necessary functionality to this driver and to the wlconfig * utility to completely replace the DOS Configuration Utilities. 
* The 2.4 Gz WaveMODEM is described in document number 407-024692/E, * and is available through Lucent Technologies OEM supply channels. * --RAB 1997/06/08. */ #define MULTICAST 1 /* * Olivetti PC586 Mach Ethernet driver v1.0 * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989 * All rights reserved. * */ /* Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., Cupertino, California. All Rights Reserved Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies and that both the copyright notice and this permission notice appear in supporting documentation, and that the name of Olivetti not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. All Rights Reserved Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies and that both the copyright notice and this permission notice appear in supporting documentation, and that the name of Intel not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. 
INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * NOTE: * by rvb: * 1. The best book on the 82586 is: * LAN Components User's Manual by Intel * The copy I found was dated 1984. This really tells you * what the state machines are doing * 2. In the current design, we only do one write at a time, * though the hardware is capable of chaining and possibly * even batching. The problem is that we only make one * transmit buffer available in sram space. */ #include "opt_wavelan.h" #include "opt_inet.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include /* Definitions for the Intel chip */ /* was 1000 in original, fed to DELAY(x) */ #define DELAYCONST 1000 #include #include static char t_packet[ETHERMTU + sizeof(struct ether_header) + sizeof(long)]; struct wl_softc{ struct arpcom wl_ac; /* Ethernet common part */ #define wl_if wl_ac.ac_if /* network visible interface */ #define wl_addr wl_ac.ac_enaddr /* hardware address */ u_char psa[0x40]; u_char nwid[2]; /* current radio modem nwid */ short base; short unit; int flags; int tbusy; /* flag to determine if xmit is busy */ u_short begin_fd; u_short end_fd; u_short end_rbd; u_short hacr; /* latest host adapter CR command */ short mode; u_char chan24; /* 2.4 Gz: channel number/EEPROM Area # */ u_short freq24; /* 2.4 Gz: resulting frequency */ int rid_ioport; int rid_irq; struct resource *res_ioport; struct resource *res_irq; void 
*intr_cookie; bus_space_tag_t bt; bus_space_handle_t bh; struct mtx wl_mtx; struct callout_handle watchdog_ch; #ifdef WLCACHE int w_sigitems; /* number of cached entries */ /* array of cache entries */ struct w_sigcache w_sigcache[ MAXCACHEITEMS ]; int w_nextcache; /* next free cache entry */ int w_wrapindex; /* next "free" cache entry */ #endif }; #define WL_LOCK(_sc) mtx_lock(&(_sc)->wl_mtx) #define WL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->wl_mtx, MA_OWNED) #define WL_UNLOCK(_sc) mtx_unlock(&(_sc)->wl_mtx) static int wlprobe(device_t); static int wlattach(device_t); static int wldetach(device_t); static device_method_t wl_methods[] = { DEVMETHOD(device_probe, wlprobe), DEVMETHOD(device_attach, wlattach), DEVMETHOD(device_detach, wldetach), { 0, 0} }; static driver_t wl_driver = { "wl", wl_methods, sizeof (struct wl_softc) }; devclass_t wl_devclass; DRIVER_MODULE(wl, isa, wl_driver, wl_devclass, 0, 0); MODULE_DEPEND(wl, isa, 1, 1, 1); MODULE_DEPEND(wl, ether, 1, 1, 1); static struct isa_pnp_id wl_ids[] = { {0, NULL} }; /* * XXX The Wavelan appears to be prone to dropping stuff if you talk to * it too fast. This disgusting hack inserts a delay after each packet * is queued which helps avoid this behaviour on fast systems. */ static int wl_xmit_delay = 250; SYSCTL_INT(_machdep, OID_AUTO, wl_xmit_delay, CTLFLAG_RW, &wl_xmit_delay, 0, ""); /* * not XXX, but ZZZ (bizarre). * promiscuous mode can be toggled to ignore NWIDs. By default, * it does not. Caution should be exercised about combining * this mode with IFF_ALLMULTI which puts this driver in * promiscuous mode. 
*/ static int wl_ignore_nwid = 0; SYSCTL_INT(_machdep, OID_AUTO, wl_ignore_nwid, CTLFLAG_RW, &wl_ignore_nwid, 0, ""); /* * Emit diagnostics about transmission problems */ static int xmt_watch = 0; SYSCTL_INT(_machdep, OID_AUTO, wl_xmit_watch, CTLFLAG_RW, &xmt_watch, 0, ""); /* * Collect SNR statistics */ static int gathersnr = 0; SYSCTL_INT(_machdep, OID_AUTO, wl_gather_snr, CTLFLAG_RW, &gathersnr, 0, ""); static int wl_allocate_resources(device_t device); static int wl_deallocate_resources(device_t device); static void wlstart(struct ifnet *ifp); static void wlinit(void *xsc); static int wlioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static timeout_t wlwatchdog; static void wlintr(void *arg); static void wlxmt(struct wl_softc *sc, struct mbuf *m); static int wldiag(struct wl_softc *sc); static int wlconfig(struct wl_softc *sc); static int wlcmd(struct wl_softc *sc, char *str); static void wlmmcstat(struct wl_softc *sc); static u_short wlbldru(struct wl_softc *sc); static u_short wlmmcread(u_int base, u_short reg); static void wlinitmmc(struct wl_softc *sc); static int wlhwrst(struct wl_softc *sc); static void wlrustrt(struct wl_softc *sc); static void wlbldcu(struct wl_softc *sc); static int wlack(struct wl_softc *sc); static int wlread(struct wl_softc *sc, u_short fd_p); static void getsnr(struct wl_softc *sc); static void wlrcv(struct wl_softc *sc); static int wlrequeue(struct wl_softc *sc, u_short fd_p); static void wlsftwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp, struct wl_softc *sc); static void wlhdwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp, struct wl_softc *sc); #ifdef WLDEBUG static void wltbd(struct wl_softc *sc); #endif static void wlgetpsa(int base, u_char *buf); static void wlsetpsa(struct wl_softc *sc); static u_short wlpsacrc(u_char *buf); static void wldump(struct wl_softc *sc); #ifdef WLCACHE static void wl_cache_store(struct wl_softc *, int, struct ether_header *, struct mbuf *); static void 
wl_cache_zero(struct wl_softc *sc); #endif /* array for maping irq numbers to values for the irq parameter register */ static int irqvals[16] = { 0, 0, 0, 0x01, 0x02, 0x04, 0, 0x08, 0, 0, 0x10, 0x20, 0x40, 0, 0, 0x80 }; /* * wlprobe: * * This function "probes" or checks for the WaveLAN board on the bus to * see if it is there. As far as I can tell, the best break between this * routine and the attach code is to simply determine whether the board * is configured in properly. Currently my approach to this is to write * and read a word from the SRAM on the board being probed. If the word * comes back properly then we assume the board is there. The config * code expects to see a successful return from the probe routine before * attach will be called. * * input : address device is mapped to, and unit # being checked * output : a '1' is returned if the board exists, and a 0 otherwise * */ static int wlprobe(device_t device) { struct wl_softc *sc; short base; char *str = "wl%d: board out of range [0..%d]\n"; u_char inbuf[100]; unsigned long junk, oldpri, sirq; int error, irq; error = ISA_PNP_PROBE(device_get_parent(device), device, wl_ids); if (error == ENXIO || error == 0) return (error); sc = device_get_softc(device); error = wl_allocate_resources(device); if (error) goto errexit; base = rman_get_start(sc->res_ioport); /* TBD. not true. 
* regular CMD() will not work, since no softc yet */ #define PCMD(base, hacr) outw((base), (hacr)) oldpri = splimp(); PCMD(base, HACR_RESET); /* reset the board */ DELAY(DELAYCONST); /* >> 4 clocks at 6MHz */ PCMD(base, HACR_RESET); /* reset the board */ DELAY(DELAYCONST); /* >> 4 clocks at 6MHz */ splx(oldpri); /* clear reset command and set PIO#1 in autoincrement mode */ PCMD(base, HACR_DEFAULT); PCMD(base, HACR_DEFAULT); outw(PIOR1(base), 0); /* go to beginning of RAM */ outsw(PIOP1(base), str, strlen(str)/2+1); /* write string */ outw(PIOR1(base), 0); /* rewind */ insw(PIOP1(base), inbuf, strlen(str)/2+1); /* read result */ if (bcmp(str, inbuf, strlen(str))) { error = ENXIO; goto errexit; } sc->chan24 = 0; /* 2.4 Gz: config channel */ sc->freq24 = 0; /* 2.4 Gz: frequency */ /* read the PSA from the board into temporary storage */ wlgetpsa(base, inbuf); /* We read the IRQ value from the PSA on the board. */ for (irq = 15; irq >= 0; irq--) if (irqvals[irq] == inbuf[WLPSA_IRQNO]) break; if ((irq == 0) || (irqvals[irq] == 0)){ printf("wl%d: PSA corrupt (invalid IRQ value)\n", device_get_unit(device)); } else { /* * If the IRQ requested by the PSA is already claimed by another * device, the board won't work, but the user can still access the * driver to change the IRQ. */ if (bus_get_resource(device, SYS_RES_IRQ, 0, &sirq, &junk)) goto errexit; if (irq != (int)sirq) printf("wl%d: board is configured for interrupt %d\n", device_get_unit(device), irq); } wl_deallocate_resources(device); return (0); errexit: wl_deallocate_resources(device); return (error); } /* * wlattach: * * This function attaches a WaveLAN board to the "system". The rest of * runtime structures are initialized here (this routine is called after * a successful probe of the board). Once the ethernet address is read * and stored, the board's ifnet structure is attached and readied. 
* * input : isa_dev structure setup in autoconfig * output : board structs and ifnet is setup * */ static int wlattach(device_t device) { struct wl_softc *sc; short base; int error, i, j; int unit; struct ifnet *ifp; sc = device_get_softc(device); ifp = &sc->wl_if; mtx_init(&sc->wl_mtx, device_get_nameunit(device), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); error = wl_allocate_resources(device); if (error) { wl_deallocate_resources(device); return (ENXIO); } base = rman_get_start(sc->res_ioport); unit = device_get_unit(device); #ifdef WLDEBUG printf("wlattach: base %x, unit %d\n", base, unit); #endif sc->base = base; sc->unit = unit; sc->flags = 0; sc->mode = 0; sc->hacr = HACR_RESET; callout_handle_init(&sc->watchdog_ch); CMD(sc); /* reset the board */ DELAY(DELAYCONST); /* >> 4 clocks at 6MHz */ /* clear reset command and set PIO#2 in parameter access mode */ sc->hacr = (HACR_DEFAULT & ~HACR_16BITS); CMD(sc); /* Read the PSA from the board for our later reference */ wlgetpsa(base, sc->psa); /* fetch NWID */ sc->nwid[0] = sc->psa[WLPSA_NWID]; sc->nwid[1] = sc->psa[WLPSA_NWID+1]; /* fetch MAC address - decide which one first */ if (sc->psa[WLPSA_MACSEL] & 1) j = WLPSA_LOCALMAC; else j = WLPSA_UNIMAC; for (i=0; i < WAVELAN_ADDR_SIZE; ++i) sc->wl_addr[i] = sc->psa[j + i]; /* enter normal 16 bit mode operation */ sc->hacr = HACR_DEFAULT; CMD(sc); wlinitmmc(sc); outw(PIOR1(base), OFFSET_SCB + 8); /* address of scb_crcerrs */ outw(PIOP1(base), 0); /* clear scb_crcerrs */ outw(PIOP1(base), 0); /* clear scb_alnerrs */ outw(PIOP1(base), 0); /* clear scb_rscerrs */ outw(PIOP1(base), 0); /* clear scb_ovrnerrs */ bzero(ifp, sizeof(ifp)); ifp->if_softc = sc; ifp->if_mtu = WAVELAN_MTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; #ifdef WLDEBUG ifp->if_flags |= IFF_DEBUG; #endif #if MULTICAST ifp->if_flags |= IFF_MULTICAST; #endif /* MULTICAST */ if_initname(ifp, device_get_name(device), device_get_unit(device)); ifp->if_init = wlinit; ifp->if_start = wlstart; ifp->if_ioctl = 
wlioctl; ifp->if_timer = 0; /* paranoia */ ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* no entries ifp->if_watchdog ifp->if_done ifp->if_reset */ ether_ifattach(ifp, &sc->wl_addr[0]); bcopy(&sc->wl_addr[0], sc->wl_ac.ac_enaddr, WAVELAN_ADDR_SIZE); if_printf(ifp, "NWID 0x%02x%02x", sc->nwid[0], sc->nwid[1]); if (sc->freq24) printf(", Freq %d MHz",sc->freq24); /* 2.4 Gz */ printf("\n"); /* 2.4 Gz */ bus_setup_intr(device, sc->res_irq, INTR_TYPE_NET, wlintr, sc, &sc->intr_cookie); if (bootverbose) wldump(sc); return (0); } static int wldetach(device_t device) { struct wl_softc *sc = device_get_softc(device); device_t parent = device_get_parent(device); struct ifnet *ifp; ifp = &sc->wl_if; ether_ifdetach(ifp); WL_LOCK(sc); /* reset the board */ sc->hacr = HACR_RESET; CMD(sc); sc->hacr = HACR_DEFAULT; CMD(sc); if (sc->intr_cookie != NULL) { BUS_TEARDOWN_INTR(parent, device, sc->res_irq, sc->intr_cookie); sc->intr_cookie = NULL; } bus_generic_detach(device); wl_deallocate_resources(device); WL_UNLOCK(sc); mtx_destroy(&sc->wl_mtx); return (0); } static int wl_allocate_resources(device_t device) { struct wl_softc *sc = device_get_softc(device); int ports = 16; /* Number of ports */ sc->res_ioport = bus_alloc_resource(device, SYS_RES_IOPORT, &sc->rid_ioport, 0ul, ~0ul, ports, RF_ACTIVE); if (sc->res_ioport == NULL) goto errexit; sc->res_irq = bus_alloc_resource_any(device, SYS_RES_IRQ, &sc->rid_irq, RF_SHAREABLE|RF_ACTIVE); if (sc->res_irq == NULL) goto errexit; return (0); errexit: wl_deallocate_resources(device); return (ENXIO); } static int wl_deallocate_resources(device_t device) { struct wl_softc *sc = device_get_softc(device); if (sc->res_irq != 0) { bus_deactivate_resource(device, SYS_RES_IRQ, sc->rid_irq, sc->res_irq); bus_release_resource(device, SYS_RES_IRQ, sc->rid_irq, sc->res_irq); sc->res_irq = 0; } if (sc->res_ioport != 0) { bus_deactivate_resource(device, SYS_RES_IOPORT, sc->rid_ioport, sc->res_ioport); bus_release_resource(device, SYS_RES_IOPORT, sc->rid_ioport, 
sc->res_ioport); sc->res_ioport = 0; } return (0); } /* * Print out interesting information about the 82596. */ static void wldump(struct wl_softc *sc) { int base = sc->base; int i; printf("hasr %04x\n", inw(HASR(base))); printf("scb at %04x:\n ", OFFSET_SCB); outw(PIOR1(base), OFFSET_SCB); for (i = 0; i < 8; i++) printf("%04x ", inw(PIOP1(base))); printf("\n"); printf("cu at %04x:\n ", OFFSET_CU); outw(PIOR1(base), OFFSET_CU); for (i = 0; i < 8; i++) printf("%04x ", inw(PIOP1(base))); printf("\n"); printf("tbd at %04x:\n ", OFFSET_TBD); outw(PIOR1(base), OFFSET_TBD); for (i = 0; i < 4; i++) printf("%04x ", inw(PIOP1(base))); printf("\n"); } /* Initialize the Modem Management Controller */ static void wlinitmmc(struct wl_softc *sc) { int base = sc->base; int configured; int mode = sc->mode; int i; /* 2.4 Gz */ /* enter 8 bit operation */ sc->hacr = (HACR_DEFAULT & ~HACR_16BITS); CMD(sc); configured = sc->psa[WLPSA_CONFIGURED] & 1; /* * Set default modem control parameters. Taken from NCR document * 407-0024326 Rev. A */ MMC_WRITE(MMC_JABBER_ENABLE, 0x01); MMC_WRITE(MMC_ANTEN_SEL, 0x02); MMC_WRITE(MMC_IFS, 0x20); MMC_WRITE(MMC_MOD_DELAY, 0x04); MMC_WRITE(MMC_JAM_TIME, 0x38); MMC_WRITE(MMC_DECAY_PRM, 0x00); /* obsolete ? 
*/ MMC_WRITE(MMC_DECAY_UPDAT_PRM, 0x00); if (!configured) { MMC_WRITE(MMC_LOOPT_SEL, 0x00); if (sc->psa[WLPSA_COMPATNO] & 1) { MMC_WRITE(MMC_THR_PRE_SET, 0x01); /* 0x04 for AT and 0x01 for MCA */ } else { MMC_WRITE(MMC_THR_PRE_SET, 0x04); /* 0x04 for AT and 0x01 for MCA */ } MMC_WRITE(MMC_QUALITY_THR, 0x03); } else { /* use configuration defaults from parameter storage area */ if (sc->psa[WLPSA_NWIDENABLE] & 1) { if ((mode & (MOD_PROM | MOD_ENAL)) && wl_ignore_nwid) { MMC_WRITE(MMC_LOOPT_SEL, 0x40); } else { MMC_WRITE(MMC_LOOPT_SEL, 0x00); } } else { MMC_WRITE(MMC_LOOPT_SEL, 0x40); /* disable network id check */ } MMC_WRITE(MMC_THR_PRE_SET, sc->psa[WLPSA_THRESH]); MMC_WRITE(MMC_QUALITY_THR, sc->psa[WLPSA_QUALTHRESH]); } MMC_WRITE(MMC_FREEZE, 0x00); MMC_WRITE(MMC_ENCR_ENABLE, 0x00); MMC_WRITE(MMC_NETW_ID_L,sc->nwid[1]); /* set NWID */ MMC_WRITE(MMC_NETW_ID_H,sc->nwid[0]); /* enter normal 16 bit mode operation */ sc->hacr = HACR_DEFAULT; CMD(sc); CMD(sc); /* virtualpc1 needs this! */ if (sc->psa[WLPSA_COMPATNO]== /* 2.4 Gz: half-card ver */ WLPSA_COMPATNO_WL24B) { /* 2.4 Gz */ i=sc->chan24<<4; /* 2.4 Gz: position ch # */ MMC_WRITE(MMC_EEADDR,i+0x0f); /* 2.4 Gz: named ch, wc=16 */ MMC_WRITE(MMC_EECTRL,MMC_EECTRL_DWLD+ /* 2.4 Gz: Download Synths */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: Read EEPROM */ for (i=0; i<1000; ++i) { /* 2.4 Gz: wait for download */ DELAY(40); /* 2.4 Gz */ if ((wlmmcread(base,MMC_EECTRLstat) /* 2.4 Gz: check DWLD and */ &(MMC_EECTRLstat_DWLD /* 2.4 Gz: EEBUSY */ +MMC_EECTRLstat_EEBUSY))==0) /* 2.4 Gz: */ break; /* 2.4 Gz: download finished */ } /* 2.4 Gz */ if (i==1000) printf("wl: synth load failed\n"); /* 2.4 Gz */ MMC_WRITE(MMC_EEADDR,0x61); /* 2.4 Gz: default pwr, wc=2 */ MMC_WRITE(MMC_EECTRL,MMC_EECTRL_DWLD+ /* 2.4 Gz: Download Xmit Pwr */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: Read EEPROM */ for (i=0; i<1000; ++i) { /* 2.4 Gz: wait for download */ DELAY(40); /* 2.4 Gz */ if ((wlmmcread(base,MMC_EECTRLstat) /* 2.4 Gz: check DWLD and */ 
&(MMC_EECTRLstat_DWLD /* 2.4 Gz: EEBUSY */ +MMC_EECTRLstat_EEBUSY))==0) /* 2.4 Gz: */ break; /* 2.4 Gz: download finished */ } /* 2.4 Gz */ if (i==1000) printf("wl: xmit pwr load failed\n"); /* 2.4 Gz */ MMC_WRITE(MMC_ANALCTRL, /* 2.4 Gz: EXT ant+polarity */ MMC_ANALCTRL_ANTPOL + /* 2.4 Gz: */ MMC_ANALCTRL_EXTANT); /* 2.4 Gz: */ i=sc->chan24<<4; /* 2.4 Gz: position ch # */ MMC_WRITE(MMC_EEADDR,i); /* 2.4 Gz: get frequency */ MMC_WRITE(MMC_EECTRL, /* 2.4 Gz: EEPROM read */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: */ DELAY(40); /* 2.4 Gz */ i = wlmmcread(base,MMC_EEDATALrv) /* 2.4 Gz: freq val */ + (wlmmcread(base,MMC_EEDATAHrv)<<8); /* 2.4 Gz */ sc->freq24 = (i>>6)+2400; /* 2.4 Gz: save real freq */ } } /* * wlinit: * * Another routine that interfaces the "if" layer to this driver. * Simply resets the structures that are used by "upper layers". * As well as calling wlhwrst that does reset the WaveLAN board. * * input : softc pointer for this interface * output : structures (if structs) and board are reset * */ static void wlinit(void *xsc) { struct wl_softc *sc = xsc; struct ifnet *ifp = &sc->wl_if; int stat; u_long oldpri; #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("wl%d: entered wlinit()\n",sc->unit); #endif WL_LOCK(sc); oldpri = splimp(); if ((stat = wlhwrst(sc)) == TRUE) { sc->wl_if.if_flags |= IFF_RUNNING; /* same as DSF_RUNNING */ /* * OACTIVE is used by upper-level routines * and must be set */ sc->wl_if.if_flags &= ~IFF_OACTIVE; /* same as tbusy below */ sc->flags |= DSF_RUNNING; sc->tbusy = 0; untimeout(wlwatchdog, sc, sc->watchdog_ch); wlstart(ifp); } else { printf("wl%d init(): trouble resetting board.\n", sc->unit); } splx(oldpri); WL_UNLOCK(sc); } /* * wlhwrst: * * This routine resets the WaveLAN board that corresponds to the * board number passed in. 
 * input	: board number to do a hardware reset
 * output	: board is reset
 *
 */
static int
wlhwrst(struct wl_softc *sc)
{
#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("wl%d: entered wlhwrst()\n", sc->unit);
#endif
	sc->hacr = HACR_RESET;
	CMD(sc);			/* reset the board */

	/* clear reset command and set PIO#1 in autoincrement mode */
	sc->hacr = HACR_DEFAULT;
	CMD(sc);

#ifdef	WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		wlmmcstat(sc);	/* Display MMC registers */
#endif	/* WLDEBUG */
	wlbldcu(sc);		/* set up command unit structures */

	/* run the 82586 self-diagnostic and configure; bail on failure */
	if (wldiag(sc) == 0)
		return(0);

	if (wlconfig(sc) == 0)
		return(0);
	/*
	 * insert code for loopback test here
	 */
	wlrustrt(sc);		/* start receive unit */

	/* enable interrupts */
	sc->hacr = (HACR_DEFAULT | HACR_INTRON);
	CMD(sc);

	return(1);
}

/*
 * wlbldcu:
 *
 *	This function builds up the command unit structures.  It inits
 *	the scp, iscp, scb, cb, tbd, and tbuf.
 *
 *	All structures live in board SRAM and are written through the
 *	PIO windows (outw to PIOR* selects the SRAM address, outsw to
 *	PIOP* streams the data).  The exact write ordering matters to
 *	the 82586 reset handshake; do not reorder.
 */
static void
wlbldcu(struct wl_softc *sc)
{
	short		base = sc->base;
	scp_t		scp;
	iscp_t		iscp;
	scb_t		scb;
	ac_t		cb;
	tbd_t		tbd;
	int		i;

	/* system configuration pointer: points the chip at the ISCP */
	bzero(&scp, sizeof(scp));
	scp.scp_sysbus = 0;
	scp.scp_iscp = OFFSET_ISCP;
	scp.scp_iscp_base = 0;
	outw(PIOR1(base), OFFSET_SCP);
	outsw(PIOP1(base), &scp, sizeof(scp_t)/2);

	/* intermediate SCP: busy flag is cleared by the chip when ready */
	bzero(&iscp, sizeof(iscp));
	iscp.iscp_busy = 1;
	iscp.iscp_scb_offset = OFFSET_SCB;
	iscp.iscp_scb = 0;
	iscp.iscp_scb_base = 0;
	outw(PIOR1(base), OFFSET_ISCP);
	outsw(PIOP1(base), &iscp, sizeof(iscp_t)/2);

	/* system control block: command/status mailbox plus error counters */
	scb.scb_status = 0;
	scb.scb_command = SCB_RESET;
	scb.scb_cbl_offset = OFFSET_CU;
	scb.scb_rfa_offset = OFFSET_RU;
	scb.scb_crcerrs = 0;
	scb.scb_alnerrs = 0;
	scb.scb_rscerrs = 0;
	scb.scb_ovrnerrs = 0;
	outw(PIOR1(base), OFFSET_SCB);
	outsw(PIOP1(base), &scb, sizeof(scb_t)/2);

	SET_CHAN_ATTN(sc);

	/* busy-wait for the chip to consume the ISCP (clears iscp_busy) */
	outw(PIOR0(base), OFFSET_ISCP + 0);	/* address of iscp_busy */
	for (i = 1000000; inw(PIOP0(base)) && (i-- > 0); )
		continue;
	if (i <= 0)
		printf("wl%d bldcu(): iscp_busy timeout.\n", sc->unit);

	/* then wait for the post-reset status (CX|CNA) to appear */
	outw(PIOR0(base), OFFSET_SCB + 0);	/* address of scb_status */
	for (i = STATUS_TRIES; i-- > 0; ) {
		if (inw(PIOP0(base)) == (SCB_SW_CX|SCB_SW_CNA)) 
			break;
	}
	if (i <= 0)
		printf("wl%d bldcu(): not ready after reset.\n", sc->unit);
	wlack(sc);

	/* idle NOP command block terminating the command list */
	cb.ac_status = 0;
	cb.ac_command = AC_CW_EL;	/* NOP */
	cb.ac_link_offset = OFFSET_CU;
	outw(PIOR1(base), OFFSET_CU);
	outsw(PIOP1(base), &cb, 6/2);

	/* empty transmit buffer descriptor */
	tbd.act_count = 0;
	tbd.next_tbd_offset = I82586NULL;
	tbd.buffer_addr = 0;
	tbd.buffer_base = 0;
	outw(PIOR1(base), OFFSET_TBD);
	outsw(PIOP1(base), &tbd, sizeof(tbd_t)/2);
}

/*
 * wlstart:
 *
 *	send a packet
 *
 * input	: board number
 * output	: stuff sent to board if any there
 *
 */
static void
wlstart(struct ifnet *ifp)
{
	struct mbuf	*m;
	struct wl_softc	*sc = ifp->if_softc;
	short		base = sc->base;
	int		scb_status, cu_status, scb_command;

	WL_LOCK(sc);
#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("%s: entered wlstart()\n", ifp->if_xname);
#endif

	/* snapshot command-unit and SCB state from board SRAM */
	outw(PIOR1(base), OFFSET_CU);
	cu_status = inw(PIOP1(base));
	outw(PIOR0(base),OFFSET_SCB + 0);	/* scb_status */
	scb_status = inw(PIOP0(base));
	outw(PIOR0(base), OFFSET_SCB + 2);
	scb_command = inw(PIOP0(base));

	/*
	 * don't need OACTIVE check as tbusy here checks to see
	 * if we are already busy 
	 */
	if (sc->tbusy) {
		/* the board may have gone idle without us seeing the intr */
		if ((scb_status & 0x0700) == SCB_CUS_IDLE &&
		    (cu_status & AC_SW_B) == 0){
			sc->tbusy = 0;
			untimeout(wlwatchdog, sc, sc->watchdog_ch);
			sc->wl_ac.ac_if.if_flags &= ~IFF_OACTIVE;
			/*
			 * This is probably just a race.  The xmt'r is just
			 * became idle but WE have masked interrupts so ...
			 */
#ifdef WLDEBUG
			printf("%s: CU idle, scb %04x %04x cu %04x\n",
			    ifp->if_xname, scb_status, scb_command, cu_status);
#endif 
			if (xmt_watch)
				printf("!!");
		} else {
			WL_UNLOCK(sc);
			return;	/* genuinely still busy */
		}
	} else if ((scb_status & 0x0700) == SCB_CUS_ACTV ||
	    (cu_status & AC_SW_B)){
#ifdef WLDEBUG
		printf("%s: CU unexpectedly busy; scb %04x cu %04x\n",
		    ifp->if_xname, scb_status, cu_status);
#endif
		if (xmt_watch)
			printf("%s: busy?!",ifp->if_xname);
		WL_UNLOCK(sc);
		return;		/* hey, why are we busy? */
	}

	/* get ourselves some data */
	ifp = &(sc->wl_if);
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m != (struct mbuf *)0) {
		/* let BPF see it before we commit it */
		BPF_MTAP(ifp, m);
		sc->tbusy++;
		/* set the watchdog timer so that if the board
		 * fails to interrupt we will restart
		 */
		/* try 10 ticks, not very long */
		sc->watchdog_ch = timeout(wlwatchdog, sc, 10);
		sc->wl_ac.ac_if.if_flags |= IFF_OACTIVE;
		sc->wl_if.if_opackets++;
		wlxmt(sc, m);		/* hand the mbuf to the board */
	} else {
		sc->wl_ac.ac_if.if_flags &= ~IFF_OACTIVE;
	}
	WL_UNLOCK(sc);
	return;
}

/*
 * wlread:
 *
 *	This routine does the actual copy of data (including ethernet header
 *	structure) from the WaveLAN to an mbuf chain that will be passed up
 *	to the "if" (network interface) layer.  NOTE:  we currently
 *	don't handle trailer protocols, so if that is needed, it will
 *	(at least in part) be added here.  For simplicities sake, this
 *	routine copies the receive buffers from the board into a local (stack)
 *	buffer until the frame has been copied from the board.  Once in
 *	the local buffer, the contents are copied to an mbuf chain that
 *	is then enqueued onto the appropriate "if" queue.
 *
 * input	: board number, and a frame descriptor address
 * output	: the packet is put into an mbuf chain, and passed up
 * assumes	: if any errors occur, packet is "dropped on the floor"
 *
 */
static int
wlread(struct wl_softc *sc, u_short fd_p)
{
	struct ifnet	*ifp = &sc->wl_if;
	short		base = sc->base;
	fd_t		fd;
	struct ether_header	*eh;
	struct mbuf	*m;
	rbd_t		rbd;
	u_char		*mb_p;
	u_short		mlen, len;
	u_short		bytes_in_msg, bytes_in_mbuf, bytes;

	WL_LOCK_ASSERT(sc);
#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("wl%d: entered wlread()\n", sc->unit);
#endif
	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
		printf("%s read(): board is not running.\n", ifp->if_xname);
		sc->hacr &= ~HACR_INTRON;
		CMD(sc);		/* turn off interrupts */
	}

	/*
	 * Collect message size.
	 */
	outw(PIOR1(base), fd_p);
	insw(PIOP1(base), &fd, sizeof(fd_t)/2);
	if (fd.rbd_offset == I82586NULL) {
		/* no receive buffer descriptor: resync by full reset */
		if (wlhwrst(sc) != TRUE) {
			sc->hacr &= ~HACR_INTRON;
			CMD(sc);	/* turn off interrupts */
			printf("wl%d read(): hwrst trouble.\n", sc->unit);
		}
		return 0;
	}

	outw(PIOR1(base), fd.rbd_offset);
	insw(PIOP1(base), &rbd, sizeof(rbd_t)/2);
	bytes_in_msg = rbd.status & RBD_SW_COUNT;

	/*
	 * Allocate a cluster'd mbuf to receive the packet.
	 */
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (wlhwrst(sc) != TRUE) {
			sc->hacr &= ~HACR_INTRON;
			CMD(sc);	/* turn off interrupts */
			printf("wl%d read(): hwrst trouble.\n", sc->unit);
		}
		return 0;
	}
	m->m_pkthdr.len = m->m_len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);		/* align IP header */

	/*
	 * Collect the message data.
	 */
	mlen = 0;
	mb_p = mtod(m, u_char *);
	bytes_in_mbuf = m->m_len;

	/* Put the ethernet header inside the mbuf. */
	bcopy(&fd.destination[0], mb_p, 14);
	mb_p += 14;
	mlen += 14;
	bytes_in_mbuf -= 14;

	/* walk the chain of receive buffer descriptors, copying each chunk */
	bytes = min(bytes_in_mbuf, bytes_in_msg);
	for (;;) {
		/*
		 * insw moves whole 16-bit words, so round odd chunk sizes
		 * up by one.  NOTE(review): this can store one byte past
		 * "bytes"; presumably the cluster always has that slack --
		 * verify against MCLBYTES/ETHER_ALIGN arithmetic.
		 */
		if (bytes & 1) {
			len = bytes + 1;
		} else {
			len = bytes;
		}
		outw(PIOR1(base), rbd.buffer_addr);
		insw(PIOP1(base), mb_p, len/2);
		mlen += bytes;

		/*
		 * NOTE(review): bytes is min(bytes_in_mbuf, bytes_in_msg)
		 * above, so this overflow branch looks unreachable as
		 * written; kept as a defensive check.
		 */
		if (bytes > bytes_in_mbuf) {
			/* XXX something wrong, a packet should fit in 1 cluster */
			m_freem(m);
			printf("wl%d read(): packet too large (%u > %u)\n",
			    sc->unit, bytes, bytes_in_mbuf);
			if (wlhwrst(sc) != TRUE) {
				sc->hacr &= ~HACR_INTRON;
				CMD(sc);	/* turn off interrupts */
				printf("wl%d read(): hwrst trouble.\n",
				    sc->unit);
			}
			return 0;
		}
		mb_p += bytes;
		bytes_in_mbuf -= bytes;
		bytes_in_msg -= bytes;
		if (bytes_in_msg == 0) {
			if (rbd.status & RBD_SW_EOF ||
			    rbd.next_rbd_offset == I82586NULL) {
				break;
			}
			outw(PIOR1(base), rbd.next_rbd_offset);
			insw(PIOP1(base), &rbd, sizeof(rbd_t)/2);
			bytes_in_msg = rbd.status & RBD_SW_COUNT;
		} else {
			rbd.buffer_addr += bytes;
		}

		bytes = min(bytes_in_mbuf, bytes_in_msg);
	}

	m->m_pkthdr.len = m->m_len = mlen;
	m->m_pkthdr.rcvif = ifp;

	/*
	 * If hw is in promiscuous mode (note that I said hardware, not if
	 * IFF_PROMISC is set in ifnet flags), then if this is a unicast
	 * packet and the MAC dst is not us, drop it.  This check in normally
	 * inside ether_input(), but IFF_MULTI causes hw promisc without
	 * a bpf listener, so this is wrong.
	 *	Greg Troxel <gdt@ir.bbn.com>, 1998-08-07
	 */
	/*
	 * TBD: also discard packets where NWID does not match.
	 * However, there does not appear to be a way to read the nwid
	 * for a received packet.  -gdt 1998-08-07
	 */
	/* XXX verify mbuf length */
	eh = mtod(m, struct ether_header *);
	if (
#ifdef WL_USE_IFNET_PROMISC_CHECK /* not defined */
	    (sc->wl_ac.ac_if.if_flags & (IFF_PROMISC|IFF_ALLMULTI))
#else
	    /* hw is in promisc mode if this is true */
	    (sc->mode & (MOD_PROM | MOD_ENAL))
#endif
	    &&
	    (eh->ether_dhost[0] & 1) == 0 &&	/* !mcast and !bcast */
	    bcmp(eh->ether_dhost, sc->wl_ac.ac_enaddr,
	    sizeof(eh->ether_dhost)) != 0 ) {
		m_freem(m);
		return 1;
	}

#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("wl%d: wlrecv %u bytes\n", sc->unit, mlen);
#endif

#ifdef WLCACHE
	wl_cache_store(sc, base, eh, m);
#endif

	/*
	 * received packet is now in a chain of mbuf's.  next step is
	 * to pass the packet upwards.  Drop the lock across if_input
	 * to avoid recursing into the driver with it held.
	 */
	WL_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	WL_LOCK(sc);
	return 1;
}

/*
 * wlioctl:
 *
 *	This routine processes an ioctl request from the "if" layer
 *	above.
* * input : pointer the appropriate "if" struct, command, and data * output : based on command appropriate action is taken on the * WaveLAN board(s) or related structures * return : error is returned containing exit conditions * */ static int wlioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct wl_softc *sc = ifp->if_softc; short base = sc->base; short mode = 0; int opri, error = 0; struct thread *td = curthread; /* XXX */ int irq, irqval, i, isroot; caddr_t up; #ifdef WLCACHE int size; char * cpt; #endif WL_LOCK(sc); #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("%s: entered wlioctl()\n", ifp->if_xname); #endif opri = splimp(); switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_ALLMULTI) { mode |= MOD_ENAL; } if (ifp->if_flags & IFF_PROMISC) { mode |= MOD_PROM; } if (ifp->if_flags & IFF_LINK0) { mode |= MOD_PROM; } /* * force a complete reset if the recieve multicast/ * promiscuous mode changes so that these take * effect immediately. * */ if (sc->mode != mode) { sc->mode = mode; if (sc->flags & DSF_RUNNING) { sc->flags &= ~DSF_RUNNING; wlinit(sc); } } /* if interface is marked DOWN and still running then * stop it. 
*/ if ((ifp->if_flags & IFF_UP) == 0 && sc->flags & DSF_RUNNING) { printf("%s ioctl(): board is not running\n", ifp->if_xname); sc->flags &= ~DSF_RUNNING; sc->hacr &= ~HACR_INTRON; CMD(sc); /* turn off interrupts */ } /* else if interface is UP and RUNNING, start it */ else if (ifp->if_flags & IFF_UP && (sc->flags & DSF_RUNNING) == 0) { wlinit(sc); } /* if WLDEBUG set on interface, then printf rf-modem regs */ if (ifp->if_flags & IFF_DEBUG) wlmmcstat(sc); break; #if MULTICAST case SIOCADDMULTI: case SIOCDELMULTI: wlinit(sc); break; #endif /* MULTICAST */ /* DEVICE SPECIFIC */ /* copy the PSA out to the caller */ case SIOCGWLPSA: /* pointer to buffer in user space */ up = (void *)ifr->ifr_data; /* work out if they're root */ isroot = (suser(td) == 0); for (i = 0; i < 0x40; i++) { /* don't hand the DES key out to non-root users */ if ((i > WLPSA_DESKEY) && (i < (WLPSA_DESKEY + 8)) && !isroot) continue; if (subyte((up + i), sc->psa[i])) { WL_UNLOCK(sc); return(EFAULT); } } break; /* copy the PSA in from the caller; we only copy _some_ values */ case SIOCSWLPSA: /* root only */ if ((error = suser(td))) break; error = EINVAL; /* assume the worst */ /* pointer to buffer in user space containing data */ up = (void *)ifr->ifr_data; /* check validity of input range */ for (i = 0; i < 0x40; i++) if (fubyte(up + i) < 0) { WL_UNLOCK(sc); return(EFAULT); } /* check IRQ value */ irqval = fubyte(up+WLPSA_IRQNO); for (irq = 15; irq >= 0; irq--) if (irqvals[irq] == irqval) break; if (irq == 0) /* oops */ break; /* new IRQ */ sc->psa[WLPSA_IRQNO] = irqval; /* local MAC */ for (i = 0; i < 6; i++) sc->psa[WLPSA_LOCALMAC+i] = fubyte(up+WLPSA_LOCALMAC+i); /* MAC select */ sc->psa[WLPSA_MACSEL] = fubyte(up+WLPSA_MACSEL); /* default nwid */ sc->psa[WLPSA_NWID] = fubyte(up+WLPSA_NWID); sc->psa[WLPSA_NWID+1] = fubyte(up+WLPSA_NWID+1); error = 0; wlsetpsa(sc); /* update the PSA */ break; /* get the current NWID out of the sc since we stored it there */ case SIOCGWLCNWID: ifr->ifr_data = 
(caddr_t) (sc->nwid[0] << 8 | sc->nwid[1]); break; /* * change the nwid dynamically. This * ONLY changes the radio modem and does not * change the PSA. * * 2 steps: * 1. save in softc "soft registers" * 2. save in radio modem (MMC) */ case SIOCSWLCNWID: /* root only */ if ((error = suser(td))) break; if (!(ifp->if_flags & IFF_UP)) { error = EIO; /* only allowed while up */ } else { /* * soft c nwid shadows radio modem setting */ sc->nwid[0] = (int)ifr->ifr_data >> 8; sc->nwid[1] = (int)ifr->ifr_data & 0xff; MMC_WRITE(MMC_NETW_ID_L,sc->nwid[1]); MMC_WRITE(MMC_NETW_ID_H,sc->nwid[0]); } break; /* copy the EEPROM in 2.4 Gz WaveMODEM out to the caller */ case SIOCGWLEEPROM: /* root only */ if ((error = suser(td))) break; /* pointer to buffer in user space */ up = (void *)ifr->ifr_data; for (i=0x00; i<0x80; ++i) { /* 2.4 Gz: size of EEPROM */ MMC_WRITE(MMC_EEADDR,i); /* 2.4 Gz: get frequency */ MMC_WRITE(MMC_EECTRL, /* 2.4 Gz: EEPROM read */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: */ DELAY(40); /* 2.4 Gz */ if (subyte(up + 2*i, /* 2.4 Gz: pass low byte of */ wlmmcread(base,MMC_EEDATALrv))) {/* 2.4 Gz: EEPROM word */ WL_UNLOCK(sc); return(EFAULT); /* 2.4 Gz: */ } if (subyte(up + 2*i+1, /* 2.4 Gz: pass hi byte of */ wlmmcread(base,MMC_EEDATALrv))) {/* 2.4 Gz: EEPROM word */ WL_UNLOCK(sc); return(EFAULT); /* 2.4 Gz: */ } } break; #ifdef WLCACHE /* zero (Delete) the wl cache */ case SIOCDWLCACHE: /* root only */ if ((error = suser(td))) break; wl_cache_zero(sc); break; /* read out the number of used cache elements */ case SIOCGWLCITEM: ifr->ifr_data = (caddr_t) sc->w_sigitems; break; /* read out the wl cache */ case SIOCGWLCACHE: /* pointer to buffer in user space */ up = (void *)ifr->ifr_data; cpt = (char *) &sc->w_sigcache[0]; size = sc->w_sigitems * sizeof(struct w_sigcache); for (i = 0; i < size; i++) { if (subyte((up + i), *cpt++)) { WL_UNLOCK(sc); return(EFAULT); } } break; #endif default: error = ether_ioctl(ifp, cmd, data); break; } splx(opri); WL_UNLOCK(sc); return 
	    (error);
}

/*
 * wlwatchdog():
 *
 * Called if the timer set in wlstart expires before an interrupt is received
 * from the wavelan.  It seems to lose interrupts sometimes.
 * The watchdog routine gets called if the transmitter failed to interrupt
 *
 * input	: which board is timing out
 * output	: board reset
 *
 */
static void
wlwatchdog(void *vsc)
{
	struct wl_softc *sc = vsc;
	int unit = sc->unit;

	log(LOG_ERR, "wl%d: wavelan device timeout on xmit\n", unit);
	WL_LOCK(sc);
	/* Count the lost transmit, then fully re-initialize the board. */
	sc->wl_ac.ac_if.if_oerrors++;
	wlinit(sc);
	WL_UNLOCK(sc);
}

/*
 * wlintr:
 *
 * This function is the interrupt handler for the WaveLAN
 * board.  This routine will be called whenever either a packet
 * is received, or a packet has successfully been transfered and
 * the unit is ready to transmit another packet.
 *
 * input	: board number that interrupted
 * output	: either a packet is received, or a packet is transfered
 *
 */
static void
wlintr(void *arg)
{
	struct wl_softc	*sc = (struct wl_softc *)arg;
	short		base = sc->base;
	int		ac_status;
	u_short		int_type, int_type1;

	WL_LOCK(sc);
#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("wl%d: wlintr() called\n", sc->unit);
#endif

	if ((int_type = inw(HASR(base))) & HASR_MMC_INTR) {
		/* handle interrupt from the modem management controler */
		/* This will clear the interrupt condition */
		(void) wlmmcread(base,MMC_DCE_STATUS); /* ignored for now */
	}

	if (!(int_type & HASR_INTR)){	/* return if no interrupt from 82586 */
		/* commented out. jrb.
it happens when reinit occurs printf("wlintr: int_type %x, dump follows\n", int_type); wldump(unit); */ WL_UNLOCK(sc); return; } if (gathersnr) getsnr(sc); for (;;) { outw(PIOR0(base), OFFSET_SCB + 0); /* get scb status */ int_type = (inw(PIOP0(base)) & SCB_SW_INT); if (int_type == 0) /* no interrupts left */ break; int_type1 = wlack(sc); /* acknowledge interrupt(s) */ /* make sure no bits disappeared (others may appear) */ if ((int_type & int_type1) != int_type) printf("wlack() int bits disappeared : %04x != int_type %04x\n", int_type1, int_type); int_type = int_type1; /* go with the new status */ /* * incoming packet */ if (int_type & SCB_SW_FR) { sc->wl_if.if_ipackets++; wlrcv(sc); } /* * receiver not ready */ if (int_type & SCB_SW_RNR) { sc->wl_if.if_ierrors++; #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("wl%d intr(): receiver overrun! begin_fd = %x\n", sc->unit, sc->begin_fd); #endif wlrustrt(sc); } /* * CU not ready */ if (int_type & SCB_SW_CNA) { /* * At present, we don't care about CNA's. We * believe they are a side effect of XMT. */ } if (int_type & SCB_SW_CX) { /* * At present, we only request Interrupt for * XMT. 
*/ outw(PIOR1(base), OFFSET_CU); /* get command status */ ac_status = inw(PIOP1(base)); if (xmt_watch) { /* report some anomalies */ if (sc->tbusy == 0) { printf("wl%d: xmt intr but not busy, CU %04x\n", sc->unit, ac_status); } if (ac_status == 0) { printf("wl%d: xmt intr but ac_status == 0\n", sc->unit); } if (ac_status & AC_SW_A) { printf("wl%d: xmt aborted\n", sc->unit); } #ifdef notdef if (ac_status & TC_CARRIER) { printf("wl%d: no carrier\n", sc->unit); } #endif /* notdef */ if (ac_status & TC_CLS) { printf("wl%d: no CTS\n", sc->unit); } if (ac_status & TC_DMA) { printf("wl%d: DMA underrun\n", sc->unit); } if (ac_status & TC_DEFER) { printf("wl%d: xmt deferred\n", sc->unit); } if (ac_status & TC_SQE) { printf("wl%d: heart beat\n", sc->unit); } if (ac_status & TC_COLLISION) { printf("wl%d: too many collisions\n", sc->unit); } } /* if the transmit actually failed, or returned some status */ if ((!(ac_status & AC_SW_OK)) || (ac_status & 0xfff)) { if (ac_status & (TC_COLLISION | TC_CLS | TC_DMA)) { sc->wl_if.if_oerrors++; } /* count collisions */ sc->wl_if.if_collisions += (ac_status & 0xf); /* if TC_COLLISION set and collision count zero, 16 collisions */ if ((ac_status & 0x20) == 0x20) { sc->wl_if.if_collisions += 0x10; } } sc->tbusy = 0; untimeout(wlwatchdog, sc, sc->watchdog_ch); sc->wl_ac.ac_if.if_flags &= ~IFF_OACTIVE; wlstart(&(sc->wl_if)); } } WL_UNLOCK(sc); return; } /* * wlrcv: * * This routine is called by the interrupt handler to initiate a * packet transfer from the board to the "if" layer above this * driver. This routine checks if a buffer has been successfully * received by the WaveLAN. If so, the routine wlread is called * to do the actual transfer of the board data (including the * ethernet header) into a packet (consisting of an mbuf chain). 
* * input : number of the board to check * output : if a packet is available, it is "sent up" * */ static void wlrcv(struct wl_softc *sc) { short base = sc->base; u_short fd_p, status, offset, link_offset; #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("wl%d: entered wlrcv()\n", sc->unit); #endif for (fd_p = sc->begin_fd; fd_p != I82586NULL; fd_p = sc->begin_fd) { outw(PIOR0(base), fd_p + 0); /* address of status */ status = inw(PIOP0(base)); outw(PIOR1(base), fd_p + 4); /* address of link_offset */ link_offset = inw(PIOP1(base)); offset = inw(PIOP1(base)); /* rbd_offset */ if (status == 0xffff || offset == 0xffff /*I82586NULL*/) { if (wlhwrst(sc) != TRUE) printf("wl%d rcv(): hwrst ffff trouble.\n", sc->unit); return; } else if (status & AC_SW_C) { if (status == (RFD_DONE|RFD_RSC)) { /* lost one */ #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("wl%d RCV: RSC %x\n", sc->unit, status); #endif sc->wl_if.if_ierrors++; } else if (!(status & RFD_OK)) { printf("wl%d RCV: !OK %x\n", sc->unit, status); sc->wl_if.if_ierrors++; } else if (status & 0xfff) { /* can't happen */ printf("wl%d RCV: ERRs %x\n", sc->unit, status); sc->wl_if.if_ierrors++; } else if (!wlread(sc, fd_p)) return; if (!wlrequeue(sc, fd_p)) { /* abort on chain error */ if (wlhwrst(sc) != TRUE) printf("wl%d rcv(): hwrst trouble.\n", sc->unit); return; } sc->begin_fd = link_offset; } else { break; } } return; } /* * wlrequeue: * * This routine puts rbd's used in the last receive back onto the * free list for the next receive. 
* */ static int wlrequeue(struct wl_softc *sc, u_short fd_p) { short base = sc->base; fd_t fd; u_short l_rbdp, f_rbdp, rbd_offset; outw(PIOR0(base), fd_p + 6); rbd_offset = inw(PIOP0(base)); if ((f_rbdp = rbd_offset) != I82586NULL) { l_rbdp = f_rbdp; for (;;) { outw(PIOR0(base), l_rbdp + 0); /* address of status */ if (inw(PIOP0(base)) & RBD_SW_EOF) break; outw(PIOP0(base), 0); outw(PIOR0(base), l_rbdp + 2); /* next_rbd_offset */ if ((l_rbdp = inw(PIOP0(base))) == I82586NULL) break; } outw(PIOP0(base), 0); outw(PIOR0(base), l_rbdp + 2); /* next_rbd_offset */ outw(PIOP0(base), I82586NULL); outw(PIOR0(base), l_rbdp + 8); /* address of size */ outw(PIOP0(base), inw(PIOP0(base)) | AC_CW_EL); outw(PIOR0(base), sc->end_rbd + 2); outw(PIOP0(base), f_rbdp); /* end_rbd->next_rbd_offset */ outw(PIOR0(base), sc->end_rbd + 8); /* size */ outw(PIOP0(base), inw(PIOP0(base)) & ~AC_CW_EL); sc->end_rbd = l_rbdp; } fd.status = 0; fd.command = AC_CW_EL; fd.link_offset = I82586NULL; fd.rbd_offset = I82586NULL; outw(PIOR1(base), fd_p); outsw(PIOP1(base), &fd, 8/2); outw(PIOR1(base), sc->end_fd + 2); /* addr of command */ outw(PIOP1(base), 0); /* command = 0 */ outw(PIOP1(base), fd_p); /* end_fd->link_offset = fd_p */ sc->end_fd = fd_p; return 1; } #ifdef WLDEBUG static int xmt_debug = 0; #endif /* WLDEBUG */ /* * wlxmt: * * This routine fills in the appropriate registers and memory * locations on the WaveLAN board and starts the board off on * the transmit. 
* * input : pointers to board of interest's softc and the mbuf * output : board memory and registers are set for xfer and attention * */ static void wlxmt(struct wl_softc *sc, struct mbuf *m) { u_short xmtdata_p = OFFSET_TBUF; u_short xmtshort_p; struct mbuf *tm_p = m; struct ether_header *eh_p = mtod(m, struct ether_header *); u_char *mb_p = mtod(m, u_char *) + sizeof(struct ether_header); u_short count = m->m_len - sizeof(struct ether_header); ac_t cb; u_short tbd_p = OFFSET_TBD; u_short len, clen = 0; short base = sc->base; int spin; #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("%s: entered wlxmt()\n", sc->wl_if.if_xname); #endif cb.ac_status = 0; cb.ac_command = (AC_CW_EL|AC_TRANSMIT|AC_CW_I); cb.ac_link_offset = I82586NULL; outw(PIOR1(base), OFFSET_CU); outsw(PIOP1(base), &cb, 6/2); outw(PIOP1(base), OFFSET_TBD); /* cb.cmd.transmit.tbd_offset */ outsw(PIOP1(base), eh_p->ether_dhost, WAVELAN_ADDR_SIZE/2); outw(PIOP1(base), eh_p->ether_type); #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) { if (xmt_debug) { printf("XMT mbuf: L%d @%p ", count, (void *)mb_p); printf("ether type %x\n", eh_p->ether_type); } } #endif /* WLDEBUG */ outw(PIOR0(base), OFFSET_TBD); outw(PIOP0(base), 0); /* act_count */ outw(PIOR1(base), OFFSET_TBD + 4); outw(PIOP1(base), xmtdata_p); /* buffer_addr */ outw(PIOP1(base), 0); /* buffer_base */ for (;;) { if (count) { if (clen + count > WAVELAN_MTU) break; if (count & 1) len = count + 1; else len = count; outw(PIOR1(base), xmtdata_p); outsw(PIOP1(base), mb_p, len/2); clen += count; outw(PIOR0(base), tbd_p); /* address of act_count */ outw(PIOP0(base), inw(PIOP0(base)) + count); xmtdata_p += len; if ((tm_p = tm_p->m_next) == (struct mbuf *)0) break; if (count & 1) { /* go to the next descriptor */ outw(PIOR0(base), tbd_p + 2); tbd_p += sizeof (tbd_t); outw(PIOP0(base), tbd_p); /* next_tbd_offset */ outw(PIOR0(base), tbd_p); outw(PIOP0(base), 0); /* act_count */ outw(PIOR1(base), tbd_p + 4); outw(PIOP1(base), xmtdata_p); /* 
buffer_addr */ outw(PIOP1(base), 0); /* buffer_base */ /* at the end -> coallesce remaining mbufs */ if (tbd_p == OFFSET_TBD + (N_TBD-1) * sizeof (tbd_t)) { wlsftwsleaze(&count, &mb_p, &tm_p, sc); continue; } /* next mbuf short -> coallesce as needed */ if ( (tm_p->m_next == (struct mbuf *) 0) || #define HDW_THRESHOLD 55 tm_p->m_len > HDW_THRESHOLD) /* ok */; else { wlhdwsleaze(&count, &mb_p, &tm_p, sc); continue; } } } else if ((tm_p = tm_p->m_next) == (struct mbuf *)0) break; count = tm_p->m_len; mb_p = mtod(tm_p, u_char *); #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) if (xmt_debug) printf("mbuf+ L%d @%p ", count, (void *)mb_p); #endif /* WLDEBUG */ } #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) if (xmt_debug) printf("CLEN = %d\n", clen); #endif /* WLDEBUG */ outw(PIOR0(base), tbd_p); if (clen < ETHERMIN) { outw(PIOP0(base), inw(PIOP0(base)) + ETHERMIN - clen); outw(PIOR1(base), xmtdata_p); for (xmtshort_p = xmtdata_p; clen < ETHERMIN; clen += 2) outw(PIOP1(base), 0); } outw(PIOP0(base), inw(PIOP0(base)) | TBD_SW_EOF); outw(PIOR0(base), tbd_p + 2); outw(PIOP0(base), I82586NULL); #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) { if (xmt_debug) { wltbd(sc); printf("\n"); } } #endif /* WLDEBUG */ outw(PIOR0(base), OFFSET_SCB + 2); /* address of scb_command */ /* * wait for 586 to clear previous command, complain if it takes * too long */ for (spin = 1;;spin = (spin + 1) % 10000) { if (inw(PIOP0(base)) == 0) { /* it's done, we can go */ break; } if ((spin == 0) && xmt_watch) { /* not waking up, and we care */ printf("%s: slow accepting xmit\n", sc->wl_if.if_xname); } } outw(PIOP0(base), SCB_CU_STRT); /* new command */ SET_CHAN_ATTN(sc); m_freem(m); /* XXX * Pause to avoid transmit overrun problems. * The required delay tends to vary with platform type, and may be * related to interrupt loss. */ if (wl_xmit_delay) { DELAY(wl_xmit_delay); } return; } /* * wlbldru: * * This function builds the linear linked lists of fd's and * rbd's. 
 *	Based on page 4-32 of 1986 Intel microcom handbook.
 *
 */
static u_short
wlbldru(struct wl_softc *sc)
{
	short	base = sc->base;
	fd_t	fd;
	rbd_t	rbd;
	u_short	fd_p = OFFSET_RU;
	u_short	rbd_p = OFFSET_RBD;
	int	i;

	/* Build the frame descriptor (fd) list, each fd linked to the next. */
	sc->begin_fd = fd_p;
	for (i = 0; i < N_FD; i++) {
		fd.status = 0;
		fd.command = 0;
		fd.link_offset = fd_p + sizeof(fd_t);
		fd.rbd_offset = I82586NULL;
		outw(PIOR1(base), fd_p);
		outsw(PIOP1(base), &fd, 8/2);
		fd_p = fd.link_offset;
	}
	/* Last fd: mark end-of-list and terminate the link chain. */
	fd_p -= sizeof(fd_t);
	sc->end_fd = fd_p;
	outw(PIOR1(base), fd_p + 2);
	outw(PIOP1(base), AC_CW_EL);	/* command */
	outw(PIOP1(base), I82586NULL);	/* link_offset */
	fd_p = OFFSET_RU;

	/* Attach the receive buffer descriptor (rbd) chain to the first fd. */
	outw(PIOR0(base), fd_p + 6);	/* address of rbd_offset */
	outw(PIOP0(base), rbd_p);
	outw(PIOR1(base), rbd_p);
	for (i = 0; i < N_RBD; i++) {
		rbd.status = 0;
		rbd.buffer_addr = rbd_p + sizeof(rbd_t) + 2;
		rbd.buffer_base = 0;
		rbd.size = RCVBUFSIZE;
		if (i != N_RBD-1) {
			/*
			 * NOTE(review): stride uses sizeof(ru_t) while the
			 * descriptor written is an rbd_t -- presumably
			 * intentional (rbd + its buffer) but worth confirming
			 * against the layout definitions.
			 */
			rbd_p += sizeof(ru_t);
			rbd.next_rbd_offset = rbd_p;
		} else {
			/* Final rbd terminates the chain and carries EL. */
			rbd.next_rbd_offset = I82586NULL;
			rbd.size |= AC_CW_EL;
			sc->end_rbd = rbd_p;
		}
		outsw(PIOP1(base), &rbd, sizeof(rbd_t)/2);
		outw(PIOR1(base), rbd_p);
	}
	return sc->begin_fd;
}

/*
 * wlrustrt:
 *
 * This routine starts the receive unit running.  First checks if the
 * board is actually ready, then the board is instructed to receive
 * packets again.
 *
 */
static void
wlrustrt(struct wl_softc *sc)
{
	short	base = sc->base;
	u_short	rfa;

#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("wl%d: entered wlrustrt()\n", sc->unit);
#endif
	/* If the receive unit is already running there is nothing to do. */
	outw(PIOR0(base), OFFSET_SCB);
	if (inw(PIOP0(base)) & SCB_RUS_READY){
		printf("wlrustrt: RUS_READY\n");
		return;
	}

	/* Issue RU_START with a freshly rebuilt receive frame area. */
	outw(PIOR0(base), OFFSET_SCB + 2);
	outw(PIOP0(base), SCB_RU_STRT);		/* command */
	rfa = wlbldru(sc);
	outw(PIOR0(base), OFFSET_SCB + 6);	/* address of scb_rfa_offset */
	outw(PIOP0(base), rfa);

	SET_CHAN_ATTN(sc);
	return;
}

/*
 * wldiag:
 *
 * This routine does a 586 op-code number 7, and obtains the
 * diagnose status for the WaveLAN.
* */ static int wldiag(struct wl_softc *sc) { short base = sc->base; short status; #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("wl%d: entered wldiag()\n", sc->unit); #endif outw(PIOR0(base), OFFSET_SCB); status = inw(PIOP0(base)); if (status & SCB_SW_INT) { /* state is 2000 which seems ok printf("wl%d diag(): unexpected initial state %\n", sc->unit, inw(PIOP0(base))); */ wlack(sc); } outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_DIAGNOSE|AC_CW_EL);/* ac_command */ if (wlcmd(sc, "diag()") == 0) return 0; outw(PIOR0(base), OFFSET_CU); if (inw(PIOP0(base)) & 0x0800) { printf("wl%d: i82586 Self Test failed!\n", sc->unit); return 0; } return TRUE; } /* * wlconfig: * * This routine does a standard config of the WaveLAN board. * */ static int wlconfig(struct wl_softc *sc) { configure_t configure; short base = sc->base; #if MULTICAST struct ifmultiaddr *ifma; u_char *addrp; int cnt = 0; #endif /* MULTICAST */ #ifdef WLDEBUG if (sc->wl_if.if_flags & IFF_DEBUG) printf("wl%d: entered wlconfig()\n", sc->unit); #endif outw(PIOR0(base), OFFSET_SCB); if (inw(PIOP0(base)) & SCB_SW_INT) { /* printf("wl%d config(): unexpected initial state %x\n", sc->unit, inw(PIOP0(base))); */ } wlack(sc); outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_CONFIGURE|AC_CW_EL); /* ac_command */ /* jrb hack */ configure.fifolim_bytecnt = 0x080c; configure.addrlen_mode = 0x0600; configure.linprio_interframe = 0x2060; configure.slot_time = 0xf200; configure.hardware = 0x0008; /* tx even w/o CD */ configure.min_frame_len = 0x0040; #if 0 /* This is the configuration block suggested by Marc Meertens * in an e-mail message to John * Ioannidis on 10 Nov 92. 
*/ configure.fifolim_bytecnt = 0x040c; configure.addrlen_mode = 0x0600; configure.linprio_interframe = 0x2060; configure.slot_time = 0xf000; configure.hardware = 0x0008; /* tx even w/o CD */ configure.min_frame_len = 0x0040; #else /* * below is the default board configuration from p2-28 from 586 book */ configure.fifolim_bytecnt = 0x080c; configure.addrlen_mode = 0x2600; configure.linprio_interframe = 0x7820; /* IFS=120, ACS=2 */ configure.slot_time = 0xf00c; /* slottime=12 */ configure.hardware = 0x0008; /* tx even w/o CD */ configure.min_frame_len = 0x0040; #endif if (sc->mode & (MOD_PROM | MOD_ENAL)) configure.hardware |= 1; outw(PIOR1(base), OFFSET_CU + 6); outsw(PIOP1(base), &configure, sizeof(configure_t)/2); if (wlcmd(sc, "config()-configure") == 0) return 0; #if MULTICAST outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_MCSETUP|AC_CW_EL); /* ac_command */ outw(PIOR1(base), OFFSET_CU + 8); TAILQ_FOREACH(ifma, &sc->wl_if.if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); outw(PIOP1(base), addrp[0] + (addrp[1] << 8)); outw(PIOP1(base), addrp[2] + (addrp[3] << 8)); outw(PIOP1(base), addrp[4] + (addrp[5] << 8)); ++cnt; } outw(PIOR1(base), OFFSET_CU + 6); /* mc-cnt */ outw(PIOP1(base), cnt * WAVELAN_ADDR_SIZE); if (wlcmd(sc, "config()-mcaddress") == 0) return 0; #endif /* MULTICAST */ outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_IASETUP|AC_CW_EL); /* ac_command */ outw(PIOR1(base), OFFSET_CU + 6); outsw(PIOP1(base), sc->wl_addr, WAVELAN_ADDR_SIZE/2); if (wlcmd(sc, "config()-address") == 0) return(0); wlinitmmc(sc); return(1); } /* * wlcmd: * * Set channel attention bit and busy wait until command has * completed. Then acknowledge the command completion. 
 */
static int
wlcmd(struct wl_softc *sc, char *str)
{
	short	base = sc->base;
	int	i;

	/* Start the command unit and ring the channel-attention doorbell. */
	outw(PIOR0(base), OFFSET_SCB + 2);	/* address of scb_command */
	outw(PIOP0(base), SCB_CU_STRT);

	SET_CHAN_ATTN(sc);

	/* Bounded busy-wait for the command-complete bit (AC_SW_C). */
	outw(PIOR0(base), OFFSET_CU);
	for (i = 0; i < 0xffff; i++)
		if (inw(PIOP0(base)) & AC_SW_C)
			break;
	if (i == 0xffff || !(inw(PIOP0(base)) & AC_SW_OK)) {
		/*
		 * Timed out, or the command completed without the OK bit:
		 * dump the SCB and CU state for diagnosis (str names the
		 * caller) and report failure.
		 */
		printf("wl%d: %s failed; status = %d, inw = %x, outw = %x\n",
		    sc->unit, str, inw(PIOP0(base)) & AC_SW_OK,
		    inw(PIOP0(base)), inw(PIOR0(base)));
		outw(PIOR0(base), OFFSET_SCB);
		printf("scb_status %x\n", inw(PIOP0(base)));
		outw(PIOR0(base), OFFSET_SCB+2);
		printf("scb_command %x\n", inw(PIOP0(base)));
		outw(PIOR0(base), OFFSET_SCB+4);
		printf("scb_cbl %x\n", inw(PIOP0(base)));
		outw(PIOR0(base), OFFSET_CU+2);
		printf("cu_cmd %x\n", inw(PIOP0(base)));
		return(0);
	}

	outw(PIOR0(base), OFFSET_SCB);
	if ((inw(PIOP0(base)) & SCB_SW_INT) && (inw(PIOP0(base)) != SCB_SW_CNA)) {
		/*
		printf("wl%d %s: unexpected final state %x\n",
		       sc->unit, str, inw(PIOP0(base)));
		*/
	}
	/* Acknowledge whatever interrupt condition the command raised. */
	wlack(sc);
	return(TRUE);
}

/*
 * wlack: if the 82596 wants attention because it has finished
 * sending or receiving a packet, acknowledge its desire and
 * return bits indicating the kind of attention. wlack() returns
 * these bits so that the caller can service exactly the
 * conditions that wlack() acknowledged.
 */
static int
wlack(struct wl_softc *sc)
{
	int	i;
	u_short	cmd;
	short	base = sc->base;

	/* Read the SCB status; nothing to acknowledge if no INT bits set. */
	outw(PIOR1(base), OFFSET_SCB);
	if (!(cmd = (inw(PIOP1(base)) & SCB_SW_INT)))
		return(0);
#ifdef WLDEBUG
	if (sc->wl_if.if_flags & IFF_DEBUG)
		printf("wl%d: doing a wlack()\n", sc->unit);
#endif
	/* Write the same bits back as the acknowledge command. */
	outw(PIOP1(base), cmd);
	SET_CHAN_ATTN(sc);
	/* Busy-wait (bounded) until the 82586 consumes the ack command. */
	outw(PIOR0(base), OFFSET_SCB + 2);	/* address of scb_command */
	for (i = 1000000; inw(PIOP0(base)) && (i-- > 0); )
		continue;
	if (i < 1)
		printf("wl%d wlack(): board not accepting command.\n", sc->unit);
	return(cmd);
}

#ifdef WLDEBUG
/*
 * wltbd: debug dump of the transmit buffer descriptor chain, printing
 * each descriptor's address, count and link until the EOF descriptor.
 */
static void
wltbd(struct wl_softc *sc)
{
	short		base = sc->base;
	u_short		tbd_p = OFFSET_TBD;
	tbd_t		tbd;
	int		i = 0;
	int		sum = 0;

	for (;;) {
		outw(PIOR1(base), tbd_p);
		insw(PIOP1(base), &tbd, sizeof(tbd_t)/2);
		sum += (tbd.act_count & ~TBD_SW_EOF);
		printf("%d: addr %x, count %d (%d), next %x, base %x\n",
		       i++, tbd.buffer_addr,
		       (tbd.act_count & ~TBD_SW_EOF), sum,
		       tbd.next_tbd_offset, tbd.buffer_base);
		if (tbd.act_count & TBD_SW_EOF)
			break;
		tbd_p = tbd.next_tbd_offset;
	}
}
#endif

/*
 * wlhdwsleaze: decide whether the current run of mbufs can be handed
 * to the hardware directly; if the run has an odd-length mbuf and is
 * short (<= HDW_THRESHOLD), coalesce it into the static t_packet
 * buffer instead.  Updates *countp/*mb_pp/*tm_pp for the caller.
 */
static void
wlhdwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp,
	    struct wl_softc *sc)
{
	struct mbuf	*tm_p = *tm_pp;
	u_char		*mb_p = *mb_pp;
	u_short		count = 0;
	u_char		*cp;
	int		len;

	/*
	 * can we get a run that will be coallesced or
	 * that terminates before breaking
	 */
	do {
		count += tm_p->m_len;
		if (tm_p->m_len & 1)
			break;
	} while ((tm_p = tm_p->m_next) != (struct mbuf *)0);
	if ( (tm_p == (struct mbuf *)0) ||
	     count > HDW_THRESHOLD) {
		/* Run is even-aligned or too big to copy: use it in place. */
		*countp = (*tm_pp)->m_len;
		*mb_pp = mtod((*tm_pp), u_char *);
		return;
	}

	/* we need to copy */
	tm_p = *tm_pp;
	mb_p = *mb_pp;
	count = 0;
	cp = (u_char *) t_packet;
	for (;;) {
		bcopy(mtod(tm_p, u_char *), cp, len = tm_p->m_len);
		count += len;
		if (count > HDW_THRESHOLD)
			break;
		cp += len;
		if (tm_p->m_next == (struct mbuf *)0)
			break;
		tm_p = tm_p->m_next;
	}
	*countp = count;
	*mb_pp = (u_char *) t_packet;
	*tm_pp = tm_p;
	return;
}

static void
wlsftwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp,
	     struct wl_softc *sc)
{
struct mbuf *tm_p = *tm_pp; u_short count = 0; u_char *cp = (u_char *) t_packet; int len; /* we need to copy */ for (;;) { bcopy(mtod(tm_p, u_char *), cp, len = tm_p->m_len); count += len; cp += len; if (tm_p->m_next == (struct mbuf *)0) break; tm_p = tm_p->m_next; } *countp = count; *mb_pp = (u_char *) t_packet; *tm_pp = tm_p; return; } static void wlmmcstat(struct wl_softc *sc) { short base = sc->base; u_short tmp; printf("wl%d: DCE_STATUS: 0x%x, ", sc->unit, wlmmcread(base,MMC_DCE_STATUS) & 0x0f); tmp = wlmmcread(base,MMC_CORRECT_NWID_H) << 8; tmp |= wlmmcread(base,MMC_CORRECT_NWID_L); printf("Correct NWID's: %d, ", tmp); tmp = wlmmcread(base,MMC_WRONG_NWID_H) << 8; tmp |= wlmmcread(base,MMC_WRONG_NWID_L); printf("Wrong NWID's: %d\n", tmp); printf("THR_PRE_SET: 0x%x, ", wlmmcread(base,MMC_THR_PRE_SET)); printf("SIGNAL_LVL: %d, SILENCE_LVL: %d\n", wlmmcread(base,MMC_SIGNAL_LVL), wlmmcread(base,MMC_SILENCE_LVL)); printf("SIGN_QUAL: 0x%x, NETW_ID: %x:%x, DES: %d\n", wlmmcread(base,MMC_SIGN_QUAL), wlmmcread(base,MMC_NETW_ID_H), wlmmcread(base,MMC_NETW_ID_L), wlmmcread(base,MMC_DES_AVAIL)); } static u_short wlmmcread(u_int base, u_short reg) { while (inw(HASR(base)) & HASR_MMC_BUSY) continue; outw(MMCR(base),reg << 1); while (inw(HASR(base)) & HASR_MMC_BUSY) continue; return (u_short)inw(MMCR(base)) >> 8; } static void getsnr(struct wl_softc *sc) { MMC_WRITE(MMC_FREEZE,1); /* * SNR retrieval procedure : * * read signal level : wlmmcread(base, MMC_SIGNAL_LVL); * read silence level : wlmmcread(base, MMC_SILENCE_LVL); */ MMC_WRITE(MMC_FREEZE,0); /* * SNR is signal:silence ratio. 
*/ } /* ** wlgetpsa ** ** Reads the psa for the wavelan at (base) into (buf) */ static void wlgetpsa(int base, u_char *buf) { int i; PCMD(base, HACR_DEFAULT & ~HACR_16BITS); PCMD(base, HACR_DEFAULT & ~HACR_16BITS); for (i = 0; i < 0x40; i++) { outw(PIOR2(base), i); buf[i] = inb(PIOP2(base)); } PCMD(base, HACR_DEFAULT); PCMD(base, HACR_DEFAULT); } /* ** wlsetpsa ** ** Writes the psa for wavelan (unit) from the softc back to the ** board. Updates the CRC and sets the CRC OK flag. ** ** Do not call this when the board is operating, as it doesn't ** preserve the hacr. */ static void wlsetpsa(struct wl_softc *sc) { short base = sc->base; int i, oldpri; u_short crc; crc = wlpsacrc(sc->psa); /* calculate CRC of PSA */ sc->psa[WLPSA_CRCLOW] = crc & 0xff; sc->psa[WLPSA_CRCHIGH] = (crc >> 8) & 0xff; sc->psa[WLPSA_CRCOK] = 0x55; /* default to 'bad' until programming complete */ oldpri = splimp(); /* ick, long pause */ PCMD(base, HACR_DEFAULT & ~HACR_16BITS); PCMD(base, HACR_DEFAULT & ~HACR_16BITS); for (i = 0; i < 0x40; i++) { DELAY(DELAYCONST); outw(PIOR2(base),i); /* write param memory */ DELAY(DELAYCONST); outb(PIOP2(base), sc->psa[i]); } DELAY(DELAYCONST); outw(PIOR2(base),WLPSA_CRCOK); /* update CRC flag*/ DELAY(DELAYCONST); sc->psa[WLPSA_CRCOK] = 0xaa; /* OK now */ outb(PIOP2(base), 0xaa); /* all OK */ DELAY(DELAYCONST); PCMD(base, HACR_DEFAULT); PCMD(base, HACR_DEFAULT); splx(oldpri); } /* ** CRC routine provided by Christopher Giordano , ** from original code by Tomi Mikkonen (tomitm@remedy.fi) */ static u_int crc16_table[16] = { 0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401, 0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400 }; static u_short wlpsacrc(u_char *buf) { u_short crc = 0; int i, r1; for (i = 0; i < 0x3d; i++, buf++) { /* lower 4 bits */ r1 = crc16_table[crc & 0xF]; crc = (crc >> 4) & 0x0FFF; crc = crc ^ r1 ^ crc16_table[*buf & 0xF]; /* upper 4 bits */ r1 = crc16_table[crc & 0xF]; crc = (crc >> 4) & 0x0FFF; crc = crc ^ r1 ^ 
crc16_table[(*buf >> 4) & 0xF]; } return(crc); } #ifdef WLCACHE /* * wl_cache_store * * take input packet and cache various radio hw characteristics * indexed by MAC address. * * Some things to think about: * note that no space is malloced. * We might hash the mac address if the cache were bigger. * It is not clear that the cache is big enough. * It is also not clear how big it should be. * The cache is IP-specific. We don't care about that as * we want it to be IP-specific. * The last N recv. packets are saved. This will tend * to reward agents and mobile hosts that beacon. * That is probably fine for mobile ip. */ /* globals for wavelan signal strength cache */ /* this should go into softc structure above. */ /* set true if you want to limit cache items to broadcast/mcast * only packets (not unicast) */ static int wl_cache_mcastonly = 1; SYSCTL_INT(_machdep, OID_AUTO, wl_cache_mcastonly, CTLFLAG_RW, &wl_cache_mcastonly, 0, ""); /* set true if you want to limit cache items to IP packets only */ static int wl_cache_iponly = 1; SYSCTL_INT(_machdep, OID_AUTO, wl_cache_iponly, CTLFLAG_RW, &wl_cache_iponly, 0, ""); /* zero out the cache */ static void wl_cache_zero(struct wl_softc *sc) { bzero(&sc->w_sigcache[0], sizeof(struct w_sigcache) * MAXCACHEITEMS); sc->w_sigitems = 0; sc->w_nextcache = 0; sc->w_wrapindex = 0; } /* store hw signal info in cache. * index is MAC address, but an ip src gets stored too * There are two filters here controllable via sysctl: * throw out unicast (on by default, but can be turned off) * throw out non-ip (on by default, but can be turned off) */ static void wl_cache_store (struct wl_softc *sc, int base, struct ether_header *eh, struct mbuf *m) { #ifdef INET struct ip *ip = NULL; /* Avoid GCC warning */ int i; int signal, silence; int w_insertcache; /* computed index for cache entry storage */ int ipflag = wl_cache_iponly; #endif /* filters: * 1. ip only * 2. configurable filter to throw out unicast packets, * keep multicast only. 
*/ #ifdef INET /* reject if not IP packet */ if ( wl_cache_iponly && (ntohs(eh->ether_type) != 0x800)) { return; } /* check if broadcast or multicast packet. we toss * unicast packets */ if (wl_cache_mcastonly && ((eh->ether_dhost[0] & 1) == 0)) { return; } /* find the ip header. we want to store the ip_src * address. use the mtod macro(in mbuf.h) * to typecast m to struct ip * */ if (ipflag) { ip = mtod(m, struct ip *); } /* do a linear search for a matching MAC address * in the cache table * . MAC address is 6 bytes, * . var w_nextcache holds total number of entries already cached */ for (i = 0; i < sc->w_nextcache; i++) { if (! bcmp(eh->ether_shost, sc->w_sigcache[i].macsrc, 6 )) { /* Match!, * so we already have this entry, * update the data, and LRU age */ break; } } /* did we find a matching mac address? * if yes, then overwrite a previously existing cache entry */ if (i < sc->w_nextcache ) { w_insertcache = i; } /* else, have a new address entry,so * add this new entry, * if table full, then we need to replace entry */ else { /* check for space in cache table * note: w_nextcache also holds number of entries * added in the cache table */ if ( sc->w_nextcache < MAXCACHEITEMS ) { w_insertcache = sc->w_nextcache; sc->w_nextcache++; sc->w_sigitems = sc->w_nextcache; } /* no space found, so simply wrap with wrap index * and "zap" the next entry */ else { if (sc->w_wrapindex == MAXCACHEITEMS) { sc->w_wrapindex = 0; } w_insertcache = sc->w_wrapindex++; } } /* invariant: w_insertcache now points at some slot * in cache. 
*/ if (w_insertcache < 0 || w_insertcache >= MAXCACHEITEMS) { log(LOG_ERR, "wl_cache_store, bad index: %d of [0..%d], gross cache error\n", w_insertcache, MAXCACHEITEMS); return; } /* store items in cache * .ipsrc * .macsrc * .signal (0..63) ,silence (0..63) ,quality (0..15) */ if (ipflag) { sc->w_sigcache[w_insertcache].ipsrc = ip->ip_src.s_addr; } bcopy( eh->ether_shost, sc->w_sigcache[w_insertcache].macsrc, 6); signal = sc->w_sigcache[w_insertcache].signal = wlmmcread(base, MMC_SIGNAL_LVL) & 0x3f; silence = sc->w_sigcache[w_insertcache].silence = wlmmcread(base, MMC_SILENCE_LVL) & 0x3f; sc->w_sigcache[w_insertcache].quality = wlmmcread(base, MMC_SIGN_QUAL) & 0x0f; if (signal > 0) sc->w_sigcache[w_insertcache].snr = signal - silence; else sc->w_sigcache[w_insertcache].snr = 0; #endif /* INET */ } #endif /* WLCACHE */ Index: head/sys/dev/xe/if_xe_pccard.c =================================================================== --- head/sys/dev/xe/if_xe_pccard.c (revision 129878) +++ head/sys/dev/xe/if_xe_pccard.c (revision 129879) @@ -1,453 +1,454 @@ /* * Copyright (c) 2002 Takeshi Shibagaki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* xe pccard interface driver */ #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "card_if.h" #include "pccarddevs.h" /* * Debug logging levels - set with hw.xe.debug sysctl * 0 = None * 1 = More hardware details, probe/attach progress * 2 = Most function calls, ioctls and media selection progress * 3 = Everything - interrupts, packets in/out and multicast address setup */ #define XE_DEBUG #ifdef XE_DEBUG extern int xe_debug; #define DEVPRINTF(level, arg) if (xe_debug >= (level)) device_printf arg #define DPRINTF(level, arg) if (xe_debug >= (level)) printf arg #else #define DEVPRINTF(level, arg) #define DPRINTF(level, arg) #endif #define XE_CARD_TYPE_FLAGS_NO 0x0 #define XE_CARD_TYPE_FLAGS_CE2 0x1 #define XE_CARD_TYPE_FLAGS_MOHAWK 0x2 #define XE_CARD_TYPE_FLAGS_DINGO 0x4 #define XE_PROD_ETHER_MASK 0x0100 #define XE_PROD_MODEM_MASK 0x1000 #define XE_BOGUS_MAC_OFFSET 0x90 /* MAC vendor prefix used by most Xircom cards is 00:80:c7 */ #define XE_MAC_ADDR_0 0x00 #define XE_MAC_ADDR_1 0x80 #define XE_MAC_ADDR_2 0xc7 /* Some (all?) 
REM56 cards have vendor prefix 00:10:a4 */ #define XE_REM56_MAC_ADDR_0 0x00 #define XE_REM56_MAC_ADDR_1 0x10 #define XE_REM56_MAC_ADDR_2 0xa4 struct xe_pccard_product { struct pccard_product product; u_int16_t prodext; u_int16_t flags; }; static const struct xe_pccard_product xe_pccard_products[] = { { PCMCIA_CARD_D(ACCTON, EN2226, 0), 0x43, XE_CARD_TYPE_FLAGS_MOHAWK }, { PCMCIA_CARD_D(COMPAQ2, CPQ_10_100, 0), 0x43, XE_CARD_TYPE_FLAGS_MOHAWK }, { PCMCIA_CARD_D(INTEL, EEPRO100, 0), 0x43, XE_CARD_TYPE_FLAGS_MOHAWK }, { PCMCIA_CARD_D(XIRCOM, CE, 0), 0x41, XE_CARD_TYPE_FLAGS_NO }, { PCMCIA_CARD_D(XIRCOM, CE2, 0), 0x41, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CE2, 0), 0x42, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CE2_2, 0), 0x41, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CE2_2, 0), 0x42, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CE3, 0), 0x43, XE_CARD_TYPE_FLAGS_MOHAWK }, { PCMCIA_CARD_D(XIRCOM, CEM, 0), 0x41, XE_CARD_TYPE_FLAGS_NO }, { PCMCIA_CARD_D(XIRCOM, CEM2, 0), 0x42, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CEM28, 0), 0x43, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CEM33, 0), 0x44, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CEM33_2, 0), 0x44, XE_CARD_TYPE_FLAGS_CE2 }, { PCMCIA_CARD_D(XIRCOM, CEM56, 0), 0x45, XE_CARD_TYPE_FLAGS_DINGO }, { PCMCIA_CARD_D(XIRCOM, CEM56_2, 0), 0x46, XE_CARD_TYPE_FLAGS_DINGO }, { PCMCIA_CARD_D(XIRCOM, REM56, 0), 0x46, XE_CARD_TYPE_FLAGS_DINGO }, { PCMCIA_CARD_D(XIRCOM, REM10, 0), 0x47, XE_CARD_TYPE_FLAGS_DINGO }, { PCMCIA_CARD_D(XIRCOM, XEM5600, 0), 0x56, XE_CARD_TYPE_FLAGS_DINGO }, { { NULL }, 0, 0 } }; /* * Fixing for CEM2, CEM3 and CEM56/REM56 cards. These need some magic to * enable the Ethernet function, which isn't mentioned anywhere in the CIS. * Despite the register names, most of this isn't Dingo-specific. 
 */
static int
xe_cemfix(device_t dev)
{
	struct xe_softc *sc = (struct xe_softc *) device_get_softc(dev);
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	struct resource *r;
	int rid;
	int ioport;

	DEVPRINTF(2, (dev, "cemfix\n"));

	DEVPRINTF(1, (dev, "CEM I/O port 0x%0lx, size 0x%0lx\n",
	    bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid),
	    bus_get_resource_count(dev, SYS_RES_IOPORT, sc->port_rid)));

	/* Map in 4KB of the card's attribute memory so we can poke its
	 * configuration registers directly. */
	rid = 0;
	r = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 4 << 10,
	    RF_ACTIVE);
	if (!r) {
		device_printf(dev, "cemfix: Can't map in attribute memory\n");
		return (-1);
	}

	bsh = rman_get_bushandle(r);
	bst = rman_get_bustag(r);

	CARD_SET_RES_FLAGS(device_get_parent(dev), dev, SYS_RES_MEMORY, rid,
	    PCCARD_A_MEM_ATTR);

	/* Turn on the Ethernet function via the (Dingo-named) ECOR register
	 * and tell the card where its I/O window lives (EBAR0/EBAR1 hold the
	 * low and high bytes of the assigned I/O port base). */
	bus_space_write_1(bst, bsh, DINGO_ECOR, DINGO_ECOR_IRQ_LEVEL |
	    DINGO_ECOR_INT_ENABLE | DINGO_ECOR_IOB_ENABLE |
	    DINGO_ECOR_ETH_ENABLE);
	ioport = bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid);
	bus_space_write_1(bst, bsh, DINGO_EBAR0, ioport & 0xff);
	bus_space_write_1(bst, bsh, DINGO_EBAR1, (ioport >> 8) & 0xff);

	/* Extra DCOR setup only applies to true Dingo (CEM56/REM56) cards. */
	if (sc->dingo) {
		bus_space_write_1(bst, bsh, DINGO_DCOR0, DINGO_DCOR0_SF_INT);
		bus_space_write_1(bst, bsh, DINGO_DCOR1,
		    DINGO_DCOR1_INT_LEVEL | DINGO_DCOR1_EEDIO);
		bus_space_write_1(bst, bsh, DINGO_DCOR2, 0x00);
		bus_space_write_1(bst, bsh, DINGO_DCOR3, 0x00);
		bus_space_write_1(bst, bsh, DINGO_DCOR4, 0x00);
	}

	bus_release_resource(dev, SYS_RES_MEMORY, rid, r);

	/* success! */
	return (0);
}

/*
 * Fixing for CE2-class cards with bogus CIS entry for MAC address.  This
 * should be in a type 0x22 tuple, but some cards seem to use 0x89.
 * This function looks for a sensible MAC address tuple starting at the given
 * offset in attribute memory, ignoring the tuple type field.
 */
static int
xe_macfix(device_t dev, int offset)
{
	struct xe_softc *sc = (struct xe_softc *) device_get_softc(dev);
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	struct resource *r;
	int rid, i;
	u_int8_t cisdata[9];
	/* Expected first six (even) bytes of the tuple, after the (ignored)
	 * tuple-type byte: length 8, LAN_NID type, 6-byte address length,
	 * then the standard Xircom OUI 00:80:c7. */
	u_int8_t required[6] = { 0x08, PCCARD_TPLFE_TYPE_LAN_NID,
	    ETHER_ADDR_LEN, XE_MAC_ADDR_0, XE_MAC_ADDR_1, XE_MAC_ADDR_2 };

	DEVPRINTF(2, (dev, "macfix\n"));

	/* Map in 4KB of attribute memory to read the CIS tuple from. */
	rid = 0;
	r = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 4 << 10,
	    RF_ACTIVE);
	if (!r) {
		device_printf(dev, "macfix: Can't map in attribute memory\n");
		return (-1);
	}

	bsh = rman_get_bushandle(r);
	bst = rman_get_bustag(r);

	CARD_SET_RES_FLAGS(device_get_parent(dev), dev, SYS_RES_MEMORY, rid,
	    PCCARD_A_MEM_ATTR);

	/*
	 * Looking for (relative to offset):
	 *
	 *  0x00	0x??	Tuple type (ignored)
	 *  0x02	0x08	Tuple length (must be 8)
	 *  0x04	0x04	Address type? (must be 4)
	 *  0x06	0x06	Address length (must be 6)
	 *  0x08	0x00	Manufacturer ID, byte 1
	 *  0x0a	0x80	Manufacturer ID, byte 2
	 *  0x0c	0xc7	Manufacturer ID, byte 3
	 *  0x0e	0x??	Card ID, byte 1
	 *  0x10	0x??	Card ID, byte 2
	 *  0x12	0x??	Card ID, byte 3
	 */
	/* Attribute memory is only valid at even addresses, hence the
	 * (2 * i) stride; the + 2 skips the tuple-type byte. */
	for (i = 0; i < 9; i++) {
		cisdata[i] = bus_space_read_1(bst, bsh, offset + (2 * i) + 2);
		if (i < 6 && required[i] != cisdata[i]) {
			device_printf(dev,
			    "macfix: Can't find valid MAC address\n");
			bus_release_resource(dev, SYS_RES_MEMORY, rid, r);
			return (-1);
		}
	}

	/* Bytes 3..8 of the tuple payload are the station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		sc->arpcom.ac_enaddr[i] = cisdata[i + 3];
	}

	bus_release_resource(dev, SYS_RES_MEMORY, rid, r);

	/* success! */
	return (0);
}

/*
 * PCMCIA probe routine.
 * Identify the device.  Called from the bus driver when the card is
 * inserted or otherwise powers up.
*/ static int xe_pccard_probe(device_t dev) { struct xe_softc *scp = (struct xe_softc *) device_get_softc(dev); u_int32_t vendor,product; u_int16_t prodext; const char* vendor_str = NULL; const char* product_str = NULL; const char* cis4_str = NULL; const char *cis3_str=NULL; const struct xe_pccard_product *xpp; DEVPRINTF(2, (dev, "pccard_probe\n")); pccard_get_vendor(dev, &vendor); pccard_get_product(dev, &product); pccard_get_prodext(dev, &prodext); pccard_get_vendor_str(dev, &vendor_str); pccard_get_product_str(dev, &product_str); pccard_get_cis3_str(dev, &cis3_str); pccard_get_cis4_str(dev, &cis4_str); DEVPRINTF(1, (dev, "vendor = 0x%04x\n", vendor)); DEVPRINTF(1, (dev, "product = 0x%04x\n", product)); DEVPRINTF(1, (dev, "prodext = 0x%02x\n", prodext)); DEVPRINTF(1, (dev, "vendor_str = %s\n", vendor_str)); DEVPRINTF(1, (dev, "product_str = %s\n", product_str)); DEVPRINTF(1, (dev, "cis3_str = %s\n", cis3_str)); DEVPRINTF(1, (dev, "cis4_str = %s\n", cis4_str)); /* * Possibly already did this search in xe_pccard_match(), * but we need to do it here anyway to figure out which * card we have. */ for (xpp = xe_pccard_products; xpp->product.pp_vendor != 0; xpp++) { if (vendor == xpp->product.pp_vendor && product == xpp->product.pp_product && prodext == xpp->prodext) break; } /* Found a match? 
*/ if (xpp->product.pp_vendor == 0) return (ENODEV); /* Set card name for logging later */ if (xpp->product.pp_name != NULL) device_set_desc(dev, xpp->product.pp_name); /* Reject known but unsupported cards */ if (xpp->flags & XE_CARD_TYPE_FLAGS_NO) { device_printf(dev, "Sorry, your %s %s card is not supported :(\n", vendor_str, product_str); return (ENODEV); } /* Set various card ID fields in softc */ scp->vendor = vendor_str; scp->card_type = product_str; if (xpp->flags & XE_CARD_TYPE_FLAGS_CE2) scp->ce2 = 1; if (xpp->flags & XE_CARD_TYPE_FLAGS_MOHAWK) scp->mohawk = 1; if (xpp->flags & XE_CARD_TYPE_FLAGS_DINGO) { scp->dingo = 1; scp->mohawk = 1; } if (xpp->product.pp_product & XE_PROD_MODEM_MASK) scp->modem = 1; /* Get MAC address */ pccard_get_ether(dev, scp->arpcom.ac_enaddr); /* Deal with bogus MAC address */ if (xpp->product.pp_vendor == PCMCIA_VENDOR_XIRCOM && scp->ce2 && (scp->arpcom.ac_enaddr[0] != XE_MAC_ADDR_0 || scp->arpcom.ac_enaddr[1] != XE_MAC_ADDR_1 || scp->arpcom.ac_enaddr[2] != XE_MAC_ADDR_2) && xe_macfix(dev, XE_BOGUS_MAC_OFFSET) < 0) { device_printf(dev, "Unable to find MAC address for your %s card\n", scp->card_type); return (ENODEV); } /* Success */ return (0); } /* * Attach a device. */ static int xe_pccard_attach(device_t dev) { struct xe_softc *scp = device_get_softc(dev); int err; DEVPRINTF(2, (dev, "pccard_attach\n")); if ((err = xe_activate(dev)) != 0) return (err); /* Hack RealPorts into submission */ if (scp->modem && xe_cemfix(dev) < 0) { device_printf(dev, "Unable to fix your %s %s combo card\n", scp->vendor, scp->card_type); xe_deactivate(dev); return (ENODEV); } if ((err = xe_attach(dev))) { device_printf(dev, "xe_attach() failed! (%d)\n", err); return (err); } return (0); } /* * The device entry is being removed, probably because someone ejected the * card. The interface should have been brought down manually before calling * this function; if not you may well lose packets. 
In any case, I shut down
 * the card and the interface, and hope for the best.
 */
static int
xe_pccard_detach(device_t dev)
{
	struct xe_softc *sc = device_get_softc(dev);

	DEVPRINTF(2, (dev, "pccard_detach\n"));

	/* Mark the interface down, detach it from the network stack, then
	 * release the bus resources. */
	sc->arpcom.ac_if.if_flags &= ~IFF_RUNNING;
	ether_ifdetach(&sc->arpcom.ac_if);
	xe_deactivate(dev);

	return (0);
}

/*
 * Secondary match hook passed to pccard_product_lookup(): on top of the
 * vendor/product/function match already done (vpfmatch), also require the
 * CIS product-extension byte to equal the table entry's prodext field.
 * Returns the (possibly cleared) match score.
 */
static int
xe_pccard_product_match(device_t dev, const struct pccard_product* ent,
    int vpfmatch)
{
	const struct xe_pccard_product* xpp;
	u_int16_t prodext;

	DEVPRINTF(2, (dev, "pccard_product_match\n"));

	xpp = (const struct xe_pccard_product*)ent;
	pccard_get_prodext(dev, &prodext);
	if (xpp->prodext != prodext)
		vpfmatch = 0;

	return (vpfmatch);
}

/*
 * Card-compat match routine: returns 0 if the inserted card appears in
 * xe_pccard_products (including prodext via xe_pccard_product_match),
 * EIO otherwise.
 */
static int
xe_pccard_match(device_t dev)
{
	const struct pccard_product *pp;

	DEVPRINTF(2, (dev, "pccard_match\n"));

	pp = (const struct pccard_product*)xe_pccard_products;
	if ((pp = pccard_product_lookup(dev, pp,
	    sizeof(xe_pccard_products[0]), xe_pccard_product_match)) != NULL)
		return (0);

	return (EIO);
}

/* Newbus/card-compat method table and driver registration. */
static device_method_t xe_pccard_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, pccard_compat_probe),
	DEVMETHOD(device_attach, pccard_compat_attach),
	DEVMETHOD(device_detach, xe_pccard_detach),

	/* Card interface */
	DEVMETHOD(card_compat_match, xe_pccard_match),
	DEVMETHOD(card_compat_probe, xe_pccard_probe),
	DEVMETHOD(card_compat_attach, xe_pccard_attach),

	{ 0, 0 }
};

static driver_t xe_pccard_driver = {
	"xe",
	xe_pccard_methods,
	sizeof(struct xe_softc),
};

devclass_t xe_devclass;

DRIVER_MODULE(xe, pccard, xe_pccard_driver, xe_devclass, 0, 0);