diff --git a/sys/dev/ata/ata-all.c b/sys/dev/ata/ata-all.c index 3028a8890c16..6b87979b8ef6 100644 --- a/sys/dev/ata/ata-all.c +++ b/sys/dev/ata/ata-all.c @@ -1,986 +1,978 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __alpha__ #include #endif #include #include #include #include /* device structures */ static d_ioctl_t ata_ioctl; static struct cdevsw ata_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_ioctl = ata_ioctl, .d_name = "ata", }; /* prototypes */ static void ata_shutdown(void *, int); static int ata_getparam(struct ata_device *, u_int8_t); static void ata_identify_devices(struct ata_channel *); static void ata_boot_attach(void); static void bswap(int8_t *, int); static void btrim(int8_t *, int); static void bpack(int8_t *, int8_t *, int); static void ata_init(void); /* global vars */ MALLOC_DEFINE(M_ATA, "ATA generic", "ATA driver generic layer"); struct intr_config_hook *ata_delayed_attach = NULL; devclass_t ata_devclass; uma_zone_t ata_zone; int ata_wc = 1; /* local vars */ static int ata_dma = 1; static int atapi_dma = 1; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); TUNABLE_INT("hw.ata.ata_dma", &ata_dma); SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0, "ATA disk DMA mode control"); TUNABLE_INT("hw.ata.wc", &ata_wc); SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0, "ATA disk write caching"); TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0, "ATAPI device DMA mode control"); /* * newbus device interface related functions */ int ata_probe(device_t dev) { struct ata_channel *ch; if (!dev || !(ch = device_get_softc(dev))) return ENXIO; if (ch->r_irq) return EEXIST; /* initialize the softc basics */ ch->device[MASTER].channel = ch; ch->device[MASTER].unit = ATA_MASTER; ch->device[MASTER].mode = ATA_PIO; ch->device[SLAVE].channel = ch; ch->device[SLAVE].unit = ATA_SLAVE; ch->device[SLAVE].mode = 
ATA_PIO; ch->dev = dev; ch->lock = ATA_IDLE; /* initialise device(s) on this channel */ ch->locking(ch, ATA_LF_LOCK); ch->hw.reset(ch); ch->locking(ch, ATA_LF_UNLOCK); return 0; } int ata_attach(device_t dev) { struct ata_channel *ch; int error, rid; if (!dev || !(ch = device_get_softc(dev))) return ENXIO; rid = ATA_IRQ_RID; ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!ch->r_irq) { ata_printf(ch, -1, "unable to allocate interrupt\n"); return ENXIO; } if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, ch->hw.interrupt, ch, &ch->ih))) { ata_printf(ch, -1, "unable to setup interrupt\n"); return error; } if (ch->dma) ch->dma->alloc(ch); /* initialize queue and associated lock */ bzero(&ch->queue_mtx, sizeof(struct mtx)); mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF); TAILQ_INIT(&ch->ata_queue); /* do not attach devices if we are in early boot */ if (ata_delayed_attach) return 0; ata_identify_devices(ch); if (ch->device[MASTER].attach) ch->device[MASTER].attach(&ch->device[MASTER]); if (ch->device[SLAVE].attach) ch->device[SLAVE].attach(&ch->device[SLAVE]); #ifdef DEV_ATAPICAM atapi_cam_attach_bus(ch); #endif return 0; } int ata_detach(device_t dev) { struct ata_channel *ch; if (!dev || !(ch = device_get_softc(dev)) || !ch->r_irq) return ENXIO; /* mark devices on this channel as detaching */ ch->device[MASTER].flags |= ATA_D_DETACHING; ch->device[SLAVE].flags |= ATA_D_DETACHING; /* fail outstanding requests on this channel */ ata_fail_requests(ch, NULL); /* detach devices on this channel */ if (ch->device[MASTER].detach) ch->device[MASTER].detach(&ch->device[MASTER]); if (ch->device[SLAVE].detach) ch->device[SLAVE].detach(&ch->device[SLAVE]); #ifdef DEV_ATAPICAM atapi_cam_detach_bus(ch); #endif /* flush cache and powerdown device */ if (ch->device[MASTER].param) { if (ch->device[MASTER].param->support.command2 & ATA_SUPPORT_FLUSHCACHE) ata_controlcmd(&ch->device[MASTER], ATA_FLUSHCACHE, 0, 0, 0); 
ata_controlcmd(&ch->device[MASTER], ATA_SLEEP, 0, 0, 0); free(ch->device[MASTER].param, M_ATA); ch->device[MASTER].param = NULL; } if (ch->device[SLAVE].param) { if (ch->device[SLAVE].param->support.command2 & ATA_SUPPORT_FLUSHCACHE) ata_controlcmd(&ch->device[SLAVE], ATA_FLUSHCACHE, 0, 0, 0); ata_controlcmd(&ch->device[SLAVE], ATA_SLEEP, 0, 0, 0); free(ch->device[SLAVE].param, M_ATA); ch->device[SLAVE].param = NULL; } ch->device[MASTER].mode = ATA_PIO; ch->device[SLAVE].mode = ATA_PIO; ch->devices = 0; if (ch->dma) ch->dma->free(ch); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ch->r_irq = NULL; mtx_destroy(&ch->queue_mtx); return 0; } int ata_reinit(struct ata_channel *ch) { - struct ata_request *request = ch->running; int devices, misdev, newdev; if (!ch->r_irq) return ENXIO; /* reset the HW */ if (bootverbose) ata_printf(ch, -1, "reiniting channel ..\n"); ATA_FORCELOCK_CH(ch); ch->flags |= ATA_IMMEDIATE_MODE; - ch->running = NULL; devices = ch->devices; ch->hw.reset(ch); - ATA_UNLOCK_CH(ch); if (bootverbose) ata_printf(ch, -1, "resetting done ..\n"); /* detach what left the channel during reset */ if ((misdev = devices & ~ch->devices)) { if ((misdev & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) && ch->device[MASTER].detach) { - if (request && (request->device == &ch->device[MASTER])) { - request->result = ENXIO; - request->retries = 0; - } ch->device[MASTER].detach(&ch->device[MASTER]); ata_fail_requests(ch, &ch->device[MASTER]); free(ch->device[MASTER].param, M_ATA); ch->device[MASTER].param = NULL; } if ((misdev & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) && ch->device[SLAVE].detach) { - if (request && (request->device == &ch->device[SLAVE])) { - request->result = ENXIO; - request->retries = 0; - } ch->device[SLAVE].detach(&ch->device[SLAVE]); ata_fail_requests(ch, &ch->device[SLAVE]); free(ch->device[SLAVE].param, M_ATA); ch->device[SLAVE].param = NULL; } } + ch->running = NULL; + ATA_UNLOCK_CH(ch); + /* 
identify what is present on the channel now */ ata_identify_devices(ch); /* attach new devices that appeared during reset */ if ((newdev = ~devices & ch->devices)) { if ((newdev & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) && ch->device[MASTER].attach) ch->device[MASTER].attach(&ch->device[MASTER]); if ((newdev & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) && ch->device[SLAVE].attach) ch->device[SLAVE].attach(&ch->device[SLAVE]); } #ifdef DEV_ATAPICAM atapi_cam_reinit_bus(ch); #endif if (bootverbose) ata_printf(ch, -1, "device config done ..\n"); ch->flags &= ~ATA_IMMEDIATE_MODE; ata_start(ch); return 0; } int ata_suspend(device_t dev) { struct ata_channel *ch; if (!dev || !(ch = device_get_softc(dev))) return ENXIO; ch->locking(ch, ATA_LF_LOCK); ATA_SLEEPLOCK_CH(ch); return 0; } int ata_resume(device_t dev) { struct ata_channel *ch; int error; if (!dev || !(ch = device_get_softc(dev))) return ENXIO; ch->locking(ch, ATA_LF_LOCK); error = ata_reinit(ch); ch->locking(ch, ATA_LF_UNLOCK); ata_start(ch); return error; } static void ata_shutdown(void *arg, int howto) { struct ata_channel *ch; int ctlr; /* flush cache on all devices */ for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) { if (!(ch = devclass_get_softc(ata_devclass, ctlr))) continue; if (ch->device[MASTER].param && ch->device[MASTER].param->support.command2 & ATA_SUPPORT_FLUSHCACHE) ata_controlcmd(&ch->device[MASTER], ATA_FLUSHCACHE, 0, 0, 0); if (ch->device[SLAVE].param && ch->device[SLAVE].param->support.command2 & ATA_SUPPORT_FLUSHCACHE) ata_controlcmd(&ch->device[SLAVE], ATA_FLUSHCACHE, 0, 0, 0); } } /* * device related interfaces */ static int ata_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct ata_cmd *iocmd = (struct ata_cmd *)addr; device_t device = devclass_get_device(ata_devclass, iocmd->channel); struct ata_channel *ch; struct ata_device *atadev; struct ata_request *request; caddr_t buf; int error = ENOTTY; DROP_GIANT(); switch (iocmd->cmd) { case 
ATAGMAXCHANNEL: iocmd->u.maxchan = devclass_get_maxunit(ata_devclass); error = 0; break; case ATAGPARM: if (!device || !(ch = device_get_softc(device))) { error = ENXIO; break; } iocmd->u.param.type[MASTER] = ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER); iocmd->u.param.type[SLAVE] = ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE); if (ch->device[MASTER].name) strcpy(iocmd->u.param.name[MASTER], ch->device[MASTER].name); if (ch->device[SLAVE].name) strcpy(iocmd->u.param.name[SLAVE], ch->device[SLAVE].name); if (ch->device[MASTER].param) bcopy(ch->device[MASTER].param, &iocmd->u.param.params[MASTER], sizeof(struct ata_params)); if (ch->device[SLAVE].param) bcopy(ch->device[SLAVE].param, &iocmd->u.param.params[SLAVE], sizeof(struct ata_params)); error = 0; break; case ATAGMODE: if (!device || !(ch = device_get_softc(device))) { error = ENXIO; break; } iocmd->u.mode.mode[MASTER] = ch->device[MASTER].mode; iocmd->u.mode.mode[SLAVE] = ch->device[SLAVE].mode; error = 0; break; case ATASMODE: if (!device || !(ch = device_get_softc(device))) { error = ENXIO; break; } if (iocmd->u.mode.mode[MASTER] >= 0 && ch->device[MASTER].param) ch->device[MASTER].setmode(&ch->device[MASTER], iocmd->u.mode.mode[MASTER]); iocmd->u.mode.mode[MASTER] = ch->device[MASTER].mode; if (iocmd->u.mode.mode[SLAVE] >= 0 && ch->device[SLAVE].param) ch->device[SLAVE].setmode(&ch->device[SLAVE], iocmd->u.mode.mode[SLAVE]); iocmd->u.mode.mode[SLAVE] = ch->device[SLAVE].mode; error = 0; break; case ATAREQUEST: if (!device || !(ch = device_get_softc(device))) { error = ENXIO; break; } if (!(atadev = &ch->device[iocmd->device])) { error = ENODEV; break; } if (!(buf = malloc(iocmd->u.request.count, M_ATA, M_NOWAIT))) { error = ENOMEM; break; } if (!(request = ata_alloc_request())) { error = ENOMEM; free(buf, M_ATA); break; } if (iocmd->u.request.flags & ATA_CMD_WRITE) { error = copyin(iocmd->u.request.data, buf, iocmd->u.request.count); if (error) { free(buf, M_ATA); ata_free_request(request); break; } 
} request->device = atadev; if (iocmd->u.request.flags & ATA_CMD_ATAPI) { request->flags = ATA_R_ATAPI; bcopy(iocmd->u.request.u.atapi.ccb, request->u.atapi.ccb, 16); } else { request->u.ata.command = iocmd->u.request.u.ata.command; request->u.ata.feature = iocmd->u.request.u.ata.feature; request->u.ata.lba = iocmd->u.request.u.ata.lba; request->u.ata.count = iocmd->u.request.u.ata.count; } request->timeout = iocmd->u.request.timeout; request->data = buf; request->bytecount = iocmd->u.request.count; request->transfersize = request->bytecount; if (iocmd->u.request.flags & ATA_CMD_CONTROL) request->flags |= ATA_R_CONTROL; if (iocmd->u.request.flags & ATA_CMD_READ) request->flags |= ATA_R_READ; if (iocmd->u.request.flags & ATA_CMD_WRITE) request->flags |= ATA_R_WRITE; ata_queue_request(request); if (request->result) iocmd->u.request.error = request->result; else { if (iocmd->u.request.flags & ATA_CMD_READ) error = copyout(buf, iocmd->u.request.data, iocmd->u.request.count); else error = 0; } free(buf, M_ATA); ata_free_request(request); break; case ATAREINIT: if (!device || !(ch = device_get_softc(device))) return ENXIO; error = ata_reinit(ch); ata_start(ch); break; case ATAATTACH: if (!device) { error = ENXIO; break; } /* SOS should enable channel HW on controller XXX */ error = ata_probe(device); if (!error) error = ata_attach(device); break; case ATADETACH: if (!device) { error = ENXIO; break; } error = ata_detach(device); /* SOS should disable channel HW on controller XXX */ break; #ifdef DEV_ATARAID case ATARAIDCREATE: error = ata_raid_create(&iocmd->u.raid_setup); break; case ATARAIDDELETE: error = ata_raid_delete(iocmd->channel); break; case ATARAIDSTATUS: error = ata_raid_status(iocmd->channel, &iocmd->u.raid_status); break; case ATARAIDADDSPARE: error = ata_raid_addspare(iocmd->channel, iocmd->u.raid_spare.disk); break; case ATARAIDREBUILD: error = ata_raid_rebuild(iocmd->channel); break; #endif } PICKUP_GIANT(); return error; } /* * device probe functions */ 
static int ata_getparam(struct ata_device *atadev, u_int8_t command) { struct ata_request *request; int error = ENOMEM; if (!atadev->param) atadev->param = malloc(sizeof(struct ata_params), M_ATA, M_NOWAIT); if (atadev->param) { request = ata_alloc_request(); if (request) { int retries = 2; while (retries-- > 0) { request->device = atadev; request->timeout = 5; request->retries = -1; request->u.ata.command = command; request->flags = (ATA_R_READ | ATA_R_IMMEDIATE); request->data = (caddr_t)atadev->param; request->bytecount = sizeof(struct ata_params); request->donecount = 0; request->transfersize = DEV_BSIZE; ata_queue_request(request); if (!(error = request->result)) break; } ata_free_request(request); } if (!error && (isprint(atadev->param->model[0]) || isprint(atadev->param->model[1]))) { struct ata_params *atacap = atadev->param; #if BYTE_ORDER == BIG_ENDIAN int16_t *ptr; for (ptr = (int16_t *)atacap; ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) { *ptr = bswap16(*ptr); } #endif if (!((atacap->model[0] == 'N' && atacap->model[1] == 'E') || (atacap->model[0] == 'F' && atacap->model[1] == 'X') || (atacap->model[0] == 'P' && atacap->model[1] == 'i'))) bswap(atacap->model, sizeof(atacap->model)); btrim(atacap->model, sizeof(atacap->model)); bpack(atacap->model, atacap->model, sizeof(atacap->model)); bswap(atacap->revision, sizeof(atacap->revision)); btrim(atacap->revision, sizeof(atacap->revision)); bpack(atacap->revision, atacap->revision, sizeof(atacap->revision)); bswap(atacap->serial, sizeof(atacap->serial)); btrim(atacap->serial, sizeof(atacap->serial)); bpack(atacap->serial, atacap->serial, sizeof(atacap->serial)); if (bootverbose) ata_prtdev(atadev, "pio=0x%02x wdma=0x%02x udma=0x%02x cable=%spin\n", ata_pmode(atacap), ata_wmode(atacap), ata_umode(atacap), (atacap->hwres & ATA_CABLE_ID) ? 
"80":"40"); } else { if (!error) error = ENXIO; if (atadev->param) { free(atadev->param, M_ATA); atadev->param = NULL; } } } return error; } static void ata_identify_devices(struct ata_channel *ch) { if (ch->devices & ATA_ATA_SLAVE) { if (ata_getparam(&ch->device[SLAVE], ATA_ATA_IDENTIFY)) ch->devices &= ~ATA_ATA_SLAVE; #ifdef DEV_ATADISK else ch->device[SLAVE].attach = ad_attach; #endif } if (ch->devices & ATA_ATAPI_SLAVE) { if (ata_getparam(&ch->device[SLAVE], ATA_ATAPI_IDENTIFY)) ch->devices &= ~ATA_ATAPI_SLAVE; else { ata_controlcmd(&ch->device[SLAVE], ATA_ATAPI_RESET, 0, 0, 0); switch (ch->device[SLAVE].param->config & ATA_ATAPI_TYPE_MASK) { #ifdef DEV_ATAPICD case ATA_ATAPI_TYPE_CDROM: ch->device[SLAVE].attach = acd_attach; break; #endif #ifdef DEV_ATAPIFD case ATA_ATAPI_TYPE_DIRECT: ch->device[SLAVE].attach = afd_attach; break; #endif #ifdef DEV_ATAPIST case ATA_ATAPI_TYPE_TAPE: ch->device[SLAVE].attach = ast_attach; break; #endif } } } if (ch->devices & ATA_ATA_MASTER) { if (ata_getparam(&ch->device[MASTER], ATA_ATA_IDENTIFY)) ch->devices &= ~ATA_ATA_MASTER; #ifdef DEV_ATADISK else ch->device[MASTER].attach = ad_attach; #endif } if (ch->devices & ATA_ATAPI_MASTER) { if (ata_getparam(&ch->device[MASTER], ATA_ATAPI_IDENTIFY)) ch->devices &= ~ATA_ATAPI_MASTER; else { ata_controlcmd(&ch->device[MASTER], ATA_ATAPI_RESET, 0, 0, 0); switch (ch->device[MASTER].param->config & ATA_ATAPI_TYPE_MASK) { #ifdef DEV_ATAPICD case ATA_ATAPI_TYPE_CDROM: ch->device[MASTER].attach = acd_attach; break; #endif #ifdef DEV_ATAPIFD case ATA_ATAPI_TYPE_DIRECT: ch->device[MASTER].attach = afd_attach; break; #endif #ifdef DEV_ATAPIST case ATA_ATAPI_TYPE_TAPE: ch->device[MASTER].attach = ast_attach; break; #endif } } } /* setup basic transfer mode by setting PIO mode and DMA if supported */ if (ch->device[MASTER].param) { ch->device[MASTER].setmode(&ch->device[MASTER], ATA_PIO_MAX); if ((((ch->devices & ATA_ATAPI_MASTER) && atapi_dma && (ch->device[MASTER].param->config&ATA_DRQ_MASK) 
!= ATA_DRQ_INTR && ata_umode(ch->device[MASTER].param) >= ATA_UDMA2) || ((ch->devices & ATA_ATA_MASTER) && ata_dma)) && ch->dma) ch->device[MASTER].setmode(&ch->device[MASTER], ATA_DMA_MAX); } if (ch->device[SLAVE].param) { ch->device[SLAVE].setmode(&ch->device[SLAVE], ATA_PIO_MAX); if ((((ch->devices & ATA_ATAPI_SLAVE) && atapi_dma && (ch->device[SLAVE].param->config&ATA_DRQ_MASK) != ATA_DRQ_INTR && ata_umode(ch->device[SLAVE].param) >= ATA_UDMA2) || ((ch->devices & ATA_ATA_SLAVE) && ata_dma)) && ch->dma) ch->device[SLAVE].setmode(&ch->device[SLAVE], ATA_DMA_MAX); } } static void ata_boot_attach(void) { struct ata_channel *ch; int ctlr; /* * run through all ata devices and look for real ATA & ATAPI devices * using the hints we found in the early probe, this avoids some of * the delays probing of non-exsistent devices can cause. */ for (ctlr=0; ctlrdevice[MASTER].attach) ch->device[MASTER].attach(&ch->device[MASTER]); if (ch->device[SLAVE].attach) ch->device[SLAVE].attach(&ch->device[SLAVE]); #ifdef DEV_ATAPICAM atapi_cam_attach_bus(ch); #endif } #ifdef DEV_ATARAID ata_raid_attach(); #endif if (ata_delayed_attach) { config_intrhook_disestablish(ata_delayed_attach); free(ata_delayed_attach, M_TEMP); ata_delayed_attach = NULL; } } /* * misc support functions */ static void bswap(int8_t *buf, int len) { u_int16_t *ptr = (u_int16_t*)(buf + len); while (--ptr >= (u_int16_t*)buf) *ptr = ntohs(*ptr); } static void btrim(int8_t *buf, int len) { int8_t *ptr; for (ptr = buf; ptr < buf+len; ++ptr) if (!*ptr) *ptr = ' '; for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) *ptr = 0; } static void bpack(int8_t *src, int8_t *dst, int len) { int i, j, blank; for (i = j = blank = 0 ; i < len; i++) { if (blank && src[i] == ' ') continue; if (blank && src[i] != ' ') { dst[j++] = src[i]; blank = 0; continue; } if (src[i] == ' ') { blank = 1; if (i == 0) continue; } dst[j++] = src[i]; } if (j < len) dst[j] = 0x00; } int ata_printf(struct ata_channel *ch, int device, const char 
* fmt, ...) { va_list ap; int ret; if (device == -1) ret = printf("ata%d: ", device_get_unit(ch->dev)); else { if (ch->device[ATA_DEV(device)].name) ret = printf("%s: ", ch->device[ATA_DEV(device)].name); else ret = printf("ata%d-%s: ", device_get_unit(ch->dev), (device == ATA_MASTER) ? "master" : "slave"); } va_start(ap, fmt); ret += vprintf(fmt, ap); va_end(ap); return ret; } int ata_prtdev(struct ata_device *atadev, const char * fmt, ...) { va_list ap; int ret; if (atadev->name) ret = printf("%s: ", atadev->name); else ret = printf("ata%d-%s: ", device_get_unit(atadev->channel->dev), (atadev->unit == ATA_MASTER) ? "master" : "slave"); va_start(ap, fmt); ret += vprintf(fmt, ap); va_end(ap); return ret; } void ata_set_name(struct ata_device *atadev, char *name, int lun) { atadev->name = malloc(strlen(name) + 4, M_ATA, M_NOWAIT); if (atadev->name) sprintf(atadev->name, "%s%d", name, lun); } void ata_free_name(struct ata_device *atadev) { if (atadev->name) free(atadev->name, M_ATA); atadev->name = NULL; } int ata_get_lun(u_int32_t *map) { int lun = ffs(~*map) - 1; *map |= (1 << lun); return lun; } int ata_test_lun(u_int32_t *map, int lun) { return (*map & (1 << lun)); } void ata_free_lun(u_int32_t *map, int lun) { *map &= ~(1 << lun); } char * ata_mode2str(int mode) { switch (mode) { case ATA_PIO: return "BIOSPIO"; case ATA_PIO0: return "PIO0"; case ATA_PIO1: return "PIO1"; case ATA_PIO2: return "PIO2"; case ATA_PIO3: return "PIO3"; case ATA_PIO4: return "PIO4"; case ATA_DMA: return "BIOSDMA"; case ATA_WDMA0: return "WDMA0"; case ATA_WDMA1: return "WDMA1"; case ATA_WDMA2: return "WDMA2"; case ATA_UDMA0: return "UDMA16"; case ATA_UDMA1: return "UDMA25"; case ATA_UDMA2: return "UDMA33"; case ATA_UDMA3: return "UDMA40"; case ATA_UDMA4: return "UDMA66"; case ATA_UDMA5: return "UDMA100"; case ATA_UDMA6: return "UDMA133"; case ATA_SA150: return "SATA150"; default: return "???"; } } int ata_pmode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_64_70) { if 
(ap->apiomodes & 0x02) return ATA_PIO4; if (ap->apiomodes & 0x01) return ATA_PIO3; } if (ap->mwdmamodes & 0x04) return ATA_PIO4; if (ap->mwdmamodes & 0x02) return ATA_PIO3; if (ap->mwdmamodes & 0x01) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) return ATA_PIO1; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) return ATA_PIO0; return ATA_PIO0; } int ata_wmode(struct ata_params *ap) { if (ap->mwdmamodes & 0x04) return ATA_WDMA2; if (ap->mwdmamodes & 0x02) return ATA_WDMA1; if (ap->mwdmamodes & 0x01) return ATA_WDMA0; return -1; } int ata_umode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_88) { if (ap->udmamodes & 0x40) return ATA_UDMA6; if (ap->udmamodes & 0x20) return ATA_UDMA5; if (ap->udmamodes & 0x10) return ATA_UDMA4; if (ap->udmamodes & 0x08) return ATA_UDMA3; if (ap->udmamodes & 0x04) return ATA_UDMA2; if (ap->udmamodes & 0x02) return ATA_UDMA1; if (ap->udmamodes & 0x01) return ATA_UDMA0; } return -1; } int ata_limit_mode(struct ata_device *atadev, int mode, int maxmode) { if (maxmode && mode > maxmode) mode = maxmode; if (mode >= ATA_UDMA0 && ata_umode(atadev->param) > 0) return min(mode, ata_umode(atadev->param)); if (mode >= ATA_WDMA0 && ata_wmode(atadev->param) > 0) return min(mode, ata_wmode(atadev->param)); if (mode > ata_pmode(atadev->param)) return min(mode, ata_pmode(atadev->param)); return mode; } static void ata_init(void) { /* register controlling device */ make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata"); /* register boot attach to be run when interrupts are enabled */ if (!(ata_delayed_attach = (struct intr_config_hook *) malloc(sizeof(struct intr_config_hook), M_TEMP, M_NOWAIT | M_ZERO))) { printf("ata: malloc of delayed attach hook failed\n"); return; } ata_delayed_attach->ich_func = (void*)ata_boot_attach; if (config_intrhook_establish(ata_delayed_attach) != 0) { printf("ata: config_intrhook_establish 
failed\n"); free(ata_delayed_attach, M_TEMP); } /* register handler to flush write caches on shutdown */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, ata_shutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("ata: shutdown event registration failed!\n"); /* init our UMA zone for ATA requests */ ata_zone = uma_zcreate("ata_request", sizeof(struct ata_request), NULL, NULL, NULL, NULL, 0, 0); } SYSINIT(atadev, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL) diff --git a/sys/dev/ata/ata-all.h b/sys/dev/ata/ata-all.h index 7e78c2fb86dd..60698dfae907 100644 --- a/sys/dev/ata/ata-all.h +++ b/sys/dev/ata/ata-all.h @@ -1,532 +1,533 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* ATA register defines */ #define ATA_DATA 0x00 /* data register */ #define ATA_ERROR 0x01 /* (R) error register */ #define ATA_E_ILI 0x01 /* illegal length */ #define ATA_E_NM 0x02 /* no media */ #define ATA_E_ABORT 0x04 /* command aborted */ #define ATA_E_MCR 0x08 /* media change request */ #define ATA_E_IDNF 0x10 /* ID not found */ #define ATA_E_MC 0x20 /* media changed */ #define ATA_E_UNC 0x40 /* uncorrectable data */ #define ATA_E_ICRC 0x80 /* UDMA crc error */ #define ATA_E_MASK 0x0f /* error mask */ #define ATA_SK_MASK 0xf0 /* sense key mask */ #define ATA_SK_NO_SENSE 0x00 /* no specific sense key info */ #define ATA_SK_RECOVERED_ERROR 0x10 /* command OK, data recovered */ #define ATA_SK_NOT_READY 0x20 /* no access to drive */ #define ATA_SK_MEDIUM_ERROR 0x30 /* non-recovered data error */ #define ATA_SK_HARDWARE_ERROR 0x40 /* non-recoverable HW failure */ #define ATA_SK_ILLEGAL_REQUEST 0x50 /* invalid command param(s) */ #define ATA_SK_UNIT_ATTENTION 0x60 /* media changed */ #define ATA_SK_DATA_PROTECT 0x70 /* write protect */ #define ATA_SK_BLANK_CHECK 0x80 /* blank check */ #define ATA_SK_VENDOR_SPECIFIC 0x90 /* vendor specific skey */ #define ATA_SK_COPY_ABORTED 0xa0 /* copy aborted */ #define ATA_SK_ABORTED_COMMAND 0xb0 /* command aborted, try again */ #define ATA_SK_EQUAL 0xc0 /* equal */ #define ATA_SK_VOLUME_OVERFLOW 0xd0 /* volume overflow */ #define ATA_SK_MISCOMPARE 0xe0 /* data dont match the medium */ #define ATA_SK_RESERVED 0xf0 
#define ATA_FEATURE                     0x01    /* (W) feature register */
#define         ATA_F_DMA               0x01    /* enable DMA */
#define         ATA_F_OVL               0x02    /* enable overlap */

#define ATA_COUNT                       0x02    /* (W) sector count */

#define ATA_IREASON                     0x02    /* (R) interrupt reason */
#define         ATA_I_CMD               0x01    /* cmd (1) | data (0) */
#define         ATA_I_IN                0x02    /* read (1) | write (0) */
#define         ATA_I_RELEASE           0x04    /* released bus (1) */
#define         ATA_I_TAGMASK           0xf8    /* tag mask */

#define ATA_SECTOR                      0x03    /* sector # */
#define ATA_CYL_LSB                     0x04    /* cylinder# LSB */
#define ATA_CYL_MSB                     0x05    /* cylinder# MSB */

#define ATA_DRIVE                       0x06    /* Sector/Drive/Head register */
#define         ATA_D_LBA               0x40    /* use LBA addressing */
#define         ATA_D_IBM               0xa0    /* 512 byte sectors, ECC */

#define ATA_CMD                         0x07    /* command register */

#define ATA_STATUS                      0x07    /* status register */
#define         ATA_S_ERROR             0x01    /* error */
#define         ATA_S_INDEX             0x02    /* index */
#define         ATA_S_CORR              0x04    /* data corrected */
#define         ATA_S_DRQ               0x08    /* data request */
#define         ATA_S_DSC               0x10    /* drive seek completed */
#define         ATA_S_SERVICE           0x10    /* drive needs service */
#define         ATA_S_DWF               0x20    /* drive write fault */
#define         ATA_S_DMA               0x20    /* DMA ready */
#define         ATA_S_READY             0x40    /* drive ready */
#define         ATA_S_BUSY              0x80    /* busy */

#define ATA_ALTSTAT                     0x08    /* alternate status register */
#define ATA_ALTOFFSET                   0x206   /* alternate registers offset */
#define ATA_PCCARD_ALTOFFSET            0x0e    /* do for PCCARD devices */
#define ATA_PC98_ALTOFFSET              0x10c   /* do for PC98 devices */
#define         ATA_A_IDS               0x02    /* disable interrupts */
#define         ATA_A_RESET             0x04    /* RESET controller */
#define         ATA_A_4BIT              0x08    /* 4 head bits */

/* ATAPI misc defines */
#define ATAPI_MAGIC_LSB                 0x14
#define ATAPI_MAGIC_MSB                 0xeb
/* ATAPI transfer phases: combinations of status DRQ and interrupt reason bits */
#define ATAPI_P_READ                    (ATA_S_DRQ | ATA_I_IN)
#define ATAPI_P_WRITE                   (ATA_S_DRQ)
#define ATAPI_P_CMDOUT                  (ATA_S_DRQ | ATA_I_CMD)
#define ATAPI_P_DONEDRQ                 (ATA_S_DRQ | ATA_I_CMD | ATA_I_IN)
#define ATAPI_P_DONE                    (ATA_I_CMD | ATA_I_IN)
#define ATAPI_P_ABORT                   0

/* misc defines */
#define ATA_PRIMARY                     0x1f0
#define ATA_SECONDARY                   0x170
#define ATA_PC98_BANK                   0x432
#define ATA_IOSIZE                      0x08
#define ATA_PC98_IOSIZE                 0x10
#define ATA_ALTIOSIZE                   0x01
#define ATA_BMIOSIZE                    0x08
#define ATA_PC98_BANKIOSIZE             0x01
#define ATA_IOADDR_RID                  0
#define ATA_ALTADDR_RID                 1
#define ATA_BMADDR_RID                  0x20
#define ATA_PC98_ALTADDR_RID            8
#define ATA_PC98_BANKADDR_RID           9
#define ATA_IRQ_RID                     0
/* map unit (ATA_MASTER/ATA_SLAVE) to index 0/1 */
#define ATA_DEV(device)                 ((device == ATA_MASTER) ? 0 : 1)

/* busmaster DMA related defines */
#define ATA_DMA_ENTRIES                 256
#define ATA_DMA_EOT                     0x80000000

#define ATA_BMCMD_PORT                  0x09
#define ATA_BMCMD_START_STOP            0x01
#define ATA_BMCMD_WRITE_READ            0x08

#define ATA_BMDEVSPEC_0                 0x0a

#define ATA_BMSTAT_PORT                 0x0b
#define ATA_BMSTAT_ACTIVE               0x01
#define ATA_BMSTAT_ERROR                0x02
#define ATA_BMSTAT_INTERRUPT            0x04
#define ATA_BMSTAT_MASK                 0x07
#define ATA_BMSTAT_DMA_MASTER           0x20
#define ATA_BMSTAT_DMA_SLAVE            0x40
#define ATA_BMSTAT_DMA_SIMPLEX          0x80

#define ATA_BMDEVSPEC_1                 0x0c
#define ATA_BMDTP_PORT                  0x0d

#define ATA_IDX_ADDR                    0x0e
#define ATA_IDX_DATA                    0x0f
#define ATA_MAX_RES                     0x10

#define ATA_INTR_FLAGS                  (INTR_MPSAFE|INTR_TYPE_BIO|INTR_ENTROPY)
#define ATA_OP_CONTINUES                0
#define ATA_OP_FINISHED                 1

/* ATAPI request sense structure (layout follows the ATAPI/SCSI sense data) */
struct atapi_sense {
    u_int8_t    error_code      :7;             /* current or deferred errors */
    u_int8_t    valid           :1;             /* follows ATAPI spec */
    u_int8_t    segment;                        /* Segment number */
    u_int8_t    sense_key       :4;             /* sense key */
    u_int8_t    reserved2_4     :1;             /* reserved */
    u_int8_t    ili             :1;             /* incorrect length indicator */
    u_int8_t    eom             :1;             /* end of medium */
    u_int8_t    filemark        :1;             /* filemark */
    u_int32_t   cmd_info __packed;              /* cmd information */
    u_int8_t    sense_length;                   /* additional sense len (n-7) */
    u_int32_t   cmd_specific_info __packed;     /* additional cmd spec info */
    u_int8_t    asc;                            /* additional sense code */
    u_int8_t    ascq;                           /* additional sense code qual */
    u_int8_t    replaceable_unit_code;          /* replaceable unit code */
    u_int8_t    sk_specific     :7;             /* sense key specific */
    u_int8_t    sksv            :1;             /* sense key specific info OK */
    u_int8_t    sk_specific1;                   /* sense key specific */
    u_int8_t    sk_specific2;                   /* sense key specific */
};

/*
 * An ATA/ATAPI request as queued and executed by the driver core.
 * The u union holds either ATA taskfile registers or an ATAPI CCB.
 */
struct ata_request {
    struct ata_device           *device;        /* ptr to device softc */
    void                        *driver;        /* driver specific */
    union {
	struct {
	    u_int8_t            command;        /* command reg */
	    u_int8_t            feature;        /* feature reg */
	    u_int16_t           count;          /* count reg */
	    u_int64_t           lba;            /* lba reg */
	} ata;
	struct {
	    u_int8_t            ccb[16];        /* ATAPI command block */
	    struct atapi_sense  sense_data;     /* ATAPI request sense data */
	    u_int8_t            sense_key;      /* ATAPI request sense key */
	    u_int8_t            sense_cmd;      /* ATAPI saved command */
	} atapi;
    } u;
    u_int8_t                    status;         /* ATA status */
    u_int8_t                    error;          /* ATA error */
    u_int8_t                    dmastat;        /* DMA status */
    u_int32_t                   bytecount;      /* bytes to transfer */
    u_int32_t                   transfersize;   /* bytes pr transfer */
    u_int32_t                   donecount;      /* bytes transferred */
    caddr_t                     data;           /* pointer to data buf */
    int                         flags;
#define         ATA_R_CONTROL           0x0001
#define         ATA_R_READ              0x0002
#define         ATA_R_WRITE             0x0004
#define         ATA_R_DMA               0x0008
#define         ATA_R_ATAPI             0x0010
#define         ATA_R_QUIET             0x0020
#define         ATA_R_INTR_SEEN         0x0040
#define         ATA_R_TIMEOUT           0x0080
#define         ATA_R_ORDERED           0x0100
#define         ATA_R_IMMEDIATE         0x0200
#define         ATA_R_REQUEUE           0x0400
#define         ATA_R_DEBUG             0x1000
    void                        (*callback)(struct ata_request *request);
    struct sema                 done;           /* request done sema */
    int                         retries;        /* retry count */
    int                         timeout;        /* timeout for this cmd */
    struct callout_handle       timeout_handle; /* handle for untimeout */
    int                         result;         /* result error code */
    struct task                 task;           /* task management */
    struct bio                  *bio;           /* bio for this request */
    TAILQ_ENTRY(ata_request)    sequence;       /* sequence management */
    TAILQ_ENTRY(ata_request)    chain;          /* list management */
};

/* define this for debugging request processing */
#if 0
#define ATA_DEBUG_RQ(request, string) \
    { \
    if (request->flags & ATA_R_DEBUG) \
	ata_prtdev(request->device, "req=%08x %s " string "\n", \
		   (u_int)request, ata_cmd2str(request)); \
    }
#else
#define ATA_DEBUG_RQ(request, string)
#endif

/* structure describing an ATA/ATAPI device */
struct ata_device {
    struct ata_channel          *channel;
    int                         unit;           /* unit number */
#define         ATA_MASTER              0x00
#define         ATA_SLAVE               0x10
    char                        *name;          /* device name */
    struct ata_params           *param;         /* ata param structure */
    void                        *softc;         /* ptr to softc for device */
    void                        (*attach)(struct ata_device *atadev);
    void                        (*detach)(struct ata_device *atadev);
    void                        (*config)(struct ata_device *atadev);
    void                        (*start)(struct ata_device *atadev);
    int                         flags;
#define         ATA_D_USE_CHS           0x0001
#define         ATA_D_DETACHING         0x0002
#define         ATA_D_MEDIA_CHANGED     0x0004
#define         ATA_D_ENC_PRESENT       0x0008
    int                         cmd;            /* last cmd executed */
    int                         mode;           /* transfermode */
    void                        (*setmode)(struct ata_device *atadev, int mode);
};

/* structure for holding DMA address data */
struct ata_dmaentry {
    u_int32_t base;
    u_int32_t count;
};

/* structure holding DMA related information */
struct ata_dma {
    bus_dma_tag_t               dmatag;         /* parent DMA tag */
    bus_dma_tag_t               cdmatag;        /* control DMA tag */
    bus_dmamap_t                cdmamap;        /* control DMA map */
    bus_dma_tag_t               ddmatag;        /* data DMA tag */
    bus_dmamap_t                ddmamap;        /* data DMA map */
    struct ata_dmaentry         *dmatab;        /* DMA transfer table */
    bus_addr_t                  mdmatab;        /* bus address of dmatab */
    bus_dma_tag_t               wdmatag;        /* workspace DMA tag */
    bus_dmamap_t                wdmamap;        /* workspace DMA map */
    u_int8_t                    *workspace;     /* workspace */
    bus_addr_t                  wdmatab;        /* bus address of workspace
						 * (NOTE(review): original
						 * comment said "dmatab",
						 * apparently a copy/paste) */
    u_int32_t                   alignment;      /* DMA engine alignment */
    u_int32_t                   boundary;       /* DMA engine boundary */
    u_int32_t                   max_iosize;     /* DMA engine max IO size */
    u_int32_t                   cur_iosize;     /* DMA engine current IO size */
    int                         flags;
/* NOTE(review): patch hunk below renumbers the flag bits and adds LOADED */
-#define ATA_DMA_ACTIVE                  0x01    /* DMA transfer in progress */
-#define ATA_DMA_READ                    0x02    /* transaction is a read */
+#define ATA_DMA_READ                    0x01    /* transaction is a read */
+#define ATA_DMA_LOADED                  0x02    /* DMA tables etc loaded */
+#define ATA_DMA_ACTIVE                  0x04    /* DMA transfer in progress */
    void (*alloc)(struct ata_channel *ch);
    void (*free)(struct ata_channel *ch);
    int (*load)(struct ata_device *atadev, caddr_t data, int32_t count,int dir);
    int (*unload)(struct ata_channel *ch);
    int (*start)(struct ata_channel *ch);
    int (*stop)(struct ata_channel *ch);
};

/* structure holding lowlevel functions */
struct ata_lowlevel {
    void (*reset)(struct ata_channel *ch);
    void (*interrupt)(void *channel);
    int (*transaction)(struct ata_request *request);
    int (*command)(struct ata_device *atadev, u_int8_t command,
		   u_int64_t lba, u_int16_t count, u_int16_t feature);
};

/* structure holding resources for an ATA channel */
struct ata_resource {
    struct resource             *res;
    int                         offset;
};

/* structure describing an ATA channel */
struct ata_channel {
    struct device               *dev;           /* device handle */
    int                         unit;           /* channel number */
    struct ata_resource         r_io[ATA_MAX_RES];/* I/O resources */
    struct resource             *r_irq;         /* interrupt of this channel */
    void                        *ih;            /* interrupt handle */
    struct ata_lowlevel         hw;             /* lowlevel HW functions */
    struct ata_dma              *dma;           /* DMA data / functions */
    int                         flags;          /* channel flags */
#define         ATA_NO_SLAVE            0x01
#define         ATA_USE_16BIT           0x02
#define         ATA_USE_PC98GEOM        0x04
#define         ATA_ATAPI_DMA_RO        0x08
#define         ATA_48BIT_ACTIVE        0x10
#define         ATA_IMMEDIATE_MODE      0x20
#define         ATA_HWGONE              0x40
    struct ata_device           device[2];      /* devices on this channel */
#define         MASTER                  0x00
#define         SLAVE                   0x01
    int                         devices;        /* what is present */
#define         ATA_ATA_MASTER          0x01
#define         ATA_ATA_SLAVE           0x02
#define         ATA_ATAPI_MASTER        0x04
#define         ATA_ATAPI_SLAVE         0x08
    int                         lock;           /* ATA channel lock */
#define         ATA_IDLE                0x0000
#define         ATA_ACTIVE              0x0001
    void (*reset)(struct ata_channel *);
    void (*locking)(struct ata_channel *, int);
#define         ATA_LF_LOCK             0x0001
#define         ATA_LF_UNLOCK           0x0002
    struct mtx                  queue_mtx;      /* queue lock */
    TAILQ_HEAD(, ata_request)   ata_queue;      /* head of ATA queue */
    struct ata_request          *running;       /* currently running request */
};

/* disk bay/enclosure related */
#define ATA_LED_OFF                     0x00
#define ATA_LED_RED                     0x01
#define ATA_LED_GREEN                   0x02
#define ATA_LED_ORANGE                  0x03
#define ATA_LED_MASK                    0x03

/* externs */
extern devclass_t ata_devclass;
extern int ata_wc;

/* public
prototypes */
/* ata-all.c: */
int ata_probe(device_t dev);
int ata_attach(device_t dev);
int ata_detach(device_t dev);
int ata_suspend(device_t dev);
int ata_resume(device_t dev);
int ata_printf(struct ata_channel *ch, int device, const char *fmt, ...)
    __printflike(3, 4);
int ata_prtdev(struct ata_device *atadev, const char *fmt, ...)
    __printflike(2, 3);
void ata_set_name(struct ata_device *atadev, char *name, int lun);
void ata_free_name(struct ata_device *atadev);
int ata_get_lun(u_int32_t *map);
int ata_test_lun(u_int32_t *map, int lun);
void ata_free_lun(u_int32_t *map, int lun);
char *ata_mode2str(int mode);
int ata_pmode(struct ata_params *ap);
int ata_wmode(struct ata_params *ap);
int ata_umode(struct ata_params *ap);
int ata_limit_mode(struct ata_device *atadev, int mode, int maxmode);

/* ata-queue.c: */
int ata_reinit(struct ata_channel *ch);
void ata_start(struct ata_channel *ch);
int ata_controlcmd(struct ata_device *atadev, u_int8_t command,
		   u_int16_t feature, u_int64_t lba, u_int16_t count);
int ata_atapicmd(struct ata_device *atadev, u_int8_t *ccb, caddr_t data,
		 int count, int flags, int timeout);
void ata_queue_request(struct ata_request *request);
void ata_finish(struct ata_request *request);
void ata_fail_requests(struct ata_channel *ch, struct ata_device *device);
char *ata_cmd2str(struct ata_request *request);

/* ata-lowlevel.c: */
void ata_generic_hw(struct ata_channel *ch);
int ata_generic_command(struct ata_device *atadev, u_int8_t command,
			u_int64_t lba, u_int16_t count, u_int16_t feature);

/* subdrivers */
void ad_attach(struct ata_device *atadev);
void acd_attach(struct ata_device *atadev);
void afd_attach(struct ata_device *atadev);
void ast_attach(struct ata_device *atadev);
void atapi_cam_attach_bus(struct ata_channel *ch);
void atapi_cam_detach_bus(struct ata_channel *ch);
void atapi_cam_reinit_bus(struct ata_channel *ch);

/* macros for alloc/free of ata_requests */
extern uma_zone_t ata_zone;
#define ata_alloc_request() uma_zalloc(ata_zone, M_NOWAIT | M_ZERO)
#define ata_free_request(request) uma_zfree(ata_zone, request)

/* macros for locking a channel (lockword flips ATA_IDLE <-> ATA_ACTIVE) */
#define ATA_LOCK_CH(ch) \
	atomic_cmpset_acq_int(&(ch)->lock, ATA_IDLE, ATA_ACTIVE)

#define ATA_SLEEPLOCK_CH(ch) \
	while (!ATA_LOCK_CH(ch)) tsleep((caddr_t)&(ch), PRIBIO, "atalck", 1);

#define ATA_FORCELOCK_CH(ch) \
	atomic_store_rel_int(&(ch)->lock, ATA_ACTIVE)

#define ATA_UNLOCK_CH(ch) \
	atomic_store_rel_int(&(ch)->lock, ATA_IDLE)

/* macros to hide busspace uglyness */
#define ATA_INB(res, offset) \
	bus_space_read_1(rman_get_bustag((res)), \
			 rman_get_bushandle((res)), (offset))

#define ATA_INW(res, offset) \
	bus_space_read_2(rman_get_bustag((res)), \
			 rman_get_bushandle((res)), (offset))

#define ATA_INL(res, offset) \
	bus_space_read_4(rman_get_bustag((res)), \
			 rman_get_bushandle((res)), (offset))

#define ATA_INSW(res, offset, addr, count) \
	bus_space_read_multi_2(rman_get_bustag((res)), \
			       rman_get_bushandle((res)), \
			       (offset), (addr), (count))

#define ATA_INSW_STRM(res, offset, addr, count) \
	bus_space_read_multi_stream_2(rman_get_bustag((res)), \
				      rman_get_bushandle((res)), \
				      (offset), (addr), (count))

#define ATA_INSL(res, offset, addr, count) \
	bus_space_read_multi_4(rman_get_bustag((res)), \
			       rman_get_bushandle((res)), \
			       (offset), (addr), (count))

#define ATA_INSL_STRM(res, offset, addr, count) \
	bus_space_read_multi_stream_4(rman_get_bustag((res)), \
				      rman_get_bushandle((res)), \
				      (offset), (addr), (count))

#define ATA_OUTB(res, offset, value) \
	bus_space_write_1(rman_get_bustag((res)), \
			  rman_get_bushandle((res)), (offset), (value))

#define ATA_OUTW(res, offset, value) \
	bus_space_write_2(rman_get_bustag((res)), \
			  rman_get_bushandle((res)), (offset), (value))

#define ATA_OUTL(res, offset, value) \
	bus_space_write_4(rman_get_bustag((res)), \
			  rman_get_bushandle((res)), (offset), (value))

#define ATA_OUTSW(res, offset, addr, count) \
	bus_space_write_multi_2(rman_get_bustag((res)), \
				rman_get_bushandle((res)), \
				(offset), (addr), (count))

#define ATA_OUTSW_STRM(res, offset, addr, count) \
	bus_space_write_multi_stream_2(rman_get_bustag((res)), \
				       rman_get_bushandle((res)), \
				       (offset), (addr), (count))

#define ATA_OUTSL(res, offset, addr, count) \
	bus_space_write_multi_4(rman_get_bustag((res)), \
				rman_get_bushandle((res)), \
				(offset), (addr), (count))

#define ATA_OUTSL_STRM(res, offset, addr, count) \
	bus_space_write_multi_stream_4(rman_get_bustag((res)), \
				       rman_get_bushandle((res)), \
				       (offset), (addr), (count))

/* indexed variants: resolve resource + offset through ch->r_io[idx] */
#define ATA_IDX_INB(ch, idx) \
	ATA_INB(ch->r_io[idx].res, ch->r_io[idx].offset)

#define ATA_IDX_INW(ch, idx) \
	ATA_INW(ch->r_io[idx].res, ch->r_io[idx].offset)

#define ATA_IDX_INL(ch, idx) \
	ATA_INL(ch->r_io[idx].res, ch->r_io[idx].offset)

#define ATA_IDX_INSW(ch, idx, addr, count) \
	ATA_INSW(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_INSW_STRM(ch, idx, addr, count) \
	ATA_INSW_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_INSL(ch, idx, addr, count) \
	ATA_INSL(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_INSL_STRM(ch, idx, addr, count) \
	ATA_INSL_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_OUTB(ch, idx, value) \
	ATA_OUTB(ch->r_io[idx].res, ch->r_io[idx].offset, value)

#define ATA_IDX_OUTW(ch, idx, value) \
	ATA_OUTW(ch->r_io[idx].res, ch->r_io[idx].offset, value)

#define ATA_IDX_OUTL(ch, idx, value) \
	ATA_OUTL(ch->r_io[idx].res, ch->r_io[idx].offset, value)

#define ATA_IDX_OUTSW(ch, idx, addr, count) \
	ATA_OUTSW(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_OUTSW_STRM(ch, idx, addr, count) \
	ATA_OUTSW_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_OUTSL(ch, idx, addr, count) \
	ATA_OUTSL(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)

#define ATA_IDX_OUTSL_STRM(ch, idx, addr, count) \
	ATA_OUTSL_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count)
diff --git a/sys/dev/ata/ata-chipset.c b/sys/dev/ata/ata-chipset.c
index 394667c2620a..69ecee52ce1e 100644 --- a/sys/dev/ata/ata-chipset.c +++ b/sys/dev/ata/ata-chipset.c @@ -1,2818 +1,2822 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* misc defines */ #define GRANDPARENT(dev) device_get_parent(device_get_parent(dev)) #define ATAPI_DEVICE(atadev) \ ((atadev->unit == ATA_MASTER && \ atadev->channel->devices & ATA_ATAPI_MASTER) ||\ (atadev->unit == ATA_SLAVE && \ atadev->channel->devices & ATA_ATAPI_SLAVE)) /* local prototypes */ static int ata_generic_chipinit(device_t); static void ata_generic_intr(void *); static void ata_generic_setmode(struct ata_device *, int); static int ata_acard_chipinit(device_t); static void ata_acard_intr(void *); static void ata_acard_850_setmode(struct ata_device *, int); static void ata_acard_86X_setmode(struct ata_device *, int); static int ata_ali_chipinit(device_t); static void ata_ali_setmode(struct ata_device *, int); static int ata_amd_chipinit(device_t); static int ata_cyrix_chipinit(device_t); static void ata_cyrix_setmode(struct ata_device *, int); static int ata_cypress_chipinit(device_t); static void ata_cypress_setmode(struct ata_device *, int); static int ata_highpoint_chipinit(device_t); static void ata_highpoint_intr(void *); static void ata_highpoint_setmode(struct ata_device *, int); static int ata_highpoint_check_80pin(struct ata_device *, int); static int ata_intel_chipinit(device_t); static void ata_intel_intr(void *); static void ata_intel_reset(struct ata_channel *); static void ata_intel_old_setmode(struct ata_device *, int); static void ata_intel_new_setmode(struct ata_device *, int); static int ata_national_chipinit(device_t); static void ata_national_setmode(struct ata_device *, int); static int ata_nvidia_chipinit(device_t); static int ata_via_chipinit(device_t); static void ata_via_family_setmode(struct ata_device *, int); static void ata_via_southbridge_fixup(device_t); static int 
ata_promise_chipinit(device_t); static int ata_promise_mio_allocate(device_t, struct ata_channel *); static void ata_promise_mio_intr(void *); static void ata_promise_sx4_intr(void *); static void ata_promise_mio_dmainit(struct ata_channel *); static void ata_promise_mio_reset(struct ata_channel *ch); static int ata_promise_mio_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature); static int ata_promise_sx4_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature); static int ata_promise_apkt(u_int8_t *bytep, struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature); static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt); static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr); static void ata_promise_tx2_intr(void *); static void ata_promise_old_intr(void *); static void ata_promise_new_dmainit(struct ata_channel *); static void ata_promise_setmode(struct ata_device *, int); static int ata_serverworks_chipinit(device_t); static void ata_serverworks_setmode(struct ata_device *, int); static int ata_sii_chipinit(device_t); static int ata_sii_allocate(device_t, struct ata_channel *); static void ata_sii_reset(struct ata_channel *); static void ata_sii_intr(void *); static void ata_cmd_intr(void *); static void ata_cmd_old_intr(void *); static void ata_sii_setmode(struct ata_device *, int); static void ata_cmd_setmode(struct ata_device *, int); static int ata_sis_chipinit(device_t); static void ata_sis_setmode(struct ata_device *, int); static int ata_check_80pin(struct ata_device *, int); static struct ata_chip_id *ata_find_chip(device_t, struct ata_chip_id *, int); static struct ata_chip_id *ata_match_chip(device_t, struct ata_chip_id *); static int ata_setup_interrupt(device_t); static void ata_serialize(struct ata_channel *, int); static int ata_mode2idx(int); /* generic or unknown ATA 
chipset init code */

/* register a description and the chipinit hook for an unrecognized controller */
int
ata_generic_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    device_set_desc(dev, "GENERIC ATA controller");
    ctlr->chipinit = ata_generic_chipinit;
    return 0;
}

/* hook up the shared interrupt and the generic setmode method */
static int
ata_generic_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;
    ctlr->setmode = ata_generic_setmode;
    return 0;
}

/* shared interrupt handler: poll both channels, ack busmaster status first */
static void
ata_generic_intr(void *data)
{
    struct ata_pci_controller *ctlr = data;
    struct ata_channel *ch;
    int unit;

    /* implement this as a toggle instead to balance load XXX */
    for (unit = 0; unit < 2; unit++) {
	if (!(ch = ctlr->interrupt[unit].argument))
	    continue;
	if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) {
	    int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;

	    /* not our interrupt unless INTERRUPT is set and ACTIVE clear */
	    if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) !=
		ATA_BMSTAT_INTERRUPT)
		continue;
	    ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
	    DELAY(1);
	}
	ctlr->interrupt[unit].function(ch);
    }
}

/* set transfer mode on unknown hardware: cap at UDMA2 and trust the device */
static void
ata_generic_setmode(struct ata_device *atadev, int mode)
{
    mode = ata_limit_mode(atadev, mode, ATA_UDMA2);
    mode = ata_check_80pin(atadev, mode);
    if (!ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
	atadev->mode = mode;
}

/* set transfer mode on a SATA channel */
static void
ata_sata_setmode(struct ata_device *atadev, int mode)
{
    /*
     * if we detect that the device isn't a real SATA device we limit
     * the transfer mode to UDMA5/ATA100.
     * this works around the problems some devices has with the
     * Marvell SATA->PATA converters and UDMA6/ATA133.
     */
    if (atadev->param->satacapabilities != 0x0000 &&
	atadev->param->satacapabilities != 0xffff)
	mode = ata_limit_mode(atadev, mode, ATA_UDMA6);
    else
	mode = ata_limit_mode(atadev, mode, ATA_UDMA5);

    if (!ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
	atadev->mode = mode;
}

/*
 * Acard chipset support functions
 */
/* match Acard PCI ids and register the chipinit hook */
int
ata_acard_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_ATP850R, 0, ATPOLD, 0x00, ATA_UDMA2, "Acard ATP850" },
     { ATA_ATP860A, 0, 0,      0x00, ATA_UDMA4, "Acard ATP860A" },
     { ATA_ATP860R, 0, 0,      0x00, ATA_UDMA4, "Acard ATP860R" },
     { ATA_ATP865A, 0, 0,      0x00, ATA_UDMA6, "Acard ATP865A" },
     { ATA_ATP865R, 0, 0,      0x00, ATA_UDMA6, "Acard ATP865R" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_acard_chipinit;
    return 0;
}

/* allocate/setup the IRQ; old ATP850 needs channel serialization */
static int
ata_acard_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    int rid = ATA_IRQ_RID;

    if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					       RF_SHAREABLE | RF_ACTIVE))) {
	device_printf(dev, "unable to map interrupt\n");
	return ENXIO;
    }
    if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
			ata_acard_intr, ctlr, &ctlr->handle))) {
	device_printf(dev, "unable to setup interrupt\n");
	return ENXIO;
    }
    if (ctlr->chip->cfg1 == ATPOLD) {
	ctlr->setmode = ata_acard_850_setmode;
	ctlr->locking = ata_serialize;
    }
    else
	ctlr->setmode = ata_acard_86X_setmode;
    return 0;
}

/* Acard interrupt handler: like the generic one but also stops the BM engine */
static void
ata_acard_intr(void *data)
{
    struct ata_pci_controller *ctlr = data;
    struct ata_channel *ch;
    int unit;

    /* implement this as a toggle instead to balance load XXX */
    for (unit = 0; unit < 2; unit++) {
	/* on old chips only the serialized (locked) channel may interrupt */
	if (ctlr->chip->cfg1 == ATPOLD && ctlr->locked_ch != unit)
	    continue;
	if (!(ch = ctlr->interrupt[unit].argument))
	    continue;
	if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) {
	    int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;

	    if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) !=
		ATA_BMSTAT_INTERRUPT)
		continue;
	    ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
	    DELAY(1);
	    ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
			 ATA_IDX_INB(ch, ATA_BMCMD_PORT)&~ATA_BMCMD_START_STOP);
	    DELAY(1);
	}
	ctlr->interrupt[unit].function(ch);
    }
}

/* program transfer mode timings on the old ATP850 (2-bit UDMA field at 0x54) */
static void
ata_acard_850_setmode(struct ata_device *atadev, int mode)
{
    device_t parent = device_get_parent(atadev->channel->dev);
    struct ata_pci_controller *ctlr = device_get_softc(parent);
    int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit);
    int error;

    mode = ata_limit_mode(atadev, mode,
			  ATAPI_DEVICE(atadev)?ATA_PIO_MAX:ctlr->chip->max_dma);

    /* XXX missing WDMA0+1 + PIO modes */
    if (mode >= ATA_WDMA2) {
	error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0,mode);
	if (bootverbose)
	    ata_prtdev(atadev, "%ssetting %s on %s chip\n",
		       (error) ? "FAILURE " : "",
		       ata_mode2str(mode), ctlr->chip->text);
	if (!error) {
	    u_int8_t reg54 = pci_read_config(parent, 0x54, 1);

	    reg54 &= ~(0x03 << (devno << 1));
	    if (mode >= ATA_UDMA0)
		reg54 |= (((mode & ATA_MODE_MASK) + 1) << (devno << 1));
	    pci_write_config(parent, 0x54, reg54, 1);
	    pci_write_config(parent, 0x4a, 0xa6, 1);
	    pci_write_config(parent, 0x40 + (devno << 1), 0x0301, 2);
	    atadev->mode = mode;
	    return;
	}
    }
    /* we could set PIO mode timings, but we assume the BIOS did that */
}

/* program transfer mode timings on ATP860/865 (4-bit UDMA field at 0x44) */
static void
ata_acard_86X_setmode(struct ata_device *atadev, int mode)
{
    device_t parent = device_get_parent(atadev->channel->dev);
    struct ata_pci_controller *ctlr = device_get_softc(parent);
    int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit);
    int error;

    mode = ata_limit_mode(atadev, mode,
			  ATAPI_DEVICE(atadev)?ATA_PIO_MAX:ctlr->chip->max_dma);
    mode = ata_check_80pin(atadev, mode);

    /* XXX missing WDMA0+1 + PIO modes */
    if (mode >= ATA_WDMA2) {
	error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0,mode);
	if (bootverbose)
	    ata_prtdev(atadev, "%ssetting %s on %s chip\n",
		       (error) ? "FAILURE " : "",
		       ata_mode2str(mode), ctlr->chip->text);
	if (!error) {
	    u_int16_t reg44 = pci_read_config(parent, 0x44, 2);

	    reg44 &= ~(0x000f << (devno << 2));
	    if (mode >= ATA_UDMA0)
		reg44 |= (((mode & ATA_MODE_MASK) + 1) << (devno << 2));
	    pci_write_config(parent, 0x44, reg44, 2);
	    pci_write_config(parent, 0x4a, 0xa6, 1);
	    pci_write_config(parent, 0x40 + devno, 0x31, 1);
	    atadev->mode = mode;
	    return;
	}
    }
    /* we could set PIO mode timings, but we assume the BIOS did that */
}

/*
 * Acer Labs Inc (ALI) chipset support functions
 */
/* match ALi Aladdin revisions (revision selects old vs new programming) */
int
ata_ali_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_ALI_5229, 0xc4, 0, ALINEW, ATA_UDMA5, "AcerLabs Aladdin" },
     { ATA_ALI_5229, 0xc2, 0, ALINEW, ATA_UDMA4, "AcerLabs Aladdin" },
     { ATA_ALI_5229, 0x20, 0, ALIOLD, ATA_UDMA2, "AcerLabs Aladdin" },
     { ATA_ALI_5229, 0x00, 0, ALIOLD, ATA_WDMA2, "AcerLabs Aladdin" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_ali_chipinit;
    return 0;
}

/* enable ATAPI UDMA and (on new chips) cable detection */
static int
ata_ali_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    /* deactivate the ATAPI FIFO and enable ATAPI UDMA */
    pci_write_config(dev, 0x53, pci_read_config(dev, 0x53, 1) | 0x03, 1);

    /* enable cable detection and UDMA support on newer chips */
    if (ctlr->chip->cfg2 & ALINEW)
	pci_write_config(dev, 0x4b, pci_read_config(dev, 0x4b, 1) | 0x09, 1);

    ctlr->setmode = ata_ali_setmode;
    return 0;
}

/* program UDMA/PIO timings in config regs 0x54/0x58 per device */
static void
ata_ali_setmode(struct ata_device *atadev, int mode)
{
    device_t parent = device_get_parent(atadev->channel->dev);
    struct ata_pci_controller *ctlr = device_get_softc(parent);
    int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit);
    int error;

    mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma);

    if (ctlr->chip->cfg2 & ALINEW) {
	if (mode > ATA_UDMA2 &&
	    pci_read_config(parent, 0x4a, 1) & (1 << atadev->channel->unit)) {
	    ata_prtdev(atadev,
		       "DMA limited to UDMA33, non-ATA66 cable or device\n");
	    mode = ATA_UDMA2;
	}
    }
    else
	mode = ata_check_80pin(atadev, mode);

    if (ctlr->chip->cfg2 & ALIOLD) {
	/* doesn't support ATAPI DMA on write */
	atadev->channel->flags |= ATA_ATAPI_DMA_RO;
	if (atadev->channel->devices & ATA_ATAPI_MASTER &&
	    atadev->channel->devices & ATA_ATAPI_SLAVE) {
	    /* doesn't support ATAPI DMA on two ATAPI devices */
	    ata_prtdev(atadev, "two atapi devices on this channel, no DMA\n");
	    mode = ata_limit_mode(atadev, mode, ATA_PIO_MAX);
	}
    }

    error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
    if (bootverbose)
	ata_prtdev(atadev, "%ssetting %s on %s chip\n",
		   (error) ? "FAILURE " : "",
		   ata_mode2str(mode), ctlr->chip->text);
    if (!error) {
	if (mode >= ATA_UDMA0) {
	    u_int8_t udma[] = {0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x0f};
	    u_int32_t word54 = pci_read_config(parent, 0x54, 4);

	    word54 &= ~(0x000f000f << (devno << 2));
	    word54 |= (((udma[mode&ATA_MODE_MASK]<<16)|0x05)<<(devno<<2));
	    pci_write_config(parent, 0x54, word54, 4);
	    pci_write_config(parent, 0x58 + (atadev->channel->unit << 2),
			     0x00310001, 4);
	}
	else {
	    u_int32_t piotimings[] =
		{ 0x006d0003, 0x00580002, 0x00440001, 0x00330001,
		  0x00310001, 0x00440001, 0x00330001, 0x00310001};

	    pci_write_config(parent, 0x54, pci_read_config(parent, 0x54, 4) &
			     ~(0x0008000f << (devno << 2)), 4);
	    pci_write_config(parent, 0x58 + (atadev->channel->unit << 2),
			     piotimings[ata_mode2idx(mode)], 4);
	}
	atadev->mode = mode;
    }
}

/*
 * American Micro Devices (AMD) support functions
 */
/* match AMD 756/766/768/8111 southbridge IDE functions */
int
ata_amd_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_AMD756,  0x00, AMDNVIDIA, 0x00,            ATA_UDMA4, "AMD 756" },
     { ATA_AMD766,  0x00, AMDNVIDIA, AMDCABLE|AMDBUG, ATA_UDMA5, "AMD 766" },
     { ATA_AMD768,  0x00, AMDNVIDIA, AMDCABLE,        ATA_UDMA5, "AMD 768" },
     { ATA_AMD8111, 0x00, AMDNVIDIA, AMDCABLE,        ATA_UDMA6, "AMD 8111" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_amd_chipinit;
    return 0;
}

/* set prefetch/postwrite (disabled on buggy 766) and use VIA-family setmode */
static int
ata_amd_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    /* set prefetch, postwrite */
    if (ctlr->chip->cfg2 & AMDBUG)
	pci_write_config(dev, 0x41, pci_read_config(dev, 0x41, 1) & 0x0f, 1);
    else
	pci_write_config(dev, 0x41, pci_read_config(dev, 0x41, 1) | 0xf0, 1);

    ctlr->setmode = ata_via_family_setmode;
    return 0;
}

/*
 * Cyrix chipset support functions
 */
/* match the Cyrix 5530 companion IDE */
int
ata_cyrix_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (pci_get_devid(dev) == ATA_CYRIX_5530) {
	device_set_desc(dev, "Cyrix 5530 ATA33 controller");
	ctlr->chipinit = ata_cyrix_chipinit;
	return 0;
    }
    return ENXIO;
}

/* use the Cyrix setmode only if the timing registers (r_res1) mapped */
static int
ata_cyrix_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    if (ctlr->r_res1)
	ctlr->setmode = ata_cyrix_setmode;
    else
	ctlr->setmode = ata_generic_setmode;
    return 0;
}

/* program PIO/WDMA/UDMA timing words through the busmaster register window */
static void
ata_cyrix_setmode(struct ata_device *atadev, int mode)
{
    struct ata_channel *ch = atadev->channel;
    int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit);
    u_int32_t piotiming[] =
	{ 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 };
    u_int32_t dmatiming[] = { 0x00077771, 0x00012121, 0x00002020 };
    u_int32_t udmatiming[] = { 0x00921250, 0x00911140, 0x00911030 };
    int error;

    atadev->channel->dma->alignment = 16;
    atadev->channel->dma->max_iosize = 63 * 1024;

    mode = ata_limit_mode(atadev, mode, ATA_UDMA2);

    error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
    if (bootverbose)
	ata_prtdev(atadev, "%ssetting %s on Cyrix chip\n",
		   (error) ? "FAILURE " : "", ata_mode2str(mode));
    if (!error) {
	if (mode >= ATA_UDMA0) {
	    ATA_OUTL(ch->r_io[ATA_BMCMD_PORT].res, 0x24 + (devno << 3),
		     udmatiming[mode & ATA_MODE_MASK]);
	}
	else if (mode >= ATA_WDMA0) {
	    ATA_OUTL(ch->r_io[ATA_BMCMD_PORT].res, 0x24 + (devno << 3),
		     dmatiming[mode & ATA_MODE_MASK]);
	}
	else {
	    ATA_OUTL(ch->r_io[ATA_BMCMD_PORT].res, 0x20 + (devno << 3),
		     piotiming[mode & ATA_MODE_MASK]);
	}
	atadev->mode = mode;
    }
}

/*
 * Cypress chipset support functions
 */
/* match the 82C693 second IDE function only */
int
ata_cypress_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    /*
     * the Cypress chip is a mess, it contains two ATA functions, but
     * both channels are visible on the first one.
     * simply ignore the second function for now, as the right
     * solution (ignoring the second channel on the first function)
     * doesn't work with the crappy ATA interrupt setup on the alpha.
     */
    if (pci_get_devid(dev) == ATA_CYPRESS_82C693 &&
	pci_get_function(dev) == 1 &&
	pci_get_subclass(dev) == PCIS_STORAGE_IDE) {
	device_set_desc(dev, "Cypress 82C693 ATA controller");
	ctlr->chipinit = ata_cypress_chipinit;
	return 0;
    }
    return ENXIO;
}

/* hook up the interrupt and the Cypress setmode method */
static int
ata_cypress_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    ctlr->setmode = ata_cypress_setmode;
    return 0;
}

/* only WDMA2 timing is programmed; everything else left to the BIOS */
static void
ata_cypress_setmode(struct ata_device *atadev, int mode)
{
    device_t parent = device_get_parent(atadev->channel->dev);
    int error;

    mode = ata_limit_mode(atadev, mode, ATA_WDMA2);

    /* XXX missing WDMA0+1 + PIO modes */
    if (mode == ATA_WDMA2) {
	error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0,mode);
	if (bootverbose)
	    ata_prtdev(atadev, "%ssetting WDMA2 on Cypress chip\n",
		       error ? "FAILURE " : "");
	if (!error) {
	    pci_write_config(parent, atadev->channel->unit?0x4e:0x4c,0x2020,2);
	    atadev->mode = mode;
	    return;
	}
    }
    /* we could set PIO mode timings, but we assume the BIOS did that */
}

/*
 * HighPoint chipset support functions
 */
/* match HighPoint ids; chip revision distinguishes 366/368/370/372 variants */
int
ata_highpoint_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_HPT366, 0x05, HPT372, 0x00,   ATA_UDMA6, "HighPoint HPT372" },
     { ATA_HPT366, 0x03, HPT370, 0x00,   ATA_UDMA5, "HighPoint HPT370" },
     { ATA_HPT366, 0x02, HPT366, 0x00,   ATA_UDMA4, "HighPoint HPT368" },
     { ATA_HPT366, 0x00, HPT366, HPTOLD, ATA_UDMA4, "HighPoint HPT366" },
     { ATA_HPT372, 0x01, HPT372, 0x00,   ATA_UDMA6, "HighPoint HPT372" },
     { ATA_HPT302, 0x01, HPT372, 0x00,   ATA_UDMA6, "HighPoint HPT302" },
     { ATA_HPT371, 0x01, HPT372, 0x00,   ATA_UDMA6, "HighPoint HPT371" },
     { ATA_HPT374, 0x07, HPT374, 0x00,   ATA_UDMA6, "HighPoint HPT374" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    strcpy(buffer, idx->text);
    if (idx->cfg1 == HPT374) {
	if (pci_get_function(dev) == 0)
	    strcat(buffer, " (channel 0+1)");
	else if (pci_get_function(dev) == 1)
	    strcat(buffer, " (channel 2+3)");
    }
    /*
     * NOTE(review): sprintf with buffer as both destination and source
     * argument is undefined behavior per C99; should format into a
     * separate buffer (or append with strcat) instead.
     */
    sprintf(buffer, "%s %s controller", buffer, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_highpoint_chipinit;
    return 0;
}

/* allocate/setup IRQ and apply per-generation chip quirks */
static int
ata_highpoint_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    int rid = ATA_IRQ_RID;

    if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					       RF_SHAREABLE | RF_ACTIVE))) {
	device_printf(dev, "unable to map interrupt\n");
	return ENXIO;
    }
    if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS,
			ata_highpoint_intr, ctlr, &ctlr->handle))) {
	device_printf(dev, "unable to setup interrupt\n");
	return ENXIO;
    }
    if (ctlr->chip->cfg2 == HPTOLD) {
	/* disable interrupt prediction */
	pci_write_config(dev, 0x51, (pci_read_config(dev, 0x51, 1) &
~0x80), 1); } else { /* disable interrupt prediction */ pci_write_config(dev, 0x51, (pci_read_config(dev, 0x51, 1) & ~0x03), 1); pci_write_config(dev, 0x55, (pci_read_config(dev, 0x55, 1) & ~0x03), 1); /* enable interrupts */ pci_write_config(dev, 0x5a, (pci_read_config(dev, 0x5a, 1) & ~0x10), 1); /* set clocks etc */ if (ctlr->chip->cfg1 < HPT372) pci_write_config(dev, 0x5b, 0x22, 1); else pci_write_config(dev, 0x5b, (pci_read_config(dev, 0x5b, 1) & 0x01) | 0x20, 1); } ctlr->setmode = ata_highpoint_setmode; return 0; } static void ata_highpoint_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; /* implement this as a toggle instead to balance load XXX */ for (unit = 0; unit < 2; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; if (ch->dma) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } static void ata_highpoint_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); int error; u_int32_t timings33[][4] = { /* HPT366 HPT370 HPT372 HPT374 mode */ { 0x40d0a7aa, 0x06914e57, 0x0d029d5e, 0x0ac1f48a }, /* PIO 0 */ { 0x40d0a7a3, 0x06914e43, 0x0d029d26, 0x0ac1f465 }, /* PIO 1 */ { 0x40d0a753, 0x06514e33, 0x0c829ca6, 0x0a81f454 }, /* PIO 2 */ { 0x40c8a742, 0x06514e22, 0x0c829c84, 0x0a81f443 }, /* PIO 3 */ { 0x40c8a731, 0x06514e21, 0x0c829c62, 0x0a81f442 }, /* PIO 4 */ { 0x20c8a797, 0x26514e97, 0x2c82922e, 0x228082ea }, /* MWDMA 0 */ { 0x20c8a732, 0x26514e33, 0x2c829266, 0x22808254 }, /* MWDMA 1 */ { 0x20c8a731, 0x26514e21, 0x2c829262, 0x22808242 }, /* MWDMA 2 */ { 0x10c8a731, 0x16514e31, 0x1c82dc62, 0x121882ea }, /* UDMA 0 
*/ { 0x10cba731, 0x164d4e31, 0x1c9adc62, 0x12148254 }, /* UDMA 1 */ { 0x10caa731, 0x16494e31, 0x1c91dc62, 0x120c8242 }, /* UDMA 2 */ { 0x10cfa731, 0x166d4e31, 0x1c8edc62, 0x128c8242 }, /* UDMA 3 */ { 0x10c9a731, 0x16454e31, 0x1c8ddc62, 0x12ac8242 }, /* UDMA 4 */ { 0, 0x16454e31, 0x1c6ddc62, 0x12848242 }, /* UDMA 5 */ { 0, 0, 0x1c81dc62, 0x12448242 } /* UDMA 6 */ }; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg1 == HPT366 && ATAPI_DEVICE(atadev)) mode = ata_limit_mode(atadev, mode, ATA_PIO_MAX); mode = ata_highpoint_check_80pin(atadev, mode); error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on HighPoint chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode)); if (!error) pci_write_config(parent, 0x40 + (devno << 2), timings33[ata_mode2idx(mode)][ctlr->chip->cfg1], 4); atadev->mode = mode; } static int ata_highpoint_check_80pin(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); u_int8_t reg, val, res; if (ctlr->chip->cfg1 == HPT374 && pci_get_function(parent) == 1) { reg = atadev->channel->unit ? 0x57 : 0x53; val = pci_read_config(parent, reg, 1); pci_write_config(parent, reg, val | 0x80, 1); } else { reg = 0x5b; val = pci_read_config(parent, reg, 1); pci_write_config(parent, reg, val & 0xfe, 1); } res = pci_read_config(parent, 0x5a, 1) & (atadev->channel->unit ? 
0x1:0x2); pci_write_config(parent, reg, val, 1); if (mode > ATA_UDMA2 && res) { ata_prtdev(atadev,"DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } return mode; } /* * Intel chipset support functions */ int ata_intel_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_I82371FB, 0, 0, 0x00, ATA_WDMA2, "Intel PIIX" }, { ATA_I82371SB, 0, 0, 0x00, ATA_WDMA2, "Intel PIIX3" }, { ATA_I82371AB, 0, 0, 0x00, ATA_UDMA2, "Intel PIIX4" }, { ATA_I82443MX, 0, 0, 0x00, ATA_UDMA2, "Intel PIIX4" }, { ATA_I82451NX, 0, 0, 0x00, ATA_UDMA2, "Intel PIIX4" }, { ATA_I82801AB, 0, 0, 0x00, ATA_UDMA2, "Intel ICH0" }, { ATA_I82801AA, 0, 0, 0x00, ATA_UDMA4, "Intel ICH" }, { ATA_I82372FB, 0, 0, 0x00, ATA_UDMA4, "Intel ICH" }, { ATA_I82801BA, 0, 0, 0x00, ATA_UDMA5, "Intel ICH2" }, { ATA_I82801BA_1, 0, 0, 0x00, ATA_UDMA5, "Intel ICH2" }, { ATA_I82801CA, 0, 0, 0x00, ATA_UDMA5, "Intel ICH3" }, { ATA_I82801CA_1, 0, 0, 0x00, ATA_UDMA5, "Intel ICH3" }, { ATA_I82801DB, 0, 0, 0x00, ATA_UDMA5, "Intel ICH4" }, { ATA_I82801DB_1, 0, 0, 0x00, ATA_UDMA5, "Intel ICH4" }, { ATA_I82801EB, 0, 0, 0x00, ATA_UDMA5, "Intel ICH5" }, { ATA_I82801EB_1, 0, 0, 0x00, ATA_SA150, "Intel ICH5" }, { ATA_I82801EB_2, 0, 0, 0x00, ATA_SA150, "Intel ICH5" }, { ATA_I6300ESB, 0, 0, 0x00, ATA_UDMA5, "Intel 6300ESB" }, { ATA_I6300ESB_1, 0, 0, 0x00, ATA_SA150, "Intel 6300ESB" }, { ATA_I6300ESB_2, 0, 0, 0x00, ATA_SA150, "Intel 6300ESB" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_intel_chipinit; return 0; } static int ata_intel_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int rid = ATA_IRQ_RID; if (!ata_legacy(dev)) { if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_intel_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } } if (ctlr->chip->chipid == ATA_I82371FB) { ctlr->setmode = ata_intel_old_setmode; } else if (ctlr->chip->max_dma < ATA_SA150) { ctlr->setmode = ata_intel_new_setmode; } else { pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2); ctlr->reset = ata_intel_reset; ctlr->setmode = ata_sata_setmode; } return 0; } static void ata_intel_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; /* implement this as a toggle instead to balance load XXX */ for (unit = 0; unit < 2; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; if (ch->dma) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } static void ata_intel_old_setmode(struct ata_device *atadev, int mode) { /* NOT YET */ } static void ata_intel_reset(struct ata_channel *ch) { device_t parent = device_get_parent(ch->dev); int mask, timeout = 100; if (pci_read_config(parent, 0x90, 1) & 0x04) mask = 0x0003; else mask = (0x0001 << ch->unit); pci_write_config(parent, 0x92, pci_read_config(parent, 0x92, 2) & ~mask, 2); DELAY(10); pci_write_config(parent, 0x92, pci_read_config(parent, 0x92, 2) | mask, 2); while (timeout--) { DELAY(10000); if ((pci_read_config(parent, 0x92, 2) & (mask << 4)) == (mask << 4)) { DELAY(10000); return; } } } static void ata_intel_new_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); int devno = (atadev->channel->unit << 1) + 
ATA_DEV(atadev->unit); u_int32_t reg40 = pci_read_config(parent, 0x40, 4); u_int8_t reg44 = pci_read_config(parent, 0x44, 1); u_int8_t reg48 = pci_read_config(parent, 0x48, 1); u_int16_t reg4a = pci_read_config(parent, 0x4a, 2); u_int16_t reg54 = pci_read_config(parent, 0x54, 2); u_int32_t mask40 = 0, new40 = 0; u_int8_t mask44 = 0, new44 = 0; int error; u_int8_t timings[] = { 0x00, 0x00, 0x10, 0x21, 0x23, 0x10, 0x21, 0x23, 0x23, 0x23, 0x23, 0x23, 0x23, 0x23 }; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); if ( mode > ATA_UDMA2 && !(reg54 & (0x10 << devno))) { ata_prtdev(atadev,"DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (error) return; if (mode >= ATA_UDMA0) { pci_write_config(parent, 0x48, reg48 | (0x0001 << devno), 2); pci_write_config(parent, 0x4a, (reg4a & ~(0x3 << (devno<<2))) | (0x01 + !(mode & 0x01)), 2); } else { pci_write_config(parent, 0x48, reg48 & ~(0x0001 << devno), 2); pci_write_config(parent, 0x4a, (reg4a & ~(0x3 << (devno << 2))), 2); } if (mode >= ATA_UDMA2) pci_write_config(parent, 0x54, reg54 | (0x1 << devno), 2); else pci_write_config(parent, 0x54, reg54 & ~(0x1 << devno), 2); if (mode >= ATA_UDMA5) pci_write_config(parent, 0x54, reg54 | (0x10000 << devno), 2); else pci_write_config(parent, 0x54, reg54 & ~(0x10000 << devno), 2); reg40 &= ~0x00ff00ff; reg40 |= 0x40774077; if (atadev->unit == ATA_MASTER) { mask40 = 0x3300; new40 = timings[ata_mode2idx(mode)] << 8; } else { mask44 = 0x0f; new44 = ((timings[ata_mode2idx(mode)] & 0x30) >> 2) | (timings[ata_mode2idx(mode)] & 0x03); } if (atadev->channel->unit) { mask40 <<= 16; new40 <<= 16; mask44 <<= 4; new44 <<= 4; } pci_write_config(parent, 0x40, (reg40 & ~mask40) | new40, 4); pci_write_config(parent, 0x44, (reg44 & ~mask44) | new44, 1); atadev->mode 
= mode; } /* * National chipset support functions */ int ata_national_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); /* this chip is a clone of the Cyrix chip, bugs and all */ if (pci_get_devid(dev) == ATA_SC1100) { device_set_desc(dev, "National Geode SC1100 ATA33 controller"); ctlr->chipinit = ata_national_chipinit; return 0; } return ENXIO; } static device_t nat_host = NULL; static int ata_national_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); device_t *children; int nchildren, i; if (ata_setup_interrupt(dev)) return ENXIO; /* locate the ISA part in the southbridge and enable UDMA33 */ if (!device_get_children(device_get_parent(dev), &children,&nchildren)){ for (i = 0; i < nchildren; i++) { if (pci_get_devid(children[i]) == 0x0510100b) { nat_host = children[i]; break; } } free(children, M_TEMP); } ctlr->setmode = ata_national_setmode; return 0; } static void ata_national_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); u_int32_t piotiming[] = { 0x9172d132, 0x21717121, 0x00803020, 0x20102010, 0x00100010, 0x00803020, 0x20102010, 0x00100010, 0x00100010, 0x00100010, 0x00100010 }; u_int32_t dmatiming[] = { 0x80077771, 0x80012121, 0x80002020 }; u_int32_t udmatiming[] = { 0x80921250, 0x80911140, 0x80911030 }; int error; atadev->channel->dma->alignment = 16; atadev->channel->dma->max_iosize = 63 * 1024; mode = ata_limit_mode(atadev, mode, ATA_UDMA2); error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%s setting %s on National chip\n", (error) ? 
"failed" : "success", ata_mode2str(mode)); if (!error) { if (mode >= ATA_UDMA0) { pci_write_config(parent, 0x44 + (devno << 3), udmatiming[mode & ATA_MODE_MASK], 4); } else if (mode >= ATA_WDMA0) { pci_write_config(parent, 0x44 + (devno << 3), dmatiming[mode & ATA_MODE_MASK], 4); } else { pci_write_config(parent, 0x44 + (devno << 3), pci_read_config(parent, 0x44 + (devno << 3), 4) | 0x80000000, 4); } pci_write_config(parent, 0x40 + (devno << 3), piotiming[ata_mode2idx(mode)], 4); atadev->mode = mode; } } /* * nVidia chipset support functions */ int ata_nvidia_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_NFORCE1, 0, AMDNVIDIA, NVIDIA|AMDBUG, ATA_UDMA5, "nVidia nForce" }, { ATA_NFORCE2, 0, AMDNVIDIA, NVIDIA|AMDBUG, ATA_UDMA6, "nVidia nForce2" }, { ATA_NFORCE3, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nVidia nForce3" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_nvidia_chipinit; return 0; } static int ata_nvidia_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; /* set prefetch, postwrite */ if (ctlr->chip->cfg2 & AMDBUG) pci_write_config(dev, 0x51, pci_read_config(dev, 0x51, 1) & 0x0f, 1); else pci_write_config(dev, 0x51, pci_read_config(dev, 0x51, 1) | 0xf0, 1); ctlr->setmode = ata_via_family_setmode; return 0; } /* * Promise chipset support functions */ #define ATA_PDC_APKT_OFFSET 0x00000010 #define ATA_PDC_HPKT_OFFSET 0x00000040 #define ATA_PDC_ASG_OFFSET 0x00000080 #define ATA_PDC_LSG_OFFSET 0x000000c0 #define ATA_PDC_HSG_OFFSET 0x00000100 #define ATA_PDC_CHN_OFFSET 0x00000400 #define ATA_PDC_BUF_BASE 0x00400000 #define ATA_PDC_BUF_OFFSET 0x00100000 #define ATA_PDC_MAX_HPKT 8 #define ATA_PDC_WRITE_REG 
0x00 #define ATA_PDC_WRITE_CTL 0x0e #define ATA_PDC_WRITE_END 0x08 #define ATA_PDC_WAIT_NBUSY 0x10 #define ATA_PDC_WAIT_READY 0x18 #define ATA_PDC_1B 0x20 #define ATA_PDC_2B 0x40 struct ata_promise_sx4 { struct mtx mtx; u_int32_t array[ATA_PDC_MAX_HPKT]; int head, tail; int busy; }; int ata_promise_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_PDC20246, 0, PROLD, 0x00, ATA_UDMA2, "Promise PDC20246" }, { ATA_PDC20262, 0, PRNEW, 0x00, ATA_UDMA4, "Promise PDC20262" }, { ATA_PDC20263, 0, PRNEW, 0x00, ATA_UDMA4, "Promise PDC20263" }, { ATA_PDC20265, 0, PRNEW, 0x00, ATA_UDMA5, "Promise PDC20265" }, { ATA_PDC20267, 0, PRNEW, 0x00, ATA_UDMA5, "Promise PDC20267" }, { ATA_PDC20268, 0, PRTX, PRTX4, ATA_UDMA5, "Promise PDC20268" }, { ATA_PDC20269, 0, PRTX, 0x00, ATA_UDMA6, "Promise PDC20269" }, { ATA_PDC20270, 0, PRTX, PRTX4, ATA_UDMA5, "Promise PDC20270" }, { ATA_PDC20271, 0, PRTX, 0x00, ATA_UDMA6, "Promise PDC20271" }, { ATA_PDC20275, 0, PRTX, 0x00, ATA_UDMA6, "Promise PDC20275" }, { ATA_PDC20276, 0, PRTX, PRSX6K, ATA_UDMA6, "Promise PDC20276" }, { ATA_PDC20277, 0, PRTX, 0x00, ATA_UDMA6, "Promise PDC20277" }, { ATA_PDC20318, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20318" }, { ATA_PDC20319, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20319" }, { ATA_PDC20371, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20371" }, { ATA_PDC20375, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20375" }, { ATA_PDC20376, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20376" }, { ATA_PDC20377, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20377" }, { ATA_PDC20378, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20378" }, { ATA_PDC20379, 0, PRMIO, PRSATA, ATA_SA150, "Promise PDC20379" }, { ATA_PDC20617, 0, PRMIO, PRDUAL, ATA_UDMA6, "Promise PDC20617" }, { ATA_PDC20618, 0, PRMIO, PRDUAL, ATA_UDMA6, "Promise PDC20618" }, { ATA_PDC20619, 0, PRMIO, PRDUAL, ATA_UDMA6, "Promise PDC20619" }, { ATA_PDC20620, 0, PRMIO, PRDUAL, ATA_UDMA6, 
"Promise PDC20620" }, { ATA_PDC20621, 0, PRMIO, PRSX4X, ATA_UDMA5, "Promise PDC20621" }, { ATA_PDC20622, 0, PRMIO, PRSX4X, ATA_SA150, "Promise PDC20622" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; uintptr_t devid = 0; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; /* if we are on a SuperTrak SX6000 dont attach */ if ((idx->cfg2 & PRSX6K) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE && !BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)), GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) && devid == ATA_I960RM) return ENXIO; strcpy(buffer, idx->text); /* if we are on a FastTrak TX4, adjust the interrupt resource */ if ((idx->cfg2 & PRTX4) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE && !BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)), GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) && ((devid == ATA_DEC_21150) || (devid == ATA_DEC_21150_1))) { static long start = 0, end = 0; if (pci_get_slot(dev) == 1) { bus_get_resource(dev, SYS_RES_IRQ, 0, &start, &end); strcat(buffer, " (channel 0+1)"); } else if (pci_get_slot(dev) == 2 && start && end) { bus_set_resource(dev, SYS_RES_IRQ, 0, start, end); start = end = 0; strcat(buffer, " (channel 2+3)"); } else { start = end = 0; } } sprintf(buffer, "%s %s controller", buffer, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_promise_chipinit; return 0; } static int ata_promise_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int rid = ATA_IRQ_RID; if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } switch (ctlr->chip->cfg1) { case PRNEW: /* setup clocks */ ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) | 0x0a); ctlr->dmainit = ata_promise_new_dmainit; /* FALLTHROUGH */ case PROLD: /* enable burst mode */ ATA_OUTB(ctlr->r_res1, 0x1f, ATA_INB(ctlr->r_res1, 0x1f) | 0x01); if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, 
ata_promise_old_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } break; case PRTX: if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_promise_tx2_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } break; case PRMIO: - if (ctlr->r_res1) - bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1,ctlr->r_res1); +// if (ctlr->r_res1) +// bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1,ctlr->r_res1); ctlr->r_type1 = SYS_RES_MEMORY; - ctlr->r_rid1 = 0x20; + ctlr->r_rid1 = PCIR_BAR(4); if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE))) return ENXIO; ctlr->r_type2 = SYS_RES_MEMORY; - ctlr->r_rid2 = 0x1c; + ctlr->r_rid2 = PCIR_BAR(3); if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) return ENXIO; ctlr->reset = ata_promise_mio_reset; ctlr->dmainit = ata_promise_mio_dmainit; ctlr->allocate = ata_promise_mio_allocate; if (ctlr->chip->cfg2 & PRDUAL) { ctlr->channels = ((ATA_INL(ctlr->r_res2, 0x48) & 0x01) > 0) + ((ATA_INL(ctlr->r_res2, 0x48) & 0x02) > 0) + 2; } else if (ctlr->chip->cfg2 & PRSATA) { ATA_OUTL(ctlr->r_res2, 0x06c, 0x000000ff); ctlr->channels = ((ATA_INL(ctlr->r_res2, 0x48) & 0x02) > 0) + 3; } else ctlr->channels = 4; if (ctlr->chip->cfg2 & PRSX4X) { struct ata_promise_sx4 *hpkt; u_int32_t dimm = ATA_INL(ctlr->r_res2, 0x000c0080); /* print info about cache memory */ device_printf(dev, "DIMM size %dMB @ 0x%08x%s\n", (((dimm >> 16) & 0xff)-((dimm >> 24) & 0xff)+1) << 4, ((dimm >> 24) & 0xff), ATA_INL(ctlr->r_res2, 0x000c0088) & (1<<16) ? 
" ECC enabled" : "" ); ATA_OUTL(ctlr->r_res2, 0x000c000c, (ATA_INL(ctlr->r_res2, 0x000c000c) & 0xffff0000)); ctlr->driver = malloc(sizeof(struct ata_promise_sx4), M_TEMP, M_NOWAIT | M_ZERO); hpkt = ctlr->driver; mtx_init(&hpkt->mtx, "ATA promise HPKT lock", NULL, MTX_DEF); hpkt->busy = hpkt->head = hpkt->tail = 0; if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_promise_sx4_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } } else { if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_promise_mio_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } } break; } if (ctlr->chip->max_dma >= ATA_SA150) ctlr->setmode = ata_sata_setmode; else ctlr->setmode = ata_promise_setmode; return 0; } static int ata_promise_mio_allocate(device_t dev, struct ata_channel *ch) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); int offset = (ctlr->chip->cfg2 & PRSX4X) ? 0x000c0000 : 0; int i; for (i = ATA_DATA; i <= ATA_STATUS; i++) { ch->r_io[i].res = ctlr->r_res2; ch->r_io[i].offset = offset + 0x0200 + (i << 2) + (ch->unit << 7); } ch->r_io[ATA_ALTSTAT].res = ctlr->r_res2; ch->r_io[ATA_ALTSTAT].offset = offset + 0x0238 + (ch->unit << 7); ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res2; ch->flags |= ATA_USE_16BIT; ctlr->dmainit(ch); ata_generic_hw(ch); if (ctlr->chip->cfg2 & PRSX4X) ch->hw.command = ata_promise_sx4_command; else ch->hw.command = ata_promise_mio_command; return 0; } static void ata_promise_mio_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; u_int32_t vector = ATA_INL(ctlr->r_res2, 0x00040); u_int32_t status = 0; int unit; if (ctlr->chip->cfg2 & PRSATA) { status = ATA_INL(ctlr->r_res2, 0x06c); ATA_OUTL(ctlr->r_res2, 0x06c, status & 0x000000ff); } for (unit = 0; unit < ctlr->channels; unit++) { if (status & (0x00000011 << unit)) if ((ch = ctlr->interrupt[unit].argument) && ch->reset) ch->reset(ch); if (vector & (1 
<< (unit + 1))) if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); } } static void ata_promise_sx4_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; u_int32_t vector = ATA_INL(ctlr->r_res2, 0x000c0480); int unit; for (unit = 0; unit < ctlr->channels; unit++) { if (vector & (1 << (unit + 1))) if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); if (vector & (1 << (unit + 5))) if ((ch = ctlr->interrupt[unit].argument)) ata_promise_queue_hpkt(ctlr, htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET)); if (vector & (1 << (unit + 9))) { ata_promise_next_hpkt(ctlr); if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); } if (vector & (1 << (unit + 13))) { ata_promise_next_hpkt(ctlr); if ((ch = ctlr->interrupt[unit].argument)) ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7), htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET)); } } } static int ata_promise_mio_dmastart(struct ata_channel *ch) { + ch->flags |= ATA_DMA_ACTIVE; return 0; } static int ata_promise_mio_dmastop(struct ata_channel *ch) { + ch->flags &= ~ATA_DMA_ACTIVE; /* get status XXX SOS */ return 0; } static void ata_promise_mio_dmainit(struct ata_channel *ch) { ata_dmainit(ch); if (ch->dma) { ch->dma->start = ata_promise_mio_dmastart; ch->dma->stop = ata_promise_mio_dmastop; } } static void ata_promise_mio_reset(struct ata_channel *ch) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); if (ctlr->chip->cfg2 & PRSX4X) { struct ata_promise_sx4 *hpktp = ctlr->driver; ATA_OUTL(ctlr->r_res2, 0xc0260 + (ch->unit << 7), ch->unit + 1); DELAY(1000); ATA_OUTL(ctlr->r_res2, 0xc0260 + (ch->unit << 7), (ATA_INL(ctlr->r_res2, 0xc0260 + (ch->unit << 7)) & ~0x00003f9f) | (ch->unit + 1)); mtx_lock(&hpktp->mtx); ATA_OUTL(ctlr->r_res2, 0xc012c, (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f) | (1 << 11)); DELAY(10); ATA_OUTL(ctlr->r_res2, 0xc012c, 
(ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f)); mtx_unlock(&hpktp->mtx); } else { if (ctlr->chip->cfg2 & PRSATA) ATA_OUTL(ctlr->r_res2, 0x06c, (0x00110000 << ch->unit)); ATA_OUTL(ctlr->r_res2, 0x0048, ATA_INL(ctlr->r_res2, 0x0048) & ~((1 << 12) << (ch->unit))); DELAY(10); ATA_OUTL(ctlr->r_res2, 0x0048, ATA_INL(ctlr->r_res2, 0x0048) | ((1 << 12) << (ch->unit))); DELAY(100); if (ctlr->chip->cfg2 & PRSATA) ATA_OUTL(ctlr->r_res2, 0x06c, (0x00000011 << ch->unit)); ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), 0x00000800); DELAY(1000); ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (ATA_INL(ctlr->r_res2, 0x0260 + (ch->unit << 7)) & ~0x00003f9f) | (ch->unit + 1)); } } static int ata_promise_mio_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(atadev->channel->dev)); u_int32_t *wordp = (u_int32_t *)atadev->channel->dma->workspace; ATA_OUTL(ctlr->r_res2, (atadev->channel->unit + 1) << 2, 0x00000001); if (command != ATA_READ_DMA && command != ATA_WRITE_DMA) return ata_generic_command(atadev, command, lba, count, feature); if (command == ATA_READ_DMA) wordp[0] = htole32(0x04 | ((atadev->channel->unit+1)<<16) | (0x00<<24)); if (command == ATA_WRITE_DMA) wordp[0] = htole32(0x00 | ((atadev->channel->unit+1)<<16) | (0x00<<24)); wordp[1] = atadev->channel->dma->mdmatab; wordp[2] = 0; ata_promise_apkt((u_int8_t*)wordp, atadev, command, lba, count, feature); ATA_OUTL(ctlr->r_res2, 0x0240 + (atadev->channel->unit << 7), atadev->channel->dma->wdmatab); return 0; } static int ata_promise_sx4_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature) { struct ata_channel *ch = atadev->channel; struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); caddr_t window = rman_get_virtual(ctlr->r_res1); u_int32_t *wordp; int i, idx, length = 0; if (command == ATA_ATA_IDENTIFY) { 
ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001); return ata_generic_command(atadev, command, lba, count, feature); } if (ch->running->flags & ATA_R_CONTROL) { wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET); wordp[0] = htole32(0x08 | ((ch->unit + 1)<<16) | (0x00 << 24)); wordp[1] = 0; wordp[2] = 0; ata_promise_apkt((u_int8_t *)wordp, atadev, command, lba,count,feature); ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7), htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_APKT_OFFSET)); return 0; } if (command != ATA_READ_DMA && command != ATA_WRITE_DMA) return -1; wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HSG_OFFSET); i = idx = 0; do { wordp[idx++] = htole32(ch->dma->dmatab[i].base); wordp[idx++] = htole32(ch->dma->dmatab[i].count & ~ATA_DMA_EOT); length += (ch->dma->dmatab[i].count & ~ATA_DMA_EOT); } while (!(ch->dma->dmatab[i++].count & ATA_DMA_EOT)); wordp[idx - 1] |= htole32(ATA_DMA_EOT); wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_LSG_OFFSET); wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE); wordp[1] = htole32((count * DEV_BSIZE) | ATA_DMA_EOT); wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_ASG_OFFSET); wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE); wordp[1] = htole32((count * DEV_BSIZE) | ATA_DMA_EOT); wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET); if (command == ATA_READ_DMA) wordp[0] = htole32(0x14 | ((ch->unit + 9)<<16) | ((ch->unit + 5)<<24)); if (command == ATA_WRITE_DMA) wordp[0] = htole32(0x00 | ((ch->unit + 13) << 16) | (0x00 << 24)); wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_HSG_OFFSET); wordp[2] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_LSG_OFFSET); wordp[3] = 0; wordp = 
(u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET); if (command == ATA_READ_DMA) { wordp[0] = htole32(0x04 | ((ch->unit + 5) << 16) | (0x00 << 24)); wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET); } if (command == ATA_WRITE_DMA) { wordp[0] = htole32(0x10 | ((ch->unit + 1)<<16) | ((ch->unit + 13)<<24)); wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET); } wordp[2] = 0; ata_promise_apkt((u_int8_t *)wordp, atadev, command, lba, count, feature); ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001); if (command == ATA_READ_DMA) { ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 5) << 2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 9) << 2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7), htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_APKT_OFFSET)); } if (command == ATA_WRITE_DMA) { ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 13) << 2), 0x00000001); ata_promise_queue_hpkt(ctlr, htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET)); } return 0; } static int ata_promise_apkt(u_int8_t *bytep, struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature) { int i = 12; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_PDC_WAIT_NBUSY|ATA_DRIVE; bytep[i++] = ATA_D_IBM | ATA_D_LBA | atadev->unit; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_CTL; bytep[i++] = ATA_A_4BIT; if ((lba > 268435455 || count > 256) && atadev->param && (atadev->param->support.command2 & ATA_SUPPORT_ADDRESS48)) { atadev->channel->flags |= ATA_48BIT_ACTIVE; if (command == ATA_READ_DMA) command = ATA_READ_DMA48; if (command == ATA_WRITE_DMA) command = ATA_WRITE_DMA48; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_FEATURE; bytep[i++] = (feature >> 8) & 0xff; bytep[i++] = feature & 0xff; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_COUNT; bytep[i++] = (count >> 8) & 0xff; bytep[i++] = 
count & 0xff; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_SECTOR; bytep[i++] = (lba >> 24) & 0xff; bytep[i++] = lba & 0xff; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_LSB; bytep[i++] = (lba >> 32) & 0xff; bytep[i++] = (lba >> 8) & 0xff; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_MSB; bytep[i++] = (lba >> 40) & 0xff; bytep[i++] = (lba >> 16) & 0xff; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE; bytep[i++] = ATA_D_LBA | atadev->unit; } else { atadev->channel->flags &= ~ATA_48BIT_ACTIVE; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_FEATURE; bytep[i++] = feature; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_COUNT; bytep[i++] = count; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_SECTOR; bytep[i++] = lba & 0xff; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_LSB; bytep[i++] = (lba >> 8) & 0xff; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_MSB; bytep[i++] = (lba >> 16) & 0xff; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE; bytep[i++] = (atadev->flags & ATA_D_USE_CHS ? 
0 : ATA_D_LBA) | ATA_D_IBM | atadev->unit | ((lba >> 24) & 0xf); } bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_END | ATA_CMD; bytep[i++] = command; return i; } static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt) { struct ata_promise_sx4 *hpktp = ctlr->driver; mtx_lock(&hpktp->mtx); if (hpktp->tail == hpktp->head && !hpktp->busy) { ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt); hpktp->busy = 1; } else hpktp->array[(hpktp->head++) & (ATA_PDC_MAX_HPKT - 1)] = hpkt; mtx_unlock(&hpktp->mtx); } static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr) { struct ata_promise_sx4 *hpktp = ctlr->driver; mtx_lock(&hpktp->mtx); if (hpktp->tail != hpktp->head) { ATA_OUTL(ctlr->r_res2, 0x000c0100, hpktp->array[(hpktp->tail++) & (ATA_PDC_MAX_HPKT - 1)]); } else hpktp->busy = 0; mtx_unlock(&hpktp->mtx); } static void ata_promise_tx2_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; /* implement this as a toggle instead to balance load XXX */ for (unit = 0; unit < 2; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; ATA_IDX_OUTB(ch, ATA_BMDEVSPEC_0, 0x0b); if (ATA_IDX_INB(ch, ATA_BMDEVSPEC_1) & 0x20) { if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } } static void ata_promise_old_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; /* implement this as a toggle instead to balance load XXX */ for (unit = 0; unit < 2; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; if (ATA_INL(ctlr->r_res1, 0x1c) & (ch->unit ? 
0x00004000 : 0x00000400)){ if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } } static int ata_promise_new_dmastart(struct ata_channel *ch) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); if (ch->flags & ATA_48BIT_ACTIVE) { ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) | (ch->unit ? 0x08 : 0x02)); ATA_OUTL(ctlr->r_res1, 0x20, ((ch->dma->flags & ATA_DMA_READ) ? 0x05000000 : 0x06000000) | (ch->dma->cur_iosize >> 1)); } ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) | (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR))); ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->mdmatab); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ((ch->dma->flags & ATA_DMA_READ) ? ATA_BMCMD_WRITE_READ : 0) | ATA_BMCMD_START_STOP); + ch->flags |= ATA_DMA_ACTIVE; return 0; } static int ata_promise_new_dmastop(struct ata_channel *ch) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); int error; if (ch->flags & ATA_48BIT_ACTIVE) { ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) & ~(ch->unit ? 
0x08 : 0x02)); ATA_OUTL(ctlr->r_res1, 0x20, 0); } error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); + ch->flags &= ~ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); return error; } static void ata_promise_new_dmainit(struct ata_channel *ch) { ata_dmainit(ch); if (ch->dma) { ch->dma->start = ata_promise_new_dmastart; ch->dma->stop = ata_promise_new_dmastop; } } static void ata_promise_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); int error; u_int32_t timings33[][2] = { /* PROLD PRNEW mode */ { 0x004ff329, 0x004fff2f }, /* PIO 0 */ { 0x004fec25, 0x004ff82a }, /* PIO 1 */ { 0x004fe823, 0x004ff026 }, /* PIO 2 */ { 0x004fe622, 0x004fec24 }, /* PIO 3 */ { 0x004fe421, 0x004fe822 }, /* PIO 4 */ { 0x004567f3, 0x004acef6 }, /* MWDMA 0 */ { 0x004467f3, 0x0048cef6 }, /* MWDMA 1 */ { 0x004367f3, 0x0046cef6 }, /* MWDMA 2 */ { 0x004367f3, 0x0046cef6 }, /* UDMA 0 */ { 0x004247f3, 0x00448ef6 }, /* UDMA 1 */ { 0x004127f3, 0x00436ef6 }, /* UDMA 2 */ { 0, 0x00424ef6 }, /* UDMA 3 */ { 0, 0x004127f3 }, /* UDMA 4 */ { 0, 0x004127f3 } /* UDMA 5 */ }; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); switch (ctlr->chip->cfg1) { case PROLD: case PRNEW: if (mode > ATA_UDMA2 && (pci_read_config(parent, 0x50, 2) & (atadev->channel->unit ? 
1 << 11 : 1 << 10))) { ata_prtdev(atadev, "DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } if (ATAPI_DEVICE(atadev) && mode > ATA_PIO_MAX) mode = ata_limit_mode(atadev, mode, ATA_PIO_MAX); break; case PRTX: ATA_IDX_OUTB(atadev->channel, ATA_BMDEVSPEC_0, 0x0b); if (mode > ATA_UDMA2 && ATA_IDX_INB(atadev->channel, ATA_BMDEVSPEC_1) & 0x04) { ata_prtdev(atadev, "DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } break; case PRMIO: if (mode > ATA_UDMA2 && (ATA_INL(ctlr->r_res2, (ctlr->chip->cfg2 & PRSX4X ? 0x000c0260 : 0x0260) + (atadev->channel->unit << 7)) & 0x01000000)) { ata_prtdev(atadev, "DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } break; } error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { if (ctlr->chip->cfg1 < PRTX) pci_write_config(parent, 0x60 + (devno << 2), timings33[ctlr->chip->cfg1][ata_mode2idx(mode)],4); atadev->mode = mode; } return; } /* * ServerWorks chipset support functions */ int ata_serverworks_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_ROSB4, 0x00, SWKS33, 0x00, ATA_UDMA2, "ServerWorks ROSB4" }, { ATA_CSB5, 0x92, SWKS100, 0x00, ATA_UDMA5, "ServerWorks CSB5" }, { ATA_CSB5, 0x00, SWKS66, 0x00, ATA_UDMA4, "ServerWorks CSB5" }, { ATA_CSB6, 0x00, SWKS100, 0x00, ATA_UDMA5, "ServerWorks CSB6" }, { ATA_CSB6_1, 0x00, SWKS66, 0x00, ATA_UDMA4, "ServerWorks CSB6" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_serverworks_chipinit; return 0; } static int ata_serverworks_chipinit(device_t dev) { struct ata_pci_controller 
*ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; if (ctlr->chip->cfg1 == SWKS33) { device_t *children; int nchildren, i; /* locate the ISA part in the southbridge and enable UDMA33 */ if (!device_get_children(device_get_parent(dev), &children,&nchildren)){ for (i = 0; i < nchildren; i++) { if (pci_get_devid(children[i]) == ATA_ROSB4_ISA) { pci_write_config(children[i], 0x64, (pci_read_config(children[i], 0x64, 4) & ~0x00002000) | 0x00004000, 4); break; } } free(children, M_TEMP); } } else { pci_write_config(dev, 0x5a, (pci_read_config(dev, 0x5a, 1) & ~0x40) | (ctlr->chip->cfg1 == SWKS100) ? 0x03 : 0x02, 1); } ctlr->setmode = ata_serverworks_setmode; return 0; } static void ata_serverworks_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); int offset = devno ^ 0x01; int error; u_int8_t piotimings[] = { 0x5d, 0x47, 0x34, 0x22, 0x20, 0x34, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 }; u_int8_t dmatimings[] = { 0x77, 0x21, 0x20 }; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); mode = ata_check_80pin(atadev, mode); error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? 
"FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { if (mode >= ATA_UDMA0) { pci_write_config(parent, 0x56, (pci_read_config(parent, 0x56, 2) & ~(0xf << (devno << 2))) | ((mode & ATA_MODE_MASK) << (devno << 2)), 2); pci_write_config(parent, 0x54, pci_read_config(parent, 0x54, 1) | (0x01 << devno), 1); pci_write_config(parent, 0x44, (pci_read_config(parent, 0x44, 4) & ~(0xff << (offset << 8))) | (dmatimings[2] << (offset << 8)), 4); } else if (mode >= ATA_WDMA0) { pci_write_config(parent, 0x54, pci_read_config(parent, 0x54, 1) & ~(0x01 << devno), 1); pci_write_config(parent, 0x44, (pci_read_config(parent, 0x44, 4) & ~(0xff << (offset << 8))) | (dmatimings[mode & ATA_MODE_MASK]<<(offset<<8)),4); } else pci_write_config(parent, 0x54, pci_read_config(parent, 0x54, 1) & ~(0x01 << devno), 1); pci_write_config(parent, 0x40, (pci_read_config(parent, 0x40, 4) & ~(0xff << (offset << 8))) | (piotimings[ata_mode2idx(mode)] << (offset << 8)), 4); atadev->mode = mode; } } /* * Silicon Image (former CMD) chipset support functions */ int ata_sii_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_SII3114, 0x00, SIIMEMIO, SII4CH, ATA_SA150, "SiI 3114" }, { ATA_SII3512, 0x02, SIIMEMIO, 0, ATA_SA150, "SiI 3512" }, { ATA_SII3112, 0x02, SIIMEMIO, 0, ATA_SA150, "SiI 3112" }, { ATA_SII3112_1, 0x02, SIIMEMIO, 0, ATA_SA150, "SiI 3112" }, { ATA_SII3512, 0x00, SIIMEMIO, SIIBUG, ATA_SA150, "SiI 3512" }, { ATA_SII3112, 0x00, SIIMEMIO, SIIBUG, ATA_SA150, "SiI 3112" }, { ATA_SII3112_1, 0x00, SIIMEMIO, SIIBUG, ATA_SA150, "SiI 3112" }, { ATA_SII0680, 0x00, SIIMEMIO, SIISETCLK, ATA_UDMA6, "SiI 0680" }, { ATA_CMD649, 0x00, 0, SIIINTR, ATA_UDMA5, "CMD 649" }, { ATA_CMD648, 0x00, 0, SIIINTR, ATA_UDMA4, "CMD 648" }, { ATA_CMD646, 0x07, 0, 0, ATA_UDMA2, "CMD 646U2" }, { ATA_CMD646, 0x00, 0, 0, ATA_WDMA2, "CMD 646" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) 
return ENXIO; sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_sii_chipinit; return 0; } static int ata_sii_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int rid = ATA_IRQ_RID; if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if (ctlr->chip->cfg1 == SIIMEMIO) { if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_sii_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } ctlr->r_type2 = SYS_RES_MEMORY; - ctlr->r_rid2 = 0x24; + ctlr->r_rid2 = PCIR_BAR(5); if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) return ENXIO; if (ctlr->chip->cfg2 & SIISETCLK) { if ((pci_read_config(dev, 0x8a, 1) & 0x30) != 0x10) pci_write_config(dev, 0x8a, (pci_read_config(dev, 0x8a, 1) & 0xcf)|0x10,1); if ((pci_read_config(dev, 0x8a, 1) & 0x30) != 0x10) device_printf(dev, "%s could not set ATA133 clock\n", ctlr->chip->text); } /* enable interrupt as BIOS might not */ pci_write_config(dev, 0x8a, (pci_read_config(dev, 0x8a, 1) & 0x3f), 1); if (ctlr->chip->cfg2 & SII4CH) { ATA_OUTL(ctlr->r_res2, 0x0200, 0x00000002); ctlr->channels = 4; } ctlr->allocate = ata_sii_allocate; if (ctlr->chip->max_dma >= ATA_SA150) { ctlr->reset = ata_sii_reset; ctlr->setmode = ata_sata_setmode; } else ctlr->setmode = ata_sii_setmode; } else { if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ctlr->chip->cfg2 & SIIINTR ? 
ata_cmd_intr : ata_cmd_old_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } if ((pci_read_config(dev, 0x51, 1) & 0x08) != 0x08) { device_printf(dev, "HW has secondary channel disabled\n"); ctlr->channels = 1; } /* enable interrupt as BIOS might not */ pci_write_config(dev, 0x71, 0x01, 1); ctlr->setmode = ata_cmd_setmode; } return 0; } static int ata_sii_allocate(device_t dev, struct ata_channel *ch) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); int unit01 = (ch->unit & 1), unit10 = (ch->unit & 2); int i; for (i = ATA_DATA; i <= ATA_STATUS; i++) { ch->r_io[i].res = ctlr->r_res2; ch->r_io[i].offset = 0x80 + i + (unit01 << 6) + (unit10 << 8); } ch->r_io[ATA_ALTSTAT].res = ctlr->r_res2; ch->r_io[ATA_ALTSTAT].offset = 0x8a + (unit01 << 6) + (unit10 << 8); ch->r_io[ATA_BMCMD_PORT].res = ctlr->r_res2; ch->r_io[ATA_BMCMD_PORT].offset = 0x00 + (unit01 << 3) + (unit10 << 8); ch->r_io[ATA_BMSTAT_PORT].res = ctlr->r_res2; ch->r_io[ATA_BMSTAT_PORT].offset = 0x02 + (unit01 << 3) + (unit10 << 8); ch->r_io[ATA_BMDTP_PORT].res = ctlr->r_res2; ch->r_io[ATA_BMDTP_PORT].offset = 0x04 + (unit01 << 3) + (unit10 << 8); ch->r_io[ATA_BMDEVSPEC_0].res = ctlr->r_res2; ch->r_io[ATA_BMDEVSPEC_0].offset = 0xa1 + (unit01 << 6) + (unit10 << 8); ch->r_io[ATA_BMDEVSPEC_1].res = ctlr->r_res2; ch->r_io[ATA_BMDEVSPEC_1].offset = 0x100 + (unit01 << 7) + (unit10 << 8); ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res2; if (ctlr->chip->max_dma >= ATA_SA150) ch->flags |= ATA_NO_SLAVE; ctlr->dmainit(ch); if (ctlr->chip->cfg2 & SIIBUG) ch->dma->boundary = 8 * 1024; ata_generic_hw(ch); return 0; } static void ata_sii_reset(struct ata_channel *ch) { ATA_IDX_OUTL(ch, ATA_BMDEVSPEC_1, 0x00000001); DELAY(25000); ATA_IDX_OUTL(ch, ATA_BMDEVSPEC_1, 0x00000000); DELAY(250000); } static void ata_sii_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; /* implement this as a toggle instead to balance load 
XXX */ for (unit = 0; unit < ctlr->channels; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; if (ATA_IDX_INB(ch, ATA_BMDEVSPEC_0) & 0x08) { if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if (!(bmstat & ATA_BMSTAT_INTERRUPT)) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } } static void ata_cmd_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; u_int8_t reg71; int unit; /* implement this as a toggle instead to balance load XXX */ for (unit = 0; unit < 2; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; if (((reg71 = pci_read_config(device_get_parent(ch->dev), 0x71, 1)) & (ch->unit ? 0x08 : 0x04))) { pci_write_config(device_get_parent(ch->dev), 0x71, reg71 & ~(ch->unit ? 0x04 : 0x08), 1); if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } } static void ata_cmd_old_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; /* implement this as a toggle instead to balance load XXX */ for (unit = 0; unit < 2; unit++) { if (!(ch = ctlr->interrupt[unit].argument)) continue; if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) continue; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } ctlr->interrupt[unit].function(ch); } } static void ata_sii_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = 
device_get_softc(parent); int rego = (atadev->channel->unit << 4) + (ATA_DEV(atadev->unit) << 1); int mreg = atadev->channel->unit ? 0x84 : 0x80; int mask = 0x03 << (ATA_DEV(atadev->unit) << 2); int mval = pci_read_config(parent, mreg, 1) & ~mask; int error; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg2 & SIISETCLK) { if (mode > ATA_UDMA2 && (pci_read_config(parent, 0x79, 1) & (atadev->channel->unit ? 0x02 : 0x01))) { ata_prtdev(atadev, "DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } } else mode = ata_check_80pin(atadev, mode); error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (error) return; if (mode >= ATA_UDMA0) { u_int8_t udmatimings[] = { 0xf, 0xb, 0x7, 0x5, 0x3, 0x2, 0x1 }; u_int8_t ureg = 0xac + rego; pci_write_config(parent, mreg, mval | (0x03 << (ATA_DEV(atadev->unit) << 2)), 1); pci_write_config(parent, ureg, (pci_read_config(parent, ureg, 1) & ~0x3f) | udmatimings[mode & ATA_MODE_MASK], 1); } else if (mode >= ATA_WDMA0) { u_int8_t dreg = 0xa8 + rego; u_int16_t dmatimings[] = { 0x2208, 0x10c2, 0x10c1 }; pci_write_config(parent, mreg, mval | (0x02 << (ATA_DEV(atadev->unit) << 2)), 1); pci_write_config(parent, dreg, dmatimings[mode & ATA_MODE_MASK], 2); } else { u_int8_t preg = 0xa4 + rego; u_int16_t piotimings[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; pci_write_config(parent, mreg, mval | (0x01 << (ATA_DEV(atadev->unit) << 2)), 1); pci_write_config(parent, preg, piotimings[mode & ATA_MODE_MASK], 2); } atadev->mode = mode; } static void ata_cmd_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); int error; mode = ata_limit_mode(atadev, mode, 
ctlr->chip->max_dma); mode = ata_check_80pin(atadev, mode); error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { int treg = 0x54 + ((devno < 3) ? (devno << 1) : 7); int ureg = atadev->channel->unit ? 0x7b : 0x73; if (mode >= ATA_UDMA0) { int udmatimings[][2] = { { 0x31, 0xc2 }, { 0x21, 0x82 }, { 0x11, 0x42 }, { 0x25, 0x8a }, { 0x15, 0x4a }, { 0x05, 0x0a } }; u_int8_t umode = pci_read_config(parent, ureg, 1); umode &= ~(atadev->unit == ATA_MASTER ? 0x35 : 0xca); umode |= udmatimings[mode & ATA_MODE_MASK][ATA_DEV(atadev->unit)]; pci_write_config(parent, ureg, umode, 1); } else if (mode >= ATA_WDMA0) { int dmatimings[] = { 0x87, 0x32, 0x3f }; pci_write_config(parent, treg, dmatimings[mode & ATA_MODE_MASK], 1); pci_write_config(parent, ureg, pci_read_config(parent, ureg, 1) & ~(atadev->unit == ATA_MASTER ? 0x35 : 0xca), 1); } else { int piotimings[] = { 0xa9, 0x57, 0x44, 0x32, 0x3f }; pci_write_config(parent, treg, piotimings[(mode & ATA_MODE_MASK) - ATA_PIO0], 1); pci_write_config(parent, ureg, pci_read_config(parent, ureg, 1) & ~(atadev->unit == ATA_MASTER ? 
0x35 : 0xca), 1); } atadev->mode = mode; } } /* * SiS chipset support functions */ int ata_sis_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_SIS964_1,0x00, SISSATA, 0, ATA_SA150, "SiS 964" }, /* south */ { ATA_SIS964, 0x00, SIS133NEW, 0, ATA_UDMA6, "SiS 964" }, /* south */ { ATA_SIS963, 0x00, SIS133NEW, 0, ATA_UDMA6, "SiS 963" }, /* south */ { ATA_SIS962, 0x00, SIS133NEW, 0, ATA_UDMA6, "SiS 962" }, /* south */ { ATA_SIS745, 0x00, SIS100NEW, 0, ATA_UDMA5, "SiS 745" }, /* 1chip */ { ATA_SIS735, 0x00, SIS100NEW, 0, ATA_UDMA5, "SiS 735" }, /* 1chip */ { ATA_SIS733, 0x00, SIS100NEW, 0, ATA_UDMA5, "SiS 733" }, /* 1chip */ { ATA_SIS730, 0x00, SIS100OLD, 0, ATA_UDMA5, "SiS 730" }, /* 1chip */ { ATA_SIS635, 0x00, SIS100NEW, 0, ATA_UDMA5, "SiS 635" }, /* 1chip */ { ATA_SIS633, 0x00, SIS100NEW, 0, ATA_UDMA5, "SiS 633" }, /* unknown */ { ATA_SIS630, 0x30, SIS100OLD, 0, ATA_UDMA5, "SiS 630S"}, /* 1chip */ { ATA_SIS630, 0x00, SIS66, 0, ATA_UDMA4, "SiS 630" }, /* 1chip */ { ATA_SIS620, 0x00, SIS66, 0, ATA_UDMA4, "SiS 620" }, /* 1chip */ { ATA_SIS550, 0x00, SIS66, 0, ATA_UDMA5, "SiS 550" }, { ATA_SIS540, 0x00, SIS66, 0, ATA_UDMA4, "SiS 540" }, { ATA_SIS530, 0x00, SIS66, 0, ATA_UDMA4, "SiS 530" }, { ATA_SIS5513, 0xc2, SIS33, 1, ATA_UDMA2, "SiS 5513" }, { ATA_SIS5513, 0x00, SIS33, 1, ATA_WDMA2, "SiS 5513" }, { 0, 0, 0, 0, 0, 0 }}; char buffer[64]; int found = 0; if (!(idx = ata_find_chip(dev, ids, -pci_get_slot(dev)))) return ENXIO; if (idx->cfg2 && !found) { u_int8_t reg57 = pci_read_config(dev, 0x57, 1); pci_write_config(dev, 0x57, (reg57 & 0x7f), 1); if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == ATA_SIS5518) { found = 1; idx->cfg1 = SIS133NEW; idx->max_dma = ATA_UDMA6; sprintf(buffer, "SiS 962/963 %s controller", ata_mode2str(idx->max_dma)); } pci_write_config(dev, 0x57, reg57, 1); } if (idx->cfg2 && !found) { u_int8_t reg4a = pci_read_config(dev, 0x4a, 1); pci_write_config(dev, 
0x4a, (reg4a | 0x10), 1); if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == ATA_SIS5517) { struct ata_chip_id id[] = {{ ATA_SISSOUTH, 0x10, 0, 0, 0, "" }, { 0, 0, 0, 0, 0, 0 }}; found = 1; if (ata_find_chip(dev, id, pci_get_slot(dev))) { idx->cfg1 = SIS133OLD; idx->max_dma = ATA_UDMA6; } else { idx->cfg1 = SIS100NEW; idx->max_dma = ATA_UDMA5; } sprintf(buffer, "SiS 961 %s controller",ata_mode2str(idx->max_dma)); } pci_write_config(dev, 0x4a, reg4a, 1); } if (!found) sprintf(buffer,"%s %s controller",idx->text,ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_sis_chipinit; return 0; } static int ata_sis_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; switch (ctlr->chip->cfg1) { case SIS33: break; case SIS66: case SIS100OLD: pci_write_config(dev, 0x52, pci_read_config(dev, 0x52, 1) & ~0x04, 1); break; case SIS100NEW: case SIS133OLD: pci_write_config(dev, 0x49, pci_read_config(dev, 0x49, 1) & ~0x01, 1); break; case SIS133NEW: pci_write_config(dev, 0x50, pci_read_config(dev, 0x50, 2) | 0x0008, 2); pci_write_config(dev, 0x52, pci_read_config(dev, 0x52, 2) | 0x0008, 2); break; case SISSATA: pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2); ctlr->setmode = ata_sata_setmode; return 0; default: return ENXIO; } ctlr->setmode = ata_sis_setmode; return 0; } static void ata_sis_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); int error; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg1 == SIS133NEW) { if (mode > ATA_UDMA2 && pci_read_config(parent, atadev->channel->unit?0x52:0x50,2)&0x8000){ ata_prtdev(atadev, "DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } } else { if (mode > 
ATA_UDMA2 && pci_read_config(parent, 0x48, 1)&(atadev->channel->unit?0x20:0x10)){ ata_prtdev(atadev, "DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } } error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { switch (ctlr->chip->cfg1) { case SIS133NEW: { u_int32_t timings[] = { 0x28269008, 0x0c266008, 0x04263008, 0x0c0a3008, 0x05093008, 0x22196008, 0x0c0a3008, 0x05093008, 0x050939fc, 0x050936ac, 0x0509347c, 0x0509325c, 0x0509323c, 0x0509322c, 0x0509321c}; u_int32_t reg; reg = (pci_read_config(parent, 0x57, 1)&0x40?0x70:0x40)+(devno<<2); pci_write_config(parent, reg, timings[ata_mode2idx(mode)], 4); break; } case SIS133OLD: { u_int16_t timings[] = { 0x00cb, 0x0067, 0x0044, 0x0033, 0x0031, 0x0044, 0x0033, 0x0031, 0x8f31, 0x8a31, 0x8731, 0x8531, 0x8331, 0x8231, 0x8131 }; u_int16_t reg = 0x40 + (devno << 1); pci_write_config(parent, reg, timings[ata_mode2idx(mode)], 2); break; } case SIS100NEW: { u_int16_t timings[] = { 0x00cb, 0x0067, 0x0044, 0x0033, 0x0031, 0x0044, 0x0033, 0x0031, 0x8b31, 0x8731, 0x8531, 0x8431, 0x8231, 0x8131 }; u_int16_t reg = 0x40 + (devno << 1); pci_write_config(parent, reg, timings[ata_mode2idx(mode)], 2); break; } case SIS100OLD: case SIS66: case SIS33: { u_int16_t timings[] = { 0x0c0b, 0x0607, 0x0404, 0x0303, 0x0301, 0x0404, 0x0303, 0x0301, 0xf301, 0xd301, 0xb301, 0xa301, 0x9301, 0x8301 }; u_int16_t reg = 0x40 + (devno << 1); pci_write_config(parent, reg, timings[ata_mode2idx(mode)], 2); break; } } atadev->mode = mode; } } /* VIA chipsets */ int ata_via_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_VIA82C586, 0x02, VIA33, 0x00, ATA_UDMA2, "VIA 82C586B" }, { ATA_VIA82C586, 0x00, VIA33, 0x00, ATA_WDMA2, "VIA 82C586" }, { ATA_VIA82C596, 0x12, VIA66, VIACLK, 
ATA_UDMA4, "VIA 82C596B" }, { ATA_VIA82C596, 0x00, VIA33, 0x00, ATA_UDMA2, "VIA 82C596" }, { ATA_VIA82C686, 0x40, VIA100, VIABUG, ATA_UDMA5, "VIA 82C686B"}, { ATA_VIA82C686, 0x10, VIA66, VIACLK, ATA_UDMA4, "VIA 82C686A" }, { ATA_VIA82C686, 0x00, VIA33, 0x00, ATA_UDMA2, "VIA 82C686" }, { ATA_VIA8231, 0x00, VIA100, VIABUG, ATA_UDMA5, "VIA 8231" }, { ATA_VIA8233, 0x00, VIA100, 0x00, ATA_UDMA5, "VIA 8233" }, { ATA_VIA8233C, 0x00, VIA100, 0x00, ATA_UDMA5, "VIA 8233C" }, { ATA_VIA8233A, 0x00, VIA133, 0x00, ATA_UDMA6, "VIA 8233A" }, { ATA_VIA8235, 0x00, VIA133, 0x00, ATA_UDMA6, "VIA 8235" }, { ATA_VIA8237, 0x00, VIA133, 0x00, ATA_UDMA6, "VIA 8237" }, { 0, 0, 0, 0, 0, 0 }}; static struct ata_chip_id new_ids[] = {{ ATA_VIA6410, 0x00, 0x00, 0x00, ATA_UDMA6, "VIA 6410" }, { ATA_VIA6420, 0x00, 0x00, 0x00, ATA_SA150, "VIA 6420" }, { 0, 0, 0, 0, 0, 0 }}; char buffer[64]; if (pci_get_devid(dev) == ATA_VIA82C571) { if (!(idx = ata_find_chip(dev, ids, -99))) return ENXIO; } else { if (!(idx = ata_match_chip(dev, new_ids))) return ENXIO; } sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_via_chipinit; return 0; } static int ata_via_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; if (ctlr->chip->max_dma >= ATA_SA150) { pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2); ctlr->setmode = ata_sata_setmode; return 0; } /* prepare for ATA-66 on the 82C686a and 82C596b */ if (ctlr->chip->cfg2 & VIACLK) pci_write_config(dev, 0x50, 0x030b030b, 4); /* the southbridge might need the data corruption fix */ if (ctlr->chip->cfg2 & VIABUG) ata_via_southbridge_fixup(dev); /* set fifo configuration half'n'half */ pci_write_config(dev, 0x43, (pci_read_config(dev, 0x43, 1) & 0x90) | 0x2a, 1); /* set status register read retry */ pci_write_config(dev, 0x44, pci_read_config(dev, 0x44, 1) | 
0x08, 1); /* set DMA read & end-of-sector fifo flush */ pci_write_config(dev, 0x46, (pci_read_config(dev, 0x46, 1) & 0x0c) | 0xf0, 1); /* set sector size */ pci_write_config(dev, 0x60, DEV_BSIZE, 2); pci_write_config(dev, 0x68, DEV_BSIZE, 2); ctlr->setmode = ata_via_family_setmode; return 0; } static void ata_via_southbridge_fixup(device_t dev) { device_t *children; int nchildren, i; if (device_get_children(device_get_parent(dev), &children, &nchildren)) return; for (i = 0; i < nchildren; i++) { if (pci_get_devid(children[i]) == ATA_VIA8363 || pci_get_devid(children[i]) == ATA_VIA8371 || pci_get_devid(children[i]) == ATA_VIA8662 || pci_get_devid(children[i]) == ATA_VIA8361) { u_int8_t reg76 = pci_read_config(children[i], 0x76, 1); if ((reg76 & 0xf0) != 0xd0) { device_printf(dev, "Correcting VIA config for southbridge data corruption bug\n"); pci_write_config(children[i], 0x75, 0x80, 1); pci_write_config(children[i], 0x76, (reg76 & 0x0f) | 0xd0, 1); } break; } } free(children, M_TEMP); } /* common code for VIA, AMD & nVidia */ static void ata_via_family_setmode(struct ata_device *atadev, int mode) { device_t parent = device_get_parent(atadev->channel->dev); struct ata_pci_controller *ctlr = device_get_softc(parent); u_int8_t timings[] = { 0xa8, 0x65, 0x42, 0x22, 0x20, 0x42, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 }; int modes[][7] = { { 0xc2, 0xc1, 0xc0, 0x00, 0x00, 0x00, 0x00 }, /* VIA ATA33 */ { 0xee, 0xec, 0xea, 0xe9, 0xe8, 0x00, 0x00 }, /* VIA ATA66 */ { 0xf7, 0xf6, 0xf4, 0xf2, 0xf1, 0xf0, 0x00 }, /* VIA ATA100 */ { 0xf7, 0xf7, 0xf6, 0xf4, 0xf2, 0xf1, 0xf0 }, /* VIA ATA133 */ { 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6, 0xc7 }}; /* AMD/nVIDIA */ int devno = (atadev->channel->unit << 1) + ATA_DEV(atadev->unit); int reg = 0x53 - devno; int error; mode = ata_limit_mode(atadev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg2 & AMDCABLE) { if (mode > ATA_UDMA2 && !(pci_read_config(parent, 0x42, 1) & (1 << devno))) { ata_prtdev(atadev, "DMA limited to UDMA33, 
non-ATA66 cable or device\n"); mode = ATA_UDMA2; } } else mode = ata_check_80pin(atadev, mode); if (ctlr->chip->cfg2 & NVIDIA) reg += 0x10; if (ctlr->chip->cfg1 != VIA133) pci_write_config(parent, reg - 0x08, timings[ata_mode2idx(mode)], 1); error = ata_controlcmd(atadev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) ata_prtdev(atadev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { if (mode >= ATA_UDMA0) pci_write_config(parent, reg, modes[ctlr->chip->cfg1][mode & ATA_MODE_MASK], 1); else pci_write_config(parent, reg, 0x8b, 1); atadev->mode = mode; } } /* misc functions */ static struct ata_chip_id * ata_find_chip(device_t dev, struct ata_chip_id *index, int slot) { device_t *children; int nchildren, i; if (device_get_children(device_get_parent(dev), &children, &nchildren)) return 0; while (index->chipid != 0) { for (i = 0; i < nchildren; i++) { if (((slot >= 0 && pci_get_slot(children[i]) == slot) || (slot < 0 && pci_get_slot(children[i]) <= -slot)) && pci_get_devid(children[i]) == index->chipid && pci_get_revid(children[i]) >= index->chiprev) { free(children, M_TEMP); return index; } } index++; } free(children, M_TEMP); return NULL; } static struct ata_chip_id * ata_match_chip(device_t dev, struct ata_chip_id *index) { while (index->chipid != 0) { if (pci_get_devid(dev) == index->chipid && pci_get_revid(dev) >= index->chiprev) return index; index++; } return NULL; } static int ata_setup_interrupt(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int rid = ATA_IRQ_RID; if (!ata_legacy(dev)) { if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_generic_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } } return 0; } static void ata_serialize(struct 
ata_channel *ch, int flags) { struct ata_pci_controller *scp = device_get_softc(device_get_parent(ch->dev)); switch (flags) { case ATA_LF_LOCK: if (scp->locked_ch == ch->unit) break; while (!atomic_cmpset_acq_int(&scp->locked_ch, -1, ch->unit)) tsleep(ch->locking, PRIBIO, "atasrl", 1); break; case ATA_LF_UNLOCK: if (scp->locked_ch == -1 || scp->locked_ch != ch->unit) break; atomic_store_rel_int(&scp->locked_ch, -1); wakeup(ch->locking); break; } return; } static int ata_check_80pin(struct ata_device *atadev, int mode) { if (mode > ATA_UDMA2 && !(atadev->param->hwres & ATA_CABLE_ID)) { ata_prtdev(atadev,"DMA limited to UDMA33, non-ATA66 cable or device\n"); mode = ATA_UDMA2; } return mode; } static int ata_mode2idx(int mode) { if ((mode & ATA_DMA_MASK) == ATA_UDMA0) return (mode & ATA_MODE_MASK) + 8; if ((mode & ATA_DMA_MASK) == ATA_WDMA0) return (mode & ATA_MODE_MASK) + 5; return (mode & ATA_MODE_MASK) - ATA_PIO0; } diff --git a/sys/dev/ata/ata-dma.c b/sys/dev/ata/ata-dma.c index d140d7d012fe..910ffddca72d 100644 --- a/sys/dev/ata/ata-dma.c +++ b/sys/dev/ata/ata-dma.c @@ -1,287 +1,285 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static void ata_dmaalloc(struct ata_channel *); static void ata_dmafree(struct ata_channel *); static void ata_dmasetupd_cb(void *, bus_dma_segment_t *, int, int); static int ata_dmaload(struct ata_device *, caddr_t, int32_t, int); static int ata_dmaunload(struct ata_channel *); /* local vars */ static MALLOC_DEFINE(M_ATADMA, "ATA DMA", "ATA driver DMA"); /* misc defines */ #define MAXSEGSZ PAGE_SIZE #define MAXTABSZ PAGE_SIZE #define MAXWSPCSZ 256 #define MAXCTLDMASZ (2 * (MAXTABSZ + MAXPHYS)) struct ata_dc_cb_args { bus_addr_t maddr; int error; }; void ata_dmainit(struct ata_channel *ch) { if ((ch->dma = malloc(sizeof(struct ata_dma), M_ATADMA, M_NOWAIT|M_ZERO))) { ch->dma->alloc = ata_dmaalloc; ch->dma->free = ata_dmafree; ch->dma->load = ata_dmaload; ch->dma->unload = ata_dmaunload; ch->dma->alignment = 2; ch->dma->max_iosize = 64 * 1024; ch->dma->boundary = 64 * 1024; } } static void ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ata_dc_cb_args *cba = (struct ata_dc_cb_args *)xsc; if (!(cba->error = error)) cba->maddr = 
segs[0].ds_addr;
}

/*
 * Allocate all DMA resources for a channel: the parent tag, the tag/memory/
 * map for the SG table (cdma*), the data transfer tag+map (ddma*), and a
 * small bounce "workspace" buffer (wdma*).  On any failure everything
 * allocated so far is torn down and ch->dma is freed and NULLed, which
 * silently disables DMA for the channel.
 */
static void
ata_dmaalloc(struct ata_channel *ch)
{
    struct ata_dc_cb_args ccba;

    /* parent tag all other tags are derived from */
    if (bus_dma_tag_create(NULL, 1, 0,
			   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			   NULL, NULL, MAXCTLDMASZ, ATA_DMA_ENTRIES,
			   BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
			   NULL, NULL, &ch->dma->dmatag))
	goto error;

    /* tag for the SG table; page aligned, must not cross a page boundary */
    if (bus_dma_tag_create(ch->dma->dmatag, PAGE_SIZE, PAGE_SIZE,
			   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			   NULL, NULL, MAXTABSZ, 1, MAXTABSZ,
			   BUS_DMA_ALLOCNOW, NULL, NULL, &ch->dma->cdmatag))
	goto error;

    /* tag for data transfers, honoring the channel's alignment/boundary */
    if (bus_dma_tag_create(ch->dma->dmatag,ch->dma->alignment,ch->dma->boundary,
			   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			   NULL, NULL, ch->dma->max_iosize, ATA_DMA_ENTRIES,
			   ch->dma->boundary, BUS_DMA_ALLOCNOW,
			   NULL, NULL, &ch->dma->ddmatag))
	goto error;

    /* allocate and load the SG table; the callback captures its bus addr */
    if (bus_dmamem_alloc(ch->dma->cdmatag, (void **)&ch->dma->dmatab, 0,
			 &ch->dma->cdmamap))
	goto error;

    if (bus_dmamap_load(ch->dma->cdmatag, ch->dma->cdmamap, ch->dma->dmatab,
			MAXTABSZ, ata_dmasetupc_cb, &ccba, 0) || ccba.error) {
	bus_dmamem_free(ch->dma->cdmatag, ch->dma->dmatab, ch->dma->cdmamap);
	goto error;
    }
    ch->dma->mdmatab = ccba.maddr;

    if (bus_dmamap_create(ch->dma->ddmatag, 0, &ch->dma->ddmamap))
	goto error;

    /* small DMA-able workspace buffer, also page aligned */
    if (bus_dma_tag_create(ch->dma->dmatag, PAGE_SIZE, PAGE_SIZE,
			   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			   NULL, NULL, MAXWSPCSZ, 1, MAXWSPCSZ,
			   BUS_DMA_ALLOCNOW, NULL, NULL, &ch->dma->wdmatag))
	goto error;

    if (bus_dmamem_alloc(ch->dma->wdmatag, (void **)&ch->dma->workspace, 0,
			 &ch->dma->wdmamap))
	goto error;

    if (bus_dmamap_load(ch->dma->wdmatag, ch->dma->wdmamap, ch->dma->workspace,
			MAXWSPCSZ, ata_dmasetupc_cb, &ccba, 0) || ccba.error) {
	bus_dmamem_free(ch->dma->wdmatag, ch->dma->workspace, ch->dma->wdmamap);
	goto error;
    }
    ch->dma->wdmatab = ccba.maddr;
    return;

error:
    /* undo whatever succeeded and run without DMA on this channel */
    ata_printf(ch, -1, "WARNING - DMA allocation failed, disabling DMA\n");
    ata_dmafree(ch);
    free(ch->dma, M_ATADMA);
    ch->dma = NULL;
}

static void
ata_dmafree(struct ata_channel *ch)
{
    if (ch->dma->wdmatab) {
	bus_dmamap_unload(ch->dma->wdmatag,
ch->dma->wdmamap); bus_dmamem_free(ch->dma->wdmatag, ch->dma->workspace, ch->dma->wdmamap); ch->dma->wdmatab = 0; ch->dma->wdmamap = NULL; ch->dma->workspace = NULL; } if (ch->dma->wdmatag) { bus_dma_tag_destroy(ch->dma->wdmatag); ch->dma->wdmatag = NULL; } if (ch->dma->mdmatab) { bus_dmamap_unload(ch->dma->cdmatag, ch->dma->cdmamap); bus_dmamem_free(ch->dma->cdmatag, ch->dma->dmatab, ch->dma->cdmamap); ch->dma->mdmatab = 0; ch->dma->cdmamap = NULL; ch->dma->dmatab = NULL; } if (ch->dma->ddmamap) { bus_dmamap_destroy(ch->dma->ddmatag, ch->dma->ddmamap); ch->dma->ddmamap = NULL; } if (ch->dma->cdmatag) { bus_dma_tag_destroy(ch->dma->cdmatag); ch->dma->cdmatag = NULL; } if (ch->dma->ddmatag) { bus_dma_tag_destroy(ch->dma->ddmatag); ch->dma->ddmatag = NULL; } if (ch->dma->dmatag) { bus_dma_tag_destroy(ch->dma->dmatag); ch->dma->dmatag = NULL; } } struct ata_dmasetup_data_cb_args { struct ata_dmaentry *dmatab; int error; }; static void ata_dmasetupd_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ata_dmasetup_data_cb_args *cba = (struct ata_dmasetup_data_cb_args *)xsc; bus_size_t cnt; u_int32_t lastcount; int i, j; cba->error = error; if (error != 0) return; lastcount = j = 0; for (i = 0; i < nsegs; i++) { /* * A maximum segment size was specified for bus_dma_tag_create, but * some busdma code does not seem to honor this, so fix up if needed. 
*/ for (cnt = 0; cnt < segs[i].ds_len; cnt += MAXSEGSZ, j++) { cba->dmatab[j].base = htole32(segs[i].ds_addr + cnt); lastcount = ulmin(segs[i].ds_len - cnt, MAXSEGSZ) & 0xffff; cba->dmatab[j].count = htole32(lastcount); } } cba->dmatab[j - 1].count = htole32(lastcount | ATA_DMA_EOT); } static int ata_dmaload(struct ata_device *atadev, caddr_t data, int32_t count, int dir) { struct ata_channel *ch = atadev->channel; struct ata_dmasetup_data_cb_args cba; if (ch->dma->flags & ATA_DMA_ACTIVE) { ata_prtdev(atadev, "FAILURE - already active DMA on this device\n"); return -1; } if (!count) { ata_prtdev(atadev, "FAILURE - zero length DMA transfer attempted\n"); return -1; } if (((uintptr_t)data & (ch->dma->alignment - 1)) || (count & (ch->dma->alignment - 1))) { ata_prtdev(atadev, "FAILURE - non aligned DMA transfer attempted\n"); return -1; } if (count > ch->dma->max_iosize) { ata_prtdev(atadev, "FAILURE - oversized DMA transfer attempted %d > %d\n", count, ch->dma->max_iosize); return -1; } cba.dmatab = ch->dma->dmatab; bus_dmamap_sync(ch->dma->cdmatag, ch->dma->cdmamap, BUS_DMASYNC_PREWRITE); if (bus_dmamap_load(ch->dma->ddmatag, ch->dma->ddmamap, data, count, ata_dmasetupd_cb, &cba, 0) || cba.error) return -1; bus_dmamap_sync(ch->dma->ddmatag, ch->dma->ddmamap, dir ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); ch->dma->cur_iosize = count; - ch->dma->flags = dir ? (ATA_DMA_ACTIVE | ATA_DMA_READ) : ATA_DMA_ACTIVE; - + ch->dma->flags = dir ? (ATA_DMA_LOADED | ATA_DMA_READ) : ATA_DMA_LOADED; return 0; } int ata_dmaunload(struct ata_channel *ch) { bus_dmamap_sync(ch->dma->cdmatag, ch->dma->cdmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(ch->dma->ddmatag, ch->dma->ddmamap, (ch->dma->flags & ATA_DMA_READ) != 0 ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma->ddmatag, ch->dma->ddmamap); ch->dma->cur_iosize = 0; - ch->dma->flags = 0; - + ch->dma->flags &= ~ATA_DMA_LOADED; return 0; } diff --git a/sys/dev/ata/ata-lowlevel.c b/sys/dev/ata/ata-lowlevel.c index 29a95ad2e5b6..e305085f03e2 100644 --- a/sys/dev/ata/ata-lowlevel.c +++ b/sys/dev/ata/ata-lowlevel.c @@ -1,851 +1,851 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static int ata_generic_transaction(struct ata_request *); static void ata_generic_interrupt(void *); static void ata_generic_reset(struct ata_channel *); static int ata_wait(struct ata_device *, u_int8_t); /*static int ata_command(struct ata_device *, u_int8_t, u_int64_t, u_int16_t, u_int16_t);*/ static void ata_pio_read(struct ata_request *, int); static void ata_pio_write(struct ata_request *, int); /* local vars */ static int atadebug = 0; /* * low level ATA functions */ void ata_generic_hw(struct ata_channel *ch) { ch->hw.reset = ata_generic_reset; ch->hw.transaction = ata_generic_transaction; ch->hw.interrupt = ata_generic_interrupt; ch->hw.command = ata_generic_command; } /* must be called with ATA channel locked */ static int ata_generic_transaction(struct ata_request *request) { struct ata_channel *ch = request->device->channel; /* safetybelt for HW that went away */ if (!request->device->param || request->device->channel->flags&ATA_HWGONE) { request->retries = 0; request->result = ENXIO; return ATA_OP_FINISHED; } - /* record the request as running */ - ch->running = request; - ATA_DEBUG_RQ(request, "transaction"); /* disable ATAPI DMA writes if HW doesn't support it */ if ((ch->flags & ATA_ATAPI_DMA_RO) && ((request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)) == (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE))) request->flags &= ~ATA_R_DMA; switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA)) { /* ATA PIO data transfer and control commands */ default: { /* record command direction here as our request might be gone later */ int write = (request->flags & ATA_R_WRITE); /* issue command */ if (ch->hw.command(request->device, request->u.ata.command, request->u.ata.lba, request->u.ata.count, request->u.ata.feature)) { ata_prtdev(request->device, "error issueing PIO command\n"); 
request->result = EIO; break; } /* device reset doesn't interrupt */ if (request->u.ata.command == ATA_ATAPI_RESET) { int timeout = 1000000; do { DELAY(10); request->status = ATA_IDX_INB(ch, ATA_STATUS); } while (request->status & ATA_S_BUSY && timeout--); if (timeout) printf("ATAPI_RESET time = %dus\n", (1000000-timeout)*10); else printf("ATAPI_RESET timeout\n"); - if (request->status & ATA_S_ERROR) { + if (request->status & ATA_S_ERROR) request->error = ATA_IDX_INB(ch, ATA_ERROR); - //request->result = EIO; - } break; } /* if write command output the data */ if (write) { if (ata_wait(request->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) { ata_prtdev(request->device,"timeout waiting for write DRQ"); request->result = EIO; break; } ata_pio_write(request, request->transfersize); } } - /* return and wait for interrupt */ + /* record the request as running and return for interrupt */ + ch->running = request; return ATA_OP_CONTINUES; /* ATA DMA data transfer commands */ case ATA_R_DMA: /* check sanity, setup SG list and DMA engine */ if (ch->dma->load(request->device, request->data, request->bytecount, request->flags & ATA_R_READ)) { ata_prtdev(request->device, "setting up DMA failed\n"); request->result = EIO; break; } /* issue command */ if (ch->hw.command(request->device, request->u.ata.command, request->u.ata.lba, request->u.ata.count, request->u.ata.feature)) { ata_prtdev(request->device, "error issuing DMA command\n"); request->result = EIO; break; } /* start DMA engine */ if (ch->dma->start(ch)) { ata_prtdev(request->device, "error starting DMA\n"); request->result = EIO; break; } - /* return and wait for interrupt */ + /* record the request as running and return for interrupt */ + ch->running = request; return ATA_OP_CONTINUES; /* ATAPI PIO commands */ case ATA_R_ATAPI: /* is this just a POLL DSC command ? 
*/ if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) { ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | request->device->unit); DELAY(10); if (!(ATA_IDX_INB(ch, ATA_ALTSTAT)&ATA_S_DSC)) request->result = EBUSY; break; } /* start ATAPI operation */ if (ch->hw.command(request->device, ATA_PACKET_CMD, request->transfersize << 8, 0, 0)) { ata_prtdev(request->device, "error issuing ATA PACKET command\n"); request->result = EIO; break; } /* command interrupt device ? just return and wait for interrupt */ - if ((request->device->param->config & ATA_DRQ_MASK) == ATA_DRQ_INTR) + if ((request->device->param->config & ATA_DRQ_MASK) == ATA_DRQ_INTR) { + ch->running = request; return ATA_OP_CONTINUES; + } /* wait for ready to write ATAPI command block */ { int timeout = 5000; /* might be less for fast devices */ while (timeout--) { int reason = ATA_IDX_INB(ch, ATA_IREASON); int status = ATA_IDX_INB(ch, ATA_STATUS); if (((reason & (ATA_I_CMD | ATA_I_IN)) | (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT) break; DELAY(20); } if (timeout <= 0) { ata_prtdev(request->device, "timeout waiting for ATAPI ready\n"); request->result = EIO; break; } } /* this seems to be needed for some (slow) devices */ DELAY(10); /* output actual command block */ ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb, (request->device->param->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12 ? 6 : 8); - /* return and wait for interrupt */ + /* record the request as running and return for interrupt */ + ch->running = request; return ATA_OP_CONTINUES; case ATA_R_ATAPI|ATA_R_DMA: /* is this just a POLL DSC command ? 
*/ if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) { ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | request->device->unit); DELAY(10); if (!(ATA_IDX_INB(ch, ATA_ALTSTAT)&ATA_S_DSC)) request->result = EBUSY; break; } /* check sanity, setup SG list and DMA engine */ if (ch->dma->load(request->device, request->data, request->bytecount, request->flags & ATA_R_READ)) { ata_prtdev(request->device, "setting up DMA failed\n"); request->result = EIO; break; } /* start ATAPI operation */ if (ch->hw.command(request->device, ATA_PACKET_CMD, 0, 0, ATA_F_DMA)) { ata_prtdev(request->device, "error issuing ATAPI packet command\n"); request->result = EIO; break; } /* wait for ready to write ATAPI command block */ { int timeout = 5000; /* might be less for fast devices */ while (timeout--) { int reason = ATA_IDX_INB(ch, ATA_IREASON); int status = ATA_IDX_INB(ch, ATA_STATUS); if (((reason & (ATA_I_CMD | ATA_I_IN)) | (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT) break; DELAY(20); } if (timeout <= 0) { ata_prtdev(request->device,"timeout waiting for ATAPI ready\n"); request->result = EIO; break; } } /* this seems to be needed for some (slow) devices */ DELAY(10); /* output actual command block */ ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb, (request->device->param->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12 ? 
6 : 8); /* start DMA engine */ if (ch->dma->start(ch)) { request->result = EIO; break; } - /* return and wait for interrupt */ + /* record the request as running and return for interrupt */ + ch->running = request; return ATA_OP_CONTINUES; } /* request finish here */ - if (ch->dma->flags & ATA_DMA_ACTIVE) + if (ch->dma->flags & ATA_DMA_LOADED) ch->dma->unload(ch); - ch->running = NULL; return ATA_OP_FINISHED; } static void ata_generic_interrupt(void *data) { struct ata_channel *ch = (struct ata_channel *)data; struct ata_request *request = ch->running; int length; /* ignore this interrupt if there is no running request */ if (!request) return; ATA_DEBUG_RQ(request, "interrupt"); /* ignore interrupt if device is busy */ if (!(request->flags & ATA_R_TIMEOUT) && ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) { DELAY(100); if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DRQ)) return; } ATA_DEBUG_RQ(request, "interrupt accepted"); /* clear interrupt and get status */ request->status = ATA_IDX_INB(ch, ATA_STATUS); /* register interrupt */ if (!(request->flags & ATA_R_TIMEOUT)) request->flags |= ATA_R_INTR_SEEN; switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_CONTROL)) { /* ATA PIO data transfer and control commands */ default: /* on control commands read back registers to the request struct */ if (request->flags & ATA_R_CONTROL) { request->u.ata.count = ATA_IDX_INB(ch, ATA_COUNT); request->u.ata.lba = ATA_IDX_INB(ch, ATA_SECTOR) | (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) | (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16); } /* if we got an error we are done with the HW */ if (request->status & ATA_S_ERROR) { request->error = ATA_IDX_INB(ch, ATA_ERROR); break; } /* are we moving data ? */ if (request->flags & (ATA_R_READ | ATA_R_WRITE)) { /* if read data get it */ if (request->flags & ATA_R_READ) ata_pio_read(request, request->transfersize); /* update how far we've gotten */ request->donecount += request->transfersize; /* do we need a scoop more ? 
*/ if (request->bytecount > request->donecount) { /* set this transfer size according to HW capabilities */ request->transfersize = min((request->bytecount - request->donecount), request->transfersize); /* clear interrupt seen flag as we need to wait again */ request->flags &= ~ATA_R_INTR_SEEN; /* if data write command, output the data */ if (request->flags & ATA_R_WRITE) { /* if we get an error here we are done with the HW */ if (ata_wait(request->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) { ata_prtdev(request->device, "timeout waiting for write DRQ"); request->status = ATA_IDX_INB(ch, ATA_STATUS); break; } /* output data and return waiting for new interrupt */ ata_pio_write(request, request->transfersize); return; } /* if data read command, return & wait for interrupt */ if (request->flags & ATA_R_READ) return; } } /* done with HW */ break; /* ATA DMA data transfer commands */ case ATA_R_DMA: /* stop DMA engine and get status */ if (ch->dma->stop) request->dmastat = ch->dma->stop(ch); /* did we get error or data */ if (request->status & ATA_S_ERROR) request->error = ATA_IDX_INB(ch, ATA_ERROR); else if (request->dmastat & ATA_BMSTAT_ERROR) request->status |= ATA_S_ERROR; else request->donecount = request->bytecount; /* release SG list etc */ ch->dma->unload(ch); /* done with HW */ break; /* ATAPI PIO commands */ case ATA_R_ATAPI: length = ATA_IDX_INB(ch, ATA_CYL_LSB)|(ATA_IDX_INB(ch, ATA_CYL_MSB)<<8); switch ((ATA_IDX_INB(ch, ATA_IREASON) & (ATA_I_CMD | ATA_I_IN)) | (request->status & ATA_S_DRQ)) { case ATAPI_P_CMDOUT: /* this seems to be needed for some (slow) devices */ DELAY(10); if (!(request->status & ATA_S_DRQ)) { ata_prtdev(request->device, "command interrupt without DRQ\n"); request->status = ATA_S_ERROR; break; } ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb, (request->device->param->config & ATA_PROTO_MASK)== ATA_PROTO_ATAPI_12 ? 
6 : 8); /* return wait for interrupt */ return; case ATAPI_P_WRITE: if (request->flags & ATA_R_READ) { request->status = ATA_S_ERROR; ata_prtdev(request->device, "%s trying to write on read buffer\n", ata_cmd2str(request)); break; } ata_pio_write(request, length); request->donecount += length; /* set next transfer size according to HW capabilities */ request->transfersize = min((request->bytecount-request->donecount), request->transfersize); /* return wait for interrupt */ return; case ATAPI_P_READ: if (request->flags & ATA_R_WRITE) { request->status = ATA_S_ERROR; ata_prtdev(request->device, "%s trying to read on write buffer\n", ata_cmd2str(request)); break; } ata_pio_read(request, length); request->donecount += length; /* set next transfer size according to HW capabilities */ request->transfersize = min((request->bytecount-request->donecount), request->transfersize); /* return wait for interrupt */ return; case ATAPI_P_DONEDRQ: ata_prtdev(request->device, "WARNING - %s DONEDRQ non conformant device\n", ata_cmd2str(request)); if (request->flags & ATA_R_READ) { ata_pio_read(request, length); request->donecount += length; } else if (request->flags & ATA_R_WRITE) { ata_pio_write(request, length); request->donecount += length; } else request->status = ATA_S_ERROR; /* FALLTHROUGH */ case ATAPI_P_ABORT: case ATAPI_P_DONE: if (request->status & (ATA_S_ERROR | ATA_S_DWF)) request->error = ATA_IDX_INB(ch, ATA_ERROR); break; default: ata_prtdev(request->device, "unknown transfer phase\n"); request->status = ATA_S_ERROR; } /* done with HW */ break; /* ATAPI DMA commands */ case ATA_R_ATAPI|ATA_R_DMA: /* stop the engine and get engine status */ if (ch->dma->stop) request->dmastat = ch->dma->stop(ch); /* did we get error or data */ if (request->status & (ATA_S_ERROR | ATA_S_DWF)) request->error = ATA_IDX_INB(ch, ATA_ERROR); else if (request->dmastat & ATA_BMSTAT_ERROR) request->status |= ATA_S_ERROR; else request->donecount = request->bytecount; /* release SG list etc */ 
ch->dma->unload(ch);

	/* done with HW */
	break;
    }

    /* if we timed out the unlocking of the ATA channel is done later */
    if (!(request->flags & ATA_R_TIMEOUT)) {
	ch->running = NULL;
	ATA_UNLOCK_CH(ch);
	ch->locking(ch, ATA_LF_UNLOCK);
    }

    /* schedule completion for this request */
    ata_finish(request);
}

/*
 * Probe and reset both possible devices on the channel and record what is
 * found in ch->devices.  Presence is first sniffed from the status register,
 * then a soft reset is issued and BUSY is polled (up to ~31 seconds) on each
 * present device; the ATAPI signature (cyl lsb/msb) distinguishes ATAPI from
 * plain ATA devices.  must be called with ATA channel locked
 */
static void
ata_generic_reset(struct ata_channel *ch)
{
    u_int8_t err, lsb, msb, ostat0, ostat1;
    u_int8_t stat0 = 0, stat1 = 0;
    int mask = 0, timeout;

    /* reset host end of channel (if supported) */
    if (ch->reset)
	ch->reset(ch);

    /* do we have any signs of ATA/ATAPI HW being present ? */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_MASTER);
    DELAY(10);
    ostat0 = ATA_IDX_INB(ch, ATA_STATUS);
    /* 0xf8 (floating bus) and 0xa5 are "nothing there" signatures */
    if ((ostat0 & 0xf8) != 0xf8 && ostat0 != 0xa5) {
	stat0 = ATA_S_BUSY;
	mask |= 0x01;
    }
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_SLAVE);
    DELAY(10);
    ostat1 = ATA_IDX_INB(ch, ATA_STATUS);

    /* in some setups we dont want to test for a slave */
    if (!(ch->flags & ATA_NO_SLAVE)) {
	if ((ostat1 & 0xf8) != 0xf8 && ostat1 != 0xa5) {
	    stat1 = ATA_S_BUSY;
	    mask |= 0x02;
	}
    }

    if (bootverbose)
	ata_printf(ch, -1, "reset tp1 mask=%02x ostat0=%02x ostat1=%02x\n",
		   mask, ostat0, ostat1);

    /* if nothing showed up there is no need to get any further */
    /* SOS is that too strong?, we just might lose devices here XXX */
    ch->devices = 0;
    if (!mask)
	return;

    /* reset (both) devices on this channel */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_MASTER);
    DELAY(10);
    ATA_IDX_OUTB(ch, ATA_ALTSTAT, ATA_A_IDS | ATA_A_RESET);
    DELAY(10000);
    ATA_IDX_OUTB(ch, ATA_ALTSTAT, ATA_A_IDS);
    DELAY(100000);
    ATA_IDX_INB(ch, ATA_ERROR);

    /* wait for BUSY to go inactive */
    for (timeout = 0; timeout < 310; timeout++) {
	if (stat0 & ATA_S_BUSY) {
	    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_MASTER);
	    DELAY(10);
	    err = ATA_IDX_INB(ch, ATA_ERROR);
	    lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
	    msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
	    stat0 = ATA_IDX_INB(ch, ATA_STATUS);
	    if (bootverbose)
		ata_printf(ch, ATA_MASTER,
			   "stat=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
			   stat0, err, lsb, msb);
	    if (!(stat0 & ATA_S_BUSY)) {
		/* ATA_E_ILI after reset means the device left a signature */
		if ((err & 0x7f) == ATA_E_ILI) {
		    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
			ch->devices |= ATA_ATAPI_MASTER;
		    }
		    else if (stat0 & ATA_S_READY) {
			ch->devices |= ATA_ATA_MASTER;
		    }
		}
		/* mirrored registers: keep polling, nothing settled yet */
		else if ((stat0 & 0x4f) && err == lsb && err == msb) {
		    stat0 |= ATA_S_BUSY;
		}
	    }
	}
	if (!((mask == 0x03) && (stat0 & ATA_S_BUSY)) &&
	    (stat1 & ATA_S_BUSY)) {
	    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_SLAVE);
	    DELAY(10);
	    err = ATA_IDX_INB(ch, ATA_ERROR);
	    lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
	    msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
	    stat1 = ATA_IDX_INB(ch, ATA_STATUS);
	    if (bootverbose)
		ata_printf(ch, ATA_SLAVE,
			   " stat=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
			   stat1, err, lsb, msb);
	    if (!(stat1 & ATA_S_BUSY)) {
		if ((err & 0x7f) == ATA_E_ILI) {
		    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
			ch->devices |= ATA_ATAPI_SLAVE;
		    }
		    else if (stat1 & ATA_S_READY) {
			ch->devices |= ATA_ATA_SLAVE;
		    }
		}
		else if ((stat1 & 0x4f) && err == lsb && err == msb) {
		    stat1 |= ATA_S_BUSY;
		}
	    }
	}
	if (mask == 0x01)      /* wait for master only */
	    if (!(stat0 & ATA_S_BUSY) || (stat0 == 0xff && timeout > 5))
		break;
	if (mask == 0x02)      /* wait for slave only */
	    if (!(stat1 & ATA_S_BUSY) || (stat1 == 0xff && timeout > 5))
		break;
	if (mask == 0x03) {    /* wait for both master & slave */
	    if (!(stat0 & ATA_S_BUSY) && !(stat1 & ATA_S_BUSY))
		break;
	    /* a stuck 0xff status after a few rounds means "gone" */
	    if (stat0 == 0xff && timeout > 5)
		mask &= ~0x01;
	    if (stat1 == 0xff && timeout > 5)
		mask &= ~0x02;
	}
	DELAY(100000);
    }

    if (bootverbose)
	ata_printf(ch, -1, "reset tp2 stat0=%02x stat1=%02x devices=0x%b\n",
		   stat0, stat1, ch->devices,
		   "\20\4ATAPI_SLAVE\3ATAPI_MASTER\2ATA_SLAVE\1ATA_MASTER");
}

static int
ata_wait(struct ata_device *atadev, u_int8_t mask)
{
    u_int8_t status;
    int timeout = 0;

    DELAY(1);

    /* wait 5 seconds for device to get !BUSY */
    while (timeout < 5000000) {
	status = ATA_IDX_INB(atadev->channel, ATA_STATUS);

	/* if drive fails status, reselect the drive just to be sure */
	if
(status == 0xff) { ata_prtdev(atadev, "WARNING no status, reselecting device\n"); ATA_IDX_OUTB(atadev->channel, ATA_DRIVE, ATA_D_IBM | atadev->unit); DELAY(10); status = ATA_IDX_INB(atadev->channel, ATA_STATUS); if (status == 0xff) return -1; } /* are we done ? */ if (!(status & ATA_S_BUSY)) break; if (timeout > 1000) { timeout += 1000; DELAY(1000); } else { timeout += 10; DELAY(10); } } if (timeout >= 5000000) return -1; if (!mask) return (status & ATA_S_ERROR); DELAY(1); /* wait 50 msec for bits wanted */ timeout = 5000; while (timeout--) { status = ATA_IDX_INB(atadev->channel, ATA_STATUS); if ((status & mask) == mask) return (status & ATA_S_ERROR); DELAY (10); } return -1; } int ata_generic_command(struct ata_device *atadev, u_int8_t command, u_int64_t lba, u_int16_t count, u_int16_t feature) { if (atadebug) ata_prtdev(atadev, "ata_command: addr=%04lx, command=%02x, " "lba=%jd, count=%d, feature=%d\n", rman_get_start(atadev->channel->r_io[ATA_DATA].res), command, (intmax_t)lba, count, feature); /* select device */ ATA_IDX_OUTB(atadev->channel, ATA_DRIVE, ATA_D_IBM | atadev->unit); /* ready to issue command ? 
*/ if (ata_wait(atadev, 0) < 0) { ata_prtdev(atadev, "timeout sending command=%02x\n", command); return -1; } /* enable interrupt */ ATA_IDX_OUTB(atadev->channel, ATA_ALTSTAT, ATA_A_4BIT); /* only use 48bit addressing if needed (avoid bugs and overhead) */ if ((lba > 268435455 || count > 256) && atadev->param && atadev->param->support.command2 & ATA_SUPPORT_ADDRESS48) { /* translate command into 48bit version */ switch (command) { case ATA_READ: command = ATA_READ48; break; case ATA_READ_MUL: command = ATA_READ_MUL48; break; case ATA_READ_DMA: command = ATA_READ_DMA48; break; case ATA_READ_DMA_QUEUED: command = ATA_READ_DMA_QUEUED48; break; case ATA_WRITE: command = ATA_WRITE48; break; case ATA_WRITE_MUL: command = ATA_WRITE_MUL48; break; case ATA_WRITE_DMA: command = ATA_WRITE_DMA48; break; case ATA_WRITE_DMA_QUEUED: command = ATA_WRITE_DMA_QUEUED48; break; case ATA_FLUSHCACHE: command = ATA_FLUSHCACHE48; break; default: ata_prtdev(atadev, "can't translate cmd to 48bit version\n"); return -1; } ATA_IDX_OUTB(atadev->channel, ATA_FEATURE, (feature>>8) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_FEATURE, feature & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_COUNT, (count>>8) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_COUNT, count & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_SECTOR, (lba>>24) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_SECTOR, lba & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_CYL_LSB, (lba>>32) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_CYL_LSB, (lba>>8) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_CYL_MSB, (lba>>40) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_CYL_MSB, (lba>>16) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_DRIVE, ATA_D_LBA | atadev->unit); atadev->channel->flags |= ATA_48BIT_ACTIVE; } else { ATA_IDX_OUTB(atadev->channel, ATA_FEATURE, feature); ATA_IDX_OUTB(atadev->channel, ATA_COUNT, count); ATA_IDX_OUTB(atadev->channel, ATA_SECTOR, lba & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_CYL_LSB, (lba>>8) & 0xff); ATA_IDX_OUTB(atadev->channel, ATA_CYL_MSB, 
(lba>>16) & 0xff);
	if (atadev->flags & ATA_D_USE_CHS)
	    ATA_IDX_OUTB(atadev->channel, ATA_DRIVE,
			 ATA_D_IBM | atadev->unit | ((lba>>24) & 0xf));
	else
	    ATA_IDX_OUTB(atadev->channel, ATA_DRIVE,
			 ATA_D_IBM | ATA_D_LBA | atadev->unit|((lba>>24)&0xf));
	atadev->channel->flags &= ~ATA_48BIT_ACTIVE;
    }

    /* issue command to controller */
    ATA_IDX_OUTB(atadev->channel, ATA_CMD, command);

    return 0;
}

/*
 * Read up to "length" bytes of PIO data from the data port into the
 * request buffer at the current donecount offset.  Uses 16-bit transfers
 * if the channel requires it or the size is not 32-bit aligned; any bytes
 * the device offers beyond transfersize are drained and discarded so the
 * device does not stay in DRQ.
 */
static void
ata_pio_read(struct ata_request *request, int length)
{
    int size = min(request->transfersize, length);
    struct ata_channel *ch = request->device->channel;
    int resid;

    if (ch->flags & ATA_USE_16BIT || (size % sizeof(int32_t)))
	ATA_IDX_INSW_STRM(ch, ATA_DATA,
			  (void*)((uintptr_t)request->data+request->donecount),
			  size / sizeof(int16_t));
    else
	ATA_IDX_INSL_STRM(ch, ATA_DATA,
			  (void*)((uintptr_t)request->data+request->donecount),
			  size / sizeof(int32_t));

    if (request->transfersize < length) {
	ata_prtdev(request->device, "WARNING - %s read data overrun %d>%d\n",
		   ata_cmd2str(request), length, request->transfersize);
	/* drain the excess words the device insists on delivering */
	for (resid = request->transfersize; resid < length;
	     resid += sizeof(int16_t))
	    ATA_IDX_INW(ch, ATA_DATA);
    }
}

/*
 * Write up to "length" bytes of PIO data from the request buffer to the
 * data port, mirroring ata_pio_read(); if the device expects more data
 * than transfersize, zero words are padded out to satisfy it.
 */
static void
ata_pio_write(struct ata_request *request, int length)
{
    int size = min(request->transfersize, length);
    struct ata_channel *ch = request->device->channel;
    int resid;

    if (ch->flags & ATA_USE_16BIT || (size % sizeof(int32_t)))
	ATA_IDX_OUTSW_STRM(ch, ATA_DATA,
			   (void*)((uintptr_t)request->data+request->donecount),
			   size / sizeof(int16_t));
    else
	ATA_IDX_OUTSL_STRM(ch, ATA_DATA,
			   (void*)((uintptr_t)request->data+request->donecount),
			   size / sizeof(int32_t));

    if (request->transfersize < length) {
	ata_prtdev(request->device, "WARNING - %s write data underrun %d>%d\n",
		   ata_cmd2str(request), length, request->transfersize);
	/* pad with zero words until the device is satisfied */
	for (resid = request->transfersize; resid < length;
	     resid += sizeof(int16_t))
	    ATA_IDX_OUTW(ch, ATA_DATA, 0);
    }
}
diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c
index 16ac6f452cb6..24691488c2ad 100644
--- a/sys/dev/ata/ata-pci.c
+++ 
b/sys/dev/ata/ata-pci.c @@ -1,563 +1,565 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __alpha__ #include #endif #include #include #include #include #include /* local vars */ static MALLOC_DEFINE(M_ATAPCI, "ATA PCI", "ATA driver PCI"); /* misc defines */ #define IOMASK 0xfffffffc /* prototypes */ static int ata_pci_allocate(device_t, struct ata_channel *); static void ata_pci_dmainit(struct ata_channel *); static void ata_pci_locknoop(struct ata_channel *, int); int ata_legacy(device_t dev) { return ((pci_read_config(dev, PCIR_PROGIF, 1)&PCIP_STORAGE_IDE_MASTERDEV) && ((pci_read_config(dev, PCIR_PROGIF, 1) & (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC)) != (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC))); } static int ata_pci_probe(device_t dev) { if (pci_get_class(dev) != PCIC_STORAGE) return ENXIO; switch (pci_get_vendor(dev)) { case ATA_ACARD_ID: if (!ata_acard_ident(dev)) return 0; break; case ATA_ACER_LABS_ID: if (!ata_ali_ident(dev)) return 0; break; case ATA_AMD_ID: if (!ata_amd_ident(dev)) return 0; break; case ATA_CYRIX_ID: if (!ata_cyrix_ident(dev)) return 0; break; case ATA_CYPRESS_ID: if (!ata_cypress_ident(dev)) return 0; break; case ATA_HIGHPOINT_ID: if (!ata_highpoint_ident(dev)) return 0; break; case ATA_INTEL_ID: if (!ata_intel_ident(dev)) return 0; break; case ATA_NATIONAL_ID: if (!ata_national_ident(dev)) return 0; break; case ATA_NVIDIA_ID: if (!ata_nvidia_ident(dev)) return 0; break; case ATA_PROMISE_ID: if (!ata_promise_ident(dev)) return 0; break; case ATA_SERVERWORKS_ID: if (!ata_serverworks_ident(dev)) return 0; break; case ATA_SILICON_IMAGE_ID: if (!ata_sii_ident(dev)) return 0; break; case ATA_SIS_ID: if (!ata_sis_ident(dev)) return 0; break; case ATA_VIA_ID: if (!ata_via_ident(dev)) return 0; break; case 0x16ca: if (pci_get_devid(dev) == 0x000116ca) { ata_generic_ident(dev); device_set_desc(dev, "Cenatek Rocket Drive controller"); return 
0; } break; case 0x1042: if (pci_get_devid(dev)==0x10001042 || pci_get_devid(dev)==0x10011042) { ata_generic_ident(dev); device_set_desc(dev, "RZ 100? ATA controller !WARNING! buggy HW data loss possible"); return 0; } break; } /* unknown chipset, try generic DMA if it seems possible */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE)) return ata_generic_ident(dev); return ENXIO; } static int ata_pci_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); u_int32_t cmd; int unit; /* do chipset specific setups only needed once */ if (ata_legacy(dev) || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK) ctlr->channels = 2; else ctlr->channels = 1; ctlr->allocate = ata_pci_allocate; ctlr->dmainit = ata_pci_dmainit; ctlr->locking = ata_pci_locknoop; /* if needed try to enable busmastering */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (!(cmd & PCIM_CMD_BUSMASTEREN)) { pci_write_config(dev, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN, 2); cmd = pci_read_config(dev, PCIR_COMMAND, 2); } /* if busmastering mode "stuck" use it */ if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) { ctlr->r_type1 = SYS_RES_IOPORT; ctlr->r_rid1 = ATA_BMADDR_RID; ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE); } ctlr->chipinit(dev); /* attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { if (unit == 0 && (pci_get_progif(dev) & 0x81) == 0x80) { device_add_child(dev, "ata", unit); continue; } if (unit == 1 && (pci_get_progif(dev) & 0x84) == 0x80) { device_add_child(dev, "ata", unit); continue; } device_add_child(dev, "ata", devclass_find_free_unit(ata_devclass, 2)); } return bus_generic_attach(dev); } static int ata_pci_detach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_channel *ch; int unit; /* mark HW as gone, we dont want to issue commands to HW no longer there */ for (unit = 0; unit < ctlr->channels; unit++) { if 
((ch = ctlr->interrupt[unit].argument)) ch->flags |= ATA_HWGONE; } bus_generic_detach(dev); if (ctlr->r_irq) { bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ctlr->r_irq); } if (ctlr->r_res2) bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2); if (ctlr->r_res1) bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); return 0; } static int ata_pci_print_child(device_t dev, device_t child) { struct ata_channel *ch = device_get_softc(child); int retval = 0; retval += bus_print_child_header(dev, child); retval += printf(": at 0x%lx", rman_get_start(ch->r_io[ATA_IDX_ADDR].res)); if (ata_legacy(dev)) retval += printf(" irq %d", 14 + ch->unit); retval += bus_print_child_footer(dev, child); return retval; } static struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int myrid; if (type == SYS_RES_IOPORT) { switch (*rid) { case ATA_IOADDR_RID: if (ata_legacy(dev)) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY); count = ATA_IOSIZE; end = start + count - 1; } myrid = PCIR_BAR(0) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; case ATA_ALTADDR_RID: if (ata_legacy(dev)) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_ALTOFFSET; count = ATA_ALTIOSIZE; end = start + count - 1; } myrid = PCIR_BAR(1) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; } return res; } if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) { if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_alloc_ide_intr(unit); #else int irq = (unit == 0 ? 
14 : 15); return BUS_ALLOC_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, irq, irq, 1, flags); #endif } else { return controller->r_irq; } } return 0; } static int ata_pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { int unit = ((struct ata_channel *)device_get_softc(child))->unit; if (type == SYS_RES_IOPORT) { switch (rid) { case ATA_IOADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, PCIR_BAR(0) + (unit << 3), r); break; case ATA_ALTADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, PCIR_BAR(1) + (unit << 3), r); break; default: return ENOENT; } } if (type == SYS_RES_IRQ) { if (rid != ATA_IRQ_RID) return ENOENT; if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_release_ide_intr(unit, r); #else return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, r); #endif } else return 0; } return EINVAL; } static int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_intr_t *function, void *argument, void **cookiep) { if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_setup_ide_intr(child, irq, function, argument, cookiep); #else return BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, function, argument, cookiep); #endif } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; controller->interrupt[unit].function = function; controller->interrupt[unit].argument = argument; *cookiep = controller; return 0; } } static int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_teardown_ide_intr(child, irq, cookie); #else return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, cookie); #endif } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct 
ata_channel *)device_get_softc(child))->unit; controller->interrupt[unit].function = NULL; controller->interrupt[unit].argument = NULL; return 0; } } static int ata_pci_allocate(device_t dev, struct ata_channel *ch) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct resource *io = NULL, *altio = NULL; int i, rid; rid = ATA_IOADDR_RID; io = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, ATA_IOSIZE, RF_ACTIVE); if (!io) return ENXIO; rid = ATA_ALTADDR_RID; altio = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, ATA_ALTIOSIZE, RF_ACTIVE); if (!altio) { bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io); return ENXIO; } for (i = ATA_DATA; i <= ATA_STATUS; i ++) { ch->r_io[i].res = io; ch->r_io[i].offset = i; } ch->r_io[ATA_ALTSTAT].res = altio; ch->r_io[ATA_ALTSTAT].offset = ata_legacy(device_get_parent(dev)) ? 0 : 2; ch->r_io[ATA_IDX_ADDR].res = io; if (ctlr->r_res1) { for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) { ch->r_io[i].res = ctlr->r_res1; ch->r_io[i].offset = (i - ATA_BMCMD_PORT)+(ch->unit * ATA_BMIOSIZE); } /* if simplex controller, only allow DMA on primary channel */ ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & (ATA_BMSTAT_DMA_MASTER | ATA_BMSTAT_DMA_SLAVE)); if (ch->unit > 0 && (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_DMA_SIMPLEX)) device_printf(dev, "simplex device, DMA on primary only\n"); else ctlr->dmainit(ch); } ata_generic_hw(ch); return 0; } static int ata_pci_dmastart(struct ata_channel *ch) { ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) | (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR))); ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->mdmatab); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, (ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_WRITE_READ) | ((ch->dma->flags & ATA_DMA_READ) ? 
ATA_BMCMD_WRITE_READ : 0) | ATA_BMCMD_START_STOP); + ch->dma->flags |= ATA_DMA_ACTIVE; return 0; } static int ata_pci_dmastop(struct ata_channel *ch) { int error; error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); + ch->dma->flags &= ~ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); return error; } static void ata_pci_dmainit(struct ata_channel *ch) { ata_dmainit(ch); if (ch->dma) { ch->dma->start = ata_pci_dmastart; ch->dma->stop = ata_pci_dmastop; } } static void ata_pci_locknoop(struct ata_channel *ch, int flags) { } static device_method_t ata_pci_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pci_probe), DEVMETHOD(device_attach, ata_pci_attach), DEVMETHOD(device_detach, ata_pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus methods */ DEVMETHOD(bus_print_child, ata_pci_print_child), DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), DEVMETHOD(bus_release_resource, ata_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), { 0, 0 } }; static driver_t ata_pci_driver = { "atapci", ata_pci_methods, sizeof(struct ata_pci_controller), }; static devclass_t ata_pci_devclass; DRIVER_MODULE(atapci, pci, ata_pci_driver, ata_pci_devclass, 0, 0); static int ata_pcisub_probe(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); device_t *children; int count, error, i; /* take care of green memory */ bzero(ch, sizeof(struct ata_channel)); /* find channel number on this controller */ 
device_get_children(device_get_parent(dev), &children, &count); for (i = 0; i < count; i++) { if (children[i] == dev) ch->unit = i; } free(children, M_TEMP); ch->device[MASTER].setmode = ctlr->setmode; ch->device[SLAVE].setmode = ctlr->setmode; ch->locking = ctlr->locking; ch->reset = ctlr->reset; if ((error = ctlr->allocate(dev, ch))) return error; return ata_probe(dev); } static device_method_t ata_pcisub_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pcisub_probe), DEVMETHOD(device_attach, ata_attach), DEVMETHOD(device_detach, ata_detach), DEVMETHOD(device_suspend, ata_suspend), DEVMETHOD(device_resume, ata_resume), { 0, 0 } }; static driver_t ata_pcisub_driver = { "ata", ata_pcisub_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, atapci, ata_pcisub_driver, ata_devclass, 0, 0); diff --git a/sys/dev/ata/ata-queue.c b/sys/dev/ata/ata-queue.c index 8b2b318cfaf6..34edd2dd759e 100644 --- a/sys/dev/ata/ata-queue.c +++ b/sys/dev/ata/ata-queue.c @@ -1,624 +1,624 @@ /*- * Copyright (c) 1998 - 2004 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static void ata_completed(void *, int); static void ata_timeout(struct ata_request *); static char *ata_skey2str(u_int8_t); void ata_queue_request(struct ata_request *request) { /* mark request as virgin */ request->result = request->status = request->error = 0; if (!request->callback && !(request->flags & ATA_R_REQUEUE)) sema_init(&request->done, 0, "ATA request done"); /* in IMMEDIATE_MODE we dont queue but call HW directly */ /* used only during reinit for getparm and config */ if ((request->device->channel->flags & ATA_IMMEDIATE_MODE) && (request->flags & (ATA_R_CONTROL | ATA_R_IMMEDIATE))) { /* arm timeout */ if (!request->timeout_handle.callout && !dumping) { request->timeout_handle = timeout((timeout_t*)ata_timeout, request, request->timeout*hz); } /* kick HW into action */ if (request->device->channel->hw.transaction(request)==ATA_OP_FINISHED){ if (!request->callback) sema_destroy(&request->done); return; } } else { /* put request on the locked queue at the specified location */ mtx_lock(&request->device->channel->queue_mtx); if (request->flags & 
ATA_R_IMMEDIATE) TAILQ_INSERT_HEAD(&request->device->channel->ata_queue, request, chain); else TAILQ_INSERT_TAIL(&request->device->channel->ata_queue, request, chain); mtx_unlock(&request->device->channel->queue_mtx); ATA_DEBUG_RQ(request, "queued"); ata_start(request->device->channel); } /* if this is a requeued request callback/sleep has been setup */ if (request->flags & ATA_R_REQUEUE) return; /* if this is not a callback wait until request is completed */ if (!request->callback) { ATA_DEBUG_RQ(request, "wait for completition"); sema_wait(&request->done); sema_destroy(&request->done); } } int ata_controlcmd(struct ata_device *atadev, u_int8_t command, u_int16_t feature, u_int64_t lba, u_int16_t count) { struct ata_request *request = ata_alloc_request(); int error = ENOMEM; if (request) { request->device = atadev; request->u.ata.command = command; request->u.ata.lba = lba; request->u.ata.count = count; request->u.ata.feature = feature; request->flags = ATA_R_CONTROL; request->timeout = 5; request->retries = -1; ata_queue_request(request); error = request->result; ata_free_request(request); } return error; } int ata_atapicmd(struct ata_device *atadev, u_int8_t *ccb, caddr_t data, int count, int flags, int timeout) { struct ata_request *request = ata_alloc_request(); int error = ENOMEM; if (request) { request->device = atadev; if ((atadev->param->config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12) bcopy(ccb, request->u.atapi.ccb, 12); else bcopy(ccb, request->u.atapi.ccb, 16); request->data = data; request->bytecount = count; request->transfersize = min(request->bytecount, 65534); request->flags = flags | ATA_R_ATAPI; request->timeout = timeout; ata_queue_request(request); error = request->result; ata_free_request(request); } return error; } void ata_start(struct ata_channel *ch) { struct ata_request *request; /* if in immediate mode, just skip start requests (stall queue) */ if (ch->flags & ATA_IMMEDIATE_MODE) return; - /* lock the ATA HW for this request */ - 
mtx_lock(&ch->queue_mtx); - ch->locking(ch, ATA_LF_LOCK); - if (!ATA_LOCK_CH(ch)) { - mtx_unlock(&ch->queue_mtx); - return; - } - /* if we dont have any work, ask the subdriver(s) */ + mtx_lock(&ch->queue_mtx); if (TAILQ_EMPTY(&ch->ata_queue)) { mtx_unlock(&ch->queue_mtx); if (ch->device[MASTER].start) ch->device[MASTER].start(&ch->device[MASTER]); if (ch->device[SLAVE].start) ch->device[SLAVE].start(&ch->device[SLAVE]); mtx_lock(&ch->queue_mtx); } + + /* if we have work todo, try to lock the ATA HW and start transaction */ if ((request = TAILQ_FIRST(&ch->ata_queue))) { + ch->locking(ch, ATA_LF_LOCK); + if (!ATA_LOCK_CH(ch)) { + mtx_unlock(&ch->queue_mtx); + return; + } + TAILQ_REMOVE(&ch->ata_queue, request, chain); mtx_unlock(&ch->queue_mtx); ATA_DEBUG_RQ(request, "starting"); /* arm timeout */ if (!request->timeout_handle.callout && !dumping) { request->timeout_handle = timeout((timeout_t*)ata_timeout, request, request->timeout*hz); } /* kick HW into action and wait for interrupt if it flies*/ if (ch->hw.transaction(request) == ATA_OP_CONTINUES) return; - } - /* unlock ATA channel HW */ - ATA_UNLOCK_CH(ch); - ch->locking(ch, ATA_LF_UNLOCK); + /* unlock ATA channel HW */ + ATA_UNLOCK_CH(ch); + ch->locking(ch, ATA_LF_UNLOCK); - /* if we have a request here it failed and should be completed */ - if (request) + /* finish up this (failed) request */ ata_finish(request); + } else mtx_unlock(&ch->queue_mtx); } void ata_finish(struct ata_request *request) { ATA_DEBUG_RQ(request, "taskqueue completition"); /* request is done schedule it for completition */ if (request->device->channel->flags & ATA_IMMEDIATE_MODE) { ata_completed(request, 0); } else { if (request->bio && !(request->flags & ATA_R_TIMEOUT)) bio_taskqueue(request->bio, (bio_task_t *)ata_completed, request); else { TASK_INIT(&request->task, 0, ata_completed, request); taskqueue_enqueue(taskqueue_thread, &request->task); } } } /* current command finished, clean up and return result */ static void 
ata_completed(void *context, int dummy) { struct ata_request *request = (struct ata_request *)context; struct ata_channel *channel = request->device->channel; ATA_DEBUG_RQ(request, "completed called"); if (request->flags & ATA_R_TIMEOUT) { /* if negative retry count just give up and unlock channel HW */ if (request->retries < 0) { if (!(request->flags & ATA_R_QUIET)) ata_prtdev(request->device, "FAILURE - %s no interrupt\n", ata_cmd2str(request)); request->result = EIO; ATA_UNLOCK_CH(channel); channel->locking(channel, ATA_LF_UNLOCK); } else { /* reset controller and devices */ ata_reinit(channel); /* if retries still permit, reinject this request */ if (request->retries-- > 0) { request->flags &= ~ATA_R_TIMEOUT; request->flags |= (ATA_R_IMMEDIATE | ATA_R_REQUEUE); ata_queue_request(request); return; } /* otherwise just finish with error */ else { if (!(request->flags & ATA_R_QUIET)) ata_prtdev(request->device, "FAILURE - %s timed out\n", ata_cmd2str(request)); if (!request->result) request->result = EIO; } } } else { /* untimeout request now we have control back */ untimeout((timeout_t *)ata_timeout, request, request->timeout_handle); /* do the all the magic for completition evt retry etc etc */ if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) { ata_prtdev(request->device, "WARNING - %s soft error (ECC corrected)", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } /* if this is a UDMA CRC error, retry request */ if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) { if (request->retries-- > 0) { ata_prtdev(request->device, "WARNING - %s UDMA ICRC error (retrying request)", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); request->flags |= (ATA_R_IMMEDIATE | ATA_R_REQUEUE); ata_queue_request(request); return; } } } switch 
(request->flags & ATA_R_ATAPI) { /* ATA errors */ default: if (!request->result && request->status & ATA_S_ERROR) { if (!(request->flags & ATA_R_QUIET)) { ata_prtdev(request->device, "FAILURE - %s status=%b error=%b", ata_cmd2str(request), request->status, "\20\10BUSY\7READY\6DMA_READY" "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR", request->error, "\20\10ICRC\7UNCORRECTABLE" "\6MEDIA_CHANGED\5NID_NOT_FOUND\4MEDIA_CHANGE_REQEST" "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH"); if ((request->flags & ATA_R_DMA) && (request->dmastat & ATA_BMSTAT_ERROR)) printf(" dma=0x%02x", request->dmastat); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } /* SOS this could be more precise ? XXX */ request->result = EIO; } break; /* ATAPI errors */ case ATA_R_ATAPI: /* skip if result already set */ if (request->result) break; /* if we have a sensekey -> request sense from device */ if (request->error & ATA_SK_MASK && request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE) { static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0, sizeof(struct atapi_sense), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; request->u.atapi.sense_key = request->error; request->u.atapi.sense_cmd = request->u.atapi.ccb[0]; bcopy(ccb, request->u.atapi.ccb, 16); request->data = (caddr_t)&request->u.atapi.sense_data; request->bytecount = sizeof(struct atapi_sense); request->transfersize = sizeof(struct atapi_sense); request->timeout = 5; request->flags &= (ATA_R_ATAPI | ATA_R_QUIET); request->flags |= (ATA_R_READ | ATA_R_IMMEDIATE | ATA_R_REQUEUE); ata_queue_request(request); return; } switch (request->u.atapi.sense_key & ATA_SK_MASK) { case ATA_SK_RECOVERED_ERROR: ata_prtdev(request->device, "WARNING - %s recovered error\n", ata_cmd2str(request)); /* FALLTHROUGH */ case ATA_SK_NO_SENSE: request->result = 0; break; case ATA_SK_NOT_READY: request->result = EBUSY; break; case ATA_SK_UNIT_ATTENTION: request->device->flags |= ATA_D_MEDIA_CHANGED; request->result = 
EIO; break; default: request->result = EIO; if (request->flags & ATA_R_QUIET) break; ata_prtdev(request->device, "FAILURE - %s %s asc=0x%02x ascq=0x%02x ", ata_cmd2str(request), ata_skey2str( (request->u.atapi.sense_key & ATA_SK_MASK) >> 4), request->u.atapi.sense_data.asc, request->u.atapi.sense_data.ascq); if (request->u.atapi.sense_data.sksv) printf("sks=0x%02x 0x%02x 0x%02x ", request->u.atapi.sense_data.sk_specific, request->u.atapi.sense_data.sk_specific1, request->u.atapi.sense_data.sk_specific2); printf("error=%b\n", (request->u.atapi.sense_key & ATA_E_MASK), "\20\4MEDIA_CHANGE_REQUEST\3ABORTED" "\2NO_MEDIA\1ILLEGAL_LENGTH"); } if ((request->u.atapi.sense_key ? request->u.atapi.sense_key : request->error) & ATA_E_MASK) request->result = EIO; } ATA_DEBUG_RQ(request, "completed callback/wakeup"); if (request->callback) (request->callback)(request); else sema_post(&request->done); ata_start(channel); } static void ata_timeout(struct ata_request *request) { ATA_DEBUG_RQ(request, "timeout"); /* clear timeout etc */ request->timeout_handle.callout = NULL; if (request->flags & ATA_R_INTR_SEEN) { if (request->retries-- > 0) { ata_prtdev(request->device, "WARNING - %s interrupt was seen but timeout fired", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); /* re-arm timeout */ if (!request->timeout_handle.callout && !dumping) { request->timeout_handle = timeout((timeout_t*)ata_timeout, request, request->timeout * hz); } } else { ata_prtdev(request->device, "WARNING - %s interrupt was seen but taskqueue stalled", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); ata_completed(request, 0); } return; } /* report that we timed out */ if (!(request->flags & ATA_R_QUIET) && request->retries > 0) { ata_prtdev(request->device, "TIMEOUT - %s retrying (%d retr%s left)", 
ata_cmd2str(request), request->retries, request->retries == 1 ? "y" : "ies"); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } /* now simulate the missing interrupt */ request->flags |= ATA_R_TIMEOUT; request->device->channel->hw.interrupt(request->device->channel); return; } void ata_fail_requests(struct ata_channel *ch, struct ata_device *device) { struct ata_request *request; /* fail all requests queued on this channel */ mtx_lock(&ch->queue_mtx); while ((request = TAILQ_FIRST(&ch->ata_queue))) { if (!device || request->device == device) { TAILQ_REMOVE(&ch->ata_queue, request, chain); request->result = ENXIO; if (request->callback) (request->callback)(request); else sema_post(&request->done); } } mtx_unlock(&ch->queue_mtx); /* if we have a request "in flight" fail it as well */ if ((request = ch->running) && (!device || request->device == device)) { untimeout((timeout_t *)ata_timeout, request, request->timeout_handle); ATA_UNLOCK_CH(request->device->channel); request->device->channel->locking(request->device->channel, ATA_LF_UNLOCK); request->result = ENXIO; if (request->callback) (request->callback)(request); else sema_post(&request->done); } } char * ata_cmd2str(struct ata_request *request) { static char buffer[20]; if (request->flags & ATA_R_ATAPI) { switch (request->u.atapi.sense_key ? 
request->u.atapi.sense_cmd : request->u.atapi.ccb[0]) { case 0x00: return ("TEST_UNIT_READY"); case 0x01: return ("REZERO"); case 0x03: return ("REQUEST_SENSE"); case 0x04: return ("FORMAT"); case 0x08: return ("READ"); case 0x0a: return ("WRITE"); case 0x10: return ("WEOF"); case 0x11: return ("SPACE"); case 0x12: return ("INQUIRY"); case 0x15: return ("MODE_SELECT"); case 0x19: return ("ERASE"); case 0x1a: return ("MODE_SENSE"); case 0x1b: return ("START_STOP"); case 0x1e: return ("PREVENT_ALLOW"); case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES"); case 0x25: return ("READ_CAPACITY"); case 0x28: return ("READ_BIG"); case 0x2a: return ("WRITE_BIG"); case 0x2b: return ("LOCATE"); case 0x34: return ("READ_POSITION"); case 0x35: return ("SYNCHRONIZE_CACHE"); case 0x3b: return ("WRITE_BUFFER"); case 0x3c: return ("READ_BUFFER"); case 0x42: return ("READ_SUBCHANNEL"); case 0x43: return ("READ_TOC"); case 0x45: return ("PLAY_10"); case 0x47: return ("PLAY_MSF"); case 0x48: return ("PLAY_TRACK"); case 0x4b: return ("PAUSE"); case 0x51: return ("READ_DISK_INFO"); case 0x52: return ("READ_TRACK_INFO"); case 0x53: return ("RESERVE_TRACK"); case 0x54: return ("SEND_OPC_INFO"); case 0x55: return ("MODE_SELECT_BIG"); case 0x58: return ("REPAIR_TRACK"); case 0x59: return ("READ_MASTER_CUE"); case 0x5a: return ("MODE_SENSE_BIG"); case 0x5b: return ("CLOSE_TRACK/SESSION"); case 0x5c: return ("READ_BUFFER_CAPACITY"); case 0x5d: return ("SEND_CUE_SHEET"); case 0xa1: return ("BLANK_CMD"); case 0xa3: return ("SEND_KEY"); case 0xa4: return ("REPORT_KEY"); case 0xa5: return ("PLAY_12"); case 0xa6: return ("LOAD_UNLOAD"); case 0xad: return ("READ_DVD_STRUCTURE"); case 0xb4: return ("PLAY_CD"); case 0xbb: return ("SET_SPEED"); case 0xbd: return ("MECH_STATUS"); case 0xbe: return ("READ_CD"); case 0xff: return ("POLL_DSC"); } } else { switch (request->u.ata.command) { case 0x00: return ("NOP"); case 0x08: return ("ATAPI_RESET"); case 0x20: return ("READ"); case 0x24: return ("READ48"); 
case 0x25: return ("READ_DMA48"); case 0x26: return ("READ_DMA_QUEUED48"); case 0x29: return ("READ_MUL48"); case 0x30: return ("WRITE"); case 0x34: return ("WRITE48"); case 0x35: return ("WRITE_DMA48"); case 0x36: return ("WRITE_DMA_QUEUED48"); case 0x39: return ("WRITE_MUL48"); case 0xa0: return ("PACKET_CMD"); case 0xa1: return ("ATAPI_IDENTIFY"); case 0xa2: return ("SERVICE"); case 0xc4: return ("READ_MUL"); case 0xc5: return ("WRITE_MUL"); case 0xc6: return ("SET_MULTI"); case 0xc7: return ("READ_DMA_QUEUED"); case 0xc8: return ("READ_DMA"); case 0xca: return ("WRITE_DMA"); case 0xcc: return ("WRITE_DMA_QUEUED"); case 0xe6: return ("SLEEP"); case 0xe7: return ("FLUSHCACHE"); case 0xea: return ("FLUSHCACHE48"); case 0xec: return ("ATA_IDENTIFY"); case 0xef: switch (request->u.ata.feature) { case 0x03: return ("SETFEATURES SET TRANSFER MODE"); case 0x02: return ("SETFEATURES ENABLE WCACHE"); case 0x82: return ("SETFEATURES DISABLE WCACHE"); case 0xaa: return ("SETFEATURES ENABLE RCACHE"); case 0x55: return ("SETFEATURES DISABLE RCACHE"); } sprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature); return buffer; } } sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command); return buffer; } static char * ata_skey2str(u_int8_t skey) { switch (skey) { case 0x00: return ("NO SENSE"); case 0x01: return ("RECOVERED ERROR"); case 0x02: return ("NOT READY"); case 0x03: return ("MEDIUM ERROR"); case 0x04: return ("HARDWARE ERROR"); case 0x05: return ("ILLEGAL REQUEST"); case 0x06: return ("UNIT ATTENTION"); case 0x07: return ("DATA PROTECT"); case 0x08: return ("BLANK CHECK"); case 0x09: return ("VENDOR SPECIFIC"); case 0x0a: return ("COPY ABORTED"); case 0x0b: return ("ABORTED COMMAND"); case 0x0c: return ("EQUAL"); case 0x0d: return ("VOLUME OVERFLOW"); case 0x0e: return ("MISCOMPARE"); case 0x0f: return ("RESERVED"); default: return("UNKNOWN"); } }