diff --git a/sys/dev/ata/ata-all.c b/sys/dev/ata/ata-all.c index 12dcfb61a7d9..e1a630ae3edd 100644 --- a/sys/dev/ata/ata-all.c +++ b/sys/dev/ata/ata-all.c @@ -1,1047 +1,1047 @@ /*- * Copyright (c) 1998 - 2006 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __alpha__ #include #endif #include #include /* device structure */ static d_ioctl_t ata_ioctl; static struct cdevsw ata_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */ .d_ioctl = ata_ioctl, .d_name = "ata", }; /* prototypes */ static void ata_boot_attach(void); static device_t ata_add_child(device_t, struct ata_device *, int); static int ata_getparam(struct ata_device *, int); static void bswap(int8_t *, int); static void btrim(int8_t *, int); static void bpack(int8_t *, int8_t *, int); /* global vars */ MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer"); int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; devclass_t ata_devclass; uma_zone_t ata_request_zone; uma_zone_t ata_composite_zone; int ata_wc = 1; /* local vars */ static struct intr_config_hook *ata_delayed_attach = NULL; static int ata_dma = 1; static int atapi_dma = 1; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); TUNABLE_INT("hw.ata.ata_dma", &ata_dma); SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0, "ATA disk DMA mode control"); TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0, "ATAPI device DMA mode control"); TUNABLE_INT("hw.ata.wc", &ata_wc); SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0, "ATA disk write caching"); /* * newbus device interface related functions */ int ata_probe(device_t dev) { return 0; } int ata_attach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); int error, rid; /* check that we have a virgin channel to attach */ if (ch->r_irq) return EEXIST; /* initialize the softc basics */ ch->dev = dev; ch->state = ATA_IDLE; 
bzero(&ch->state_mtx, sizeof(struct mtx)); mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); bzero(&ch->queue_mtx, sizeof(struct mtx)); mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF); TAILQ_INIT(&ch->ata_queue); /* reset the controller HW, the channel and device(s) */ while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) tsleep(&error, PRIBIO, "ataatch", 1); ATA_RESET(dev); ATA_LOCKING(dev, ATA_LF_UNLOCK); /* setup interrupt delivery */ rid = ATA_IRQ_RID; ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!ch->r_irq) { device_printf(dev, "unable to allocate interrupt\n"); return ENXIO; } if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, (driver_intr_t *)ata_interrupt, ch, &ch->ih))) { device_printf(dev, "unable to setup interrupt\n"); return error; } /* probe and attach devices on this channel unless we are in early boot */ if (!ata_delayed_attach) ata_identify(dev); return 0; } int ata_detach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); device_t *children; int nchildren, i; /* check that we have a valid channel to detach */ if (!ch->r_irq) return ENXIO; /* detach & delete all children */ if (!device_get_children(dev, &children, &nchildren)) { for (i = 0; i < nchildren; i++) if (children[i]) device_delete_child(dev, children[i]); free(children, M_TEMP); } /* release resources */ bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ch->r_irq = NULL; mtx_destroy(&ch->state_mtx); mtx_destroy(&ch->queue_mtx); return 0; } int ata_reinit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; device_t *children; int nchildren, i; /* check that we have a valid channel to reinit */ if (!ch || !ch->r_irq) return ENXIO; if (bootverbose) device_printf(dev, "reiniting channel ..\n"); /* poll for locking the channel */ while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) tsleep(&dev, PRIBIO, "atarini", 1); /* 
unconditionally grap the channel lock */ mtx_lock(&ch->state_mtx); ch->state = ATA_STALL_QUEUE; mtx_unlock(&ch->state_mtx); /* reset the controller HW, the channel and device(s) */ ATA_RESET(dev); /* reinit the children and delete any that fails */ if (!device_get_children(dev, &children, &nchildren)) { mtx_lock(&Giant); /* newbus suckage it needs Giant */ for (i = 0; i < nchildren; i++) { if (children[i] && device_is_attached(children[i])) if (ATA_REINIT(children[i])) { /* * if we have a running request and its device matches * this child we need to inform the request that the * device is gone and remove it from ch->running */ mtx_lock(&ch->state_mtx); if (ch->running && ch->running->dev == children[i]) { callout_stop(&ch->running->callout); request = ch->running; - ch->running = NULL; + ch->running = NULL; } else request = NULL; - mtx_unlock(&ch->state_mtx); + mtx_unlock(&ch->state_mtx); if (request) { request->result = ENXIO; device_printf(request->dev, "FAILURE - device detached\n"); /* if not timeout finish request here */ if (!(request->flags & ATA_R_TIMEOUT)) ata_finish(request); - } + } device_delete_child(dev, children[i]); } } free(children, M_TEMP); mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ } /* catch request in ch->running if we havn't already */ mtx_lock(&ch->state_mtx); if ((request = ch->running)) callout_stop(&request->callout); ch->running = NULL; mtx_unlock(&ch->state_mtx); /* if we got one put it on the queue again */ if (request) { device_printf(request->dev, "WARNING - %s requeued due to channel reset", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); request->flags |= ATA_R_REQUEUE; ata_queue_request(request); } /* we're done release the channel for new work */ mtx_lock(&ch->state_mtx); ch->state = ATA_IDLE; mtx_unlock(&ch->state_mtx); ATA_LOCKING(dev, ATA_LF_UNLOCK); if (bootverbose) device_printf(dev, "reinit done 
..\n"); /* kick off requests on the queue */ ata_start(dev); return 0; } int ata_suspend(device_t dev) { struct ata_channel *ch; /* check for valid device */ if (!dev || !(ch = device_get_softc(dev))) return ENXIO; /* wait for the channel to be IDLE before entering suspend mode */ while (1) { mtx_lock(&ch->state_mtx); if (ch->state == ATA_IDLE) { ch->state = ATA_ACTIVE; mtx_unlock(&ch->state_mtx); break; } mtx_unlock(&ch->state_mtx); tsleep(ch, PRIBIO, "atasusp", hz/10); } ATA_LOCKING(dev, ATA_LF_UNLOCK); return 0; } int ata_resume(device_t dev) { struct ata_channel *ch; int error; /* check for valid device */ if (!dev || !(ch = device_get_softc(dev))) return ENXIO; /* reinit the devices, we dont know what mode/state they are in */ error = ata_reinit(dev); /* kick off requests on the queue */ ata_start(dev); return error; } int ata_interrupt(void *data) { struct ata_channel *ch = (struct ata_channel *)data; struct ata_request *request; mtx_lock(&ch->state_mtx); do { /* ignore interrupt if its not for us */ if (ch->hw.status && !ch->hw.status(ch->dev)) break; /* do we have a running request */ if (!(request = ch->running)) break; ATA_DEBUG_RQ(request, "interrupt"); /* safetycheck for the right state */ if (ch->state != ATA_ACTIVE && ch->state != ATA_STALL_QUEUE) { device_printf(request->dev, "interrupt on idle channel ignored\n"); break; } /* * we have the HW locks, so end the tranaction for this request * if it finishes immediately otherwise wait for next interrupt */ if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; if (ch->state == ATA_ACTIVE) ch->state = ATA_IDLE; mtx_unlock(&ch->state_mtx); ATA_LOCKING(ch->dev, ATA_LF_UNLOCK); ata_finish(request); return 1; } } while (0); mtx_unlock(&ch->state_mtx); return 0; } /* * device related interfaces */ static int ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int32_t flag, struct thread *td) { device_t device, *children; struct ata_ioc_devices *devices = (struct ata_ioc_devices 
*)data; int *value = (int *)data; int i, nchildren, error = ENOTTY; switch (cmd) { case IOCATAGMAXCHANNEL: *value = devclass_get_maxunit(ata_devclass); error = 0; break; case IOCATAREINIT: if (*value > devclass_get_maxunit(ata_devclass) || !(device = devclass_get_device(ata_devclass, *value))) return ENXIO; error = ata_reinit(device); ata_start(device); break; case IOCATAATTACH: if (*value > devclass_get_maxunit(ata_devclass) || !(device = devclass_get_device(ata_devclass, *value))) return ENXIO; /* XXX SOS should enable channel HW on controller */ error = ata_attach(device); break; case IOCATADETACH: if (*value > devclass_get_maxunit(ata_devclass) || !(device = devclass_get_device(ata_devclass, *value))) return ENXIO; error = ata_detach(device); /* XXX SOS should disable channel HW on controller */ break; case IOCATADEVICES: if (devices->channel > devclass_get_maxunit(ata_devclass) || !(device = devclass_get_device(ata_devclass, devices->channel))) return ENXIO; bzero(devices->name[0], 32); bzero(&devices->params[0], sizeof(struct ata_params)); bzero(devices->name[1], 32); bzero(&devices->params[1], sizeof(struct ata_params)); if (!device_get_children(device, &children, &nchildren)) { for (i = 0; i < nchildren; i++) { if (children[i] && device_is_attached(children[i])) { struct ata_device *atadev = device_get_softc(children[i]); if (atadev->unit == ATA_MASTER) { strncpy(devices->name[0], device_get_nameunit(children[i]), 32); bcopy(&atadev->param, &devices->params[0], sizeof(struct ata_params)); } if (atadev->unit == ATA_SLAVE) { strncpy(devices->name[1], device_get_nameunit(children[i]), 32); bcopy(&atadev->param, &devices->params[1], sizeof(struct ata_params)); } } } free(children, M_TEMP); error = 0; } else error = ENODEV; break; default: if (ata_raid_ioctl_func) error = ata_raid_ioctl_func(cmd, data); } return error; } int ata_device_ioctl(device_t dev, u_long cmd, caddr_t data) { struct ata_device *atadev = device_get_softc(dev); struct ata_ioc_request 
*ioc_request = (struct ata_ioc_request *)data; struct ata_params *params = (struct ata_params *)data; int *mode = (int *)data; struct ata_request *request; caddr_t buf; int error; switch (cmd) { case IOCATAREQUEST: if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) { return ENOMEM; } if (!(request = ata_alloc_request())) { free(buf, M_ATA); return ENOMEM; } if (ioc_request->flags & ATA_CMD_WRITE) { error = copyin(ioc_request->data, buf, ioc_request->count); if (error) { free(buf, M_ATA); ata_free_request(request); return error; } } request->dev = dev; if (ioc_request->flags & ATA_CMD_ATAPI) { request->flags = ATA_R_ATAPI; bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16); } else { request->u.ata.command = ioc_request->u.ata.command; request->u.ata.feature = ioc_request->u.ata.feature; request->u.ata.lba = ioc_request->u.ata.lba; request->u.ata.count = ioc_request->u.ata.count; } request->timeout = ioc_request->timeout; request->data = buf; request->bytecount = ioc_request->count; request->transfersize = request->bytecount; if (ioc_request->flags & ATA_CMD_CONTROL) request->flags |= ATA_R_CONTROL; if (ioc_request->flags & ATA_CMD_READ) request->flags |= ATA_R_READ; if (ioc_request->flags & ATA_CMD_WRITE) request->flags |= ATA_R_WRITE; ata_queue_request(request); if (!(request->flags & ATA_R_ATAPI)) { ioc_request->u.ata.command = request->u.ata.command; ioc_request->u.ata.feature = request->u.ata.feature; ioc_request->u.ata.lba = request->u.ata.lba; ioc_request->u.ata.count = request->u.ata.count; } ioc_request->error = request->result; if (ioc_request->flags & ATA_CMD_READ) error = copyout(buf, ioc_request->data, ioc_request->count); else error = 0; free(buf, M_ATA); ata_free_request(request); return error; case IOCATAGPARM: ata_getparam(atadev, 0); bcopy(&atadev->param, params, sizeof(struct ata_params)); return 0; case IOCATASMODE: atadev->mode = *mode; ATA_SETMODE(device_get_parent(dev), dev); return 0; case IOCATAGMODE: *mode = atadev->mode; 
return 0; default: return ENOTTY; } } static void ata_boot_attach(void) { struct ata_channel *ch; int ctlr; mtx_lock(&Giant); /* newbus suckage it needs Giant */ /* kick of probe and attach on all channels */ for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) { if ((ch = devclass_get_softc(ata_devclass, ctlr))) { ata_identify(ch->dev); } } /* release the hook that got us here, we are only needed once during boot */ if (ata_delayed_attach) { config_intrhook_disestablish(ata_delayed_attach); ata_delayed_attach = NULL; free(ata_delayed_attach, M_TEMP); } mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ } /* * misc support functions */ static device_t ata_add_child(device_t parent, struct ata_device *atadev, int unit) { device_t child; if ((child = device_add_child(parent, NULL, unit))) { device_set_softc(child, atadev); device_quiet(child); atadev->dev = child; atadev->max_iosize = DEV_BSIZE; atadev->mode = ATA_PIO_MAX; } return child; } static int ata_getparam(struct ata_device *atadev, int init) { struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev)); struct ata_request *request; u_int8_t command = 0; int error = ENOMEM, retries = 2; if (ch->devices & (atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE)) command = ATA_ATA_IDENTIFY; if (ch->devices & (atadev->unit == ATA_MASTER ? 
ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE)) command = ATA_ATAPI_IDENTIFY; if (!command) return ENXIO; while (retries-- > 0 && error) { if (!(request = ata_alloc_request())) break; request->dev = atadev->dev; request->timeout = 1; request->retries = 0; request->u.ata.command = command; request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET); request->data = (void *)&atadev->param; request->bytecount = sizeof(struct ata_params); request->donecount = 0; request->transfersize = DEV_BSIZE; ata_queue_request(request); error = request->result; ata_free_request(request); } if (!error && (isprint(atadev->param.model[0]) || isprint(atadev->param.model[1]))) { struct ata_params *atacap = &atadev->param; char buffer[64]; #if BYTE_ORDER == BIG_ENDIAN int16_t *ptr; for (ptr = (int16_t *)atacap; ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) { *ptr = bswap16(*ptr); } #endif if (!(!strncmp(atacap->model, "FX", 2) || !strncmp(atacap->model, "NEC", 3) || !strncmp(atacap->model, "Pioneer", 7) || !strncmp(atacap->model, "SHARP", 5))) { bswap(atacap->model, sizeof(atacap->model)); bswap(atacap->revision, sizeof(atacap->revision)); bswap(atacap->serial, sizeof(atacap->serial)); } btrim(atacap->model, sizeof(atacap->model)); bpack(atacap->model, atacap->model, sizeof(atacap->model)); btrim(atacap->revision, sizeof(atacap->revision)); bpack(atacap->revision, atacap->revision, sizeof(atacap->revision)); btrim(atacap->serial, sizeof(atacap->serial)); bpack(atacap->serial, atacap->serial, sizeof(atacap->serial)); if (bootverbose) printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n", ch->unit, atadev->unit == ATA_MASTER ? "master":"slave", ata_mode2str(ata_pmode(atacap)), ata_mode2str(ata_wmode(atacap)), ata_mode2str(ata_umode(atacap)), (atacap->hwres & ATA_CABLE_ID) ? 
"80":"40"); if (init) { sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision); device_set_desc_copy(atadev->dev, buffer); if (atadev->param.config & ATA_PROTO_ATAPI) { if (atapi_dma && ch->dma && (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR && ata_umode(&atadev->param) >= ATA_UDMA2) atadev->mode = ATA_DMA_MAX; } else { if (ata_dma && ch->dma && (ata_umode(&atadev->param) > 0 || ata_wmode(&atadev->param) > 0)) atadev->mode = ATA_DMA_MAX; } } } else { if (!error) error = ENXIO; } return error; } int ata_identify(device_t dev) { struct ata_channel *ch = device_get_softc(dev); struct ata_device *master = NULL, *slave = NULL; device_t master_child = NULL, slave_child = NULL; int master_unit = -1, slave_unit = -1; if (ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) { if (!(master = malloc(sizeof(struct ata_device), M_ATA, M_NOWAIT | M_ZERO))) { device_printf(dev, "out of memory\n"); return ENOMEM; } master->unit = ATA_MASTER; } if (ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) { if (!(slave = malloc(sizeof(struct ata_device), M_ATA, M_NOWAIT | M_ZERO))) { free(master, M_ATA); device_printf(dev, "out of memory\n"); return ENOMEM; } slave->unit = ATA_SLAVE; } #ifdef ATA_STATIC_ID if (ch->devices & ATA_ATA_MASTER) master_unit = (device_get_unit(dev) << 1); #endif if (master && !(master_child = ata_add_child(dev, master, master_unit))) { free(master, M_ATA); master = NULL; } #ifdef ATA_STATIC_ID if (ch->devices & ATA_ATA_SLAVE) slave_unit = (device_get_unit(dev) << 1) + 1; #endif if (slave && !(slave_child = ata_add_child(dev, slave, slave_unit))) { free(slave, M_ATA); slave = NULL; } if (slave && ata_getparam(slave, 1)) { device_delete_child(dev, slave_child); free(slave, M_ATA); } if (master && ata_getparam(master, 1)) { device_delete_child(dev, master_child); free(master, M_ATA); } bus_generic_probe(dev); bus_generic_attach(dev); return 0; } void ata_default_registers(device_t dev) { struct ata_channel *ch = device_get_softc(dev); /* fill in the 
defaults from whats setup already */ ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res; ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset; ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res; ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset; ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res; ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset; ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res; ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset; } void ata_modify_if_48bit(struct ata_request *request) { struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); struct ata_device *atadev = device_get_softc(request->dev); atadev->flags &= ~ATA_D_48BIT_ACTIVE; if ((request->u.ata.lba >= ATA_MAX_28BIT_LBA || request->u.ata.count > 256) && atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) { /* translate command into 48bit version */ switch (request->u.ata.command) { case ATA_READ: request->u.ata.command = ATA_READ48; break; case ATA_READ_MUL: request->u.ata.command = ATA_READ_MUL48; break; case ATA_READ_DMA: if (ch->flags & ATA_NO_48BIT_DMA) { if (request->transfersize > DEV_BSIZE) request->u.ata.command = ATA_READ_MUL48; else request->u.ata.command = ATA_READ48; request->flags &= ~ATA_R_DMA; } else request->u.ata.command = ATA_READ_DMA48; break; case ATA_READ_DMA_QUEUED: if (ch->flags & ATA_NO_48BIT_DMA) { if (request->transfersize > DEV_BSIZE) request->u.ata.command = ATA_READ_MUL48; else request->u.ata.command = ATA_READ48; request->flags &= ~ATA_R_DMA; } else request->u.ata.command = ATA_READ_DMA_QUEUED48; break; case ATA_WRITE: request->u.ata.command = ATA_WRITE48; break; case ATA_WRITE_MUL: request->u.ata.command = ATA_WRITE_MUL48; break; case ATA_WRITE_DMA: if (ch->flags & ATA_NO_48BIT_DMA) { if (request->transfersize > DEV_BSIZE) request->u.ata.command = ATA_WRITE_MUL48; else request->u.ata.command = ATA_WRITE48; request->flags &= ~ATA_R_DMA; } else request->u.ata.command = ATA_WRITE_DMA48; break; 
case ATA_WRITE_DMA_QUEUED: if (ch->flags & ATA_NO_48BIT_DMA) { if (request->transfersize > DEV_BSIZE) request->u.ata.command = ATA_WRITE_MUL48; else request->u.ata.command = ATA_WRITE48; request->u.ata.command = ATA_WRITE48; request->flags &= ~ATA_R_DMA; } else request->u.ata.command = ATA_WRITE_DMA_QUEUED48; break; case ATA_FLUSHCACHE: request->u.ata.command = ATA_FLUSHCACHE48; break; case ATA_READ_NATIVE_MAX_ADDDRESS: request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48; break; case ATA_SET_MAX_ADDRESS: request->u.ata.command = ATA_SET_MAX_ADDRESS48; break; default: return; } atadev->flags |= ATA_D_48BIT_ACTIVE; } } void ata_udelay(int interval) { /* for now just use DELAY, the timer/sleep subsytems are not there yet */ if (1 || interval < (1000000/hz) || ata_delayed_attach) DELAY(interval); else tsleep(&interval, PRIBIO, "ataslp", interval/(1000000/hz)); } char * ata_mode2str(int mode) { switch (mode) { case -1: return "UNSUPPORTED"; case ATA_PIO0: return "PIO0"; case ATA_PIO1: return "PIO1"; case ATA_PIO2: return "PIO2"; case ATA_PIO3: return "PIO3"; case ATA_PIO4: return "PIO4"; case ATA_WDMA0: return "WDMA0"; case ATA_WDMA1: return "WDMA1"; case ATA_WDMA2: return "WDMA2"; case ATA_UDMA0: return "UDMA16"; case ATA_UDMA1: return "UDMA25"; case ATA_UDMA2: return "UDMA33"; case ATA_UDMA3: return "UDMA40"; case ATA_UDMA4: return "UDMA66"; case ATA_UDMA5: return "UDMA100"; case ATA_UDMA6: return "UDMA133"; case ATA_SA150: return "SATA150"; case ATA_SA300: return "SATA300"; default: if (mode & ATA_DMA_MASK) return "BIOSDMA"; else return "BIOSPIO"; } } int ata_pmode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_64_70) { if (ap->apiomodes & 0x02) return ATA_PIO4; if (ap->apiomodes & 0x01) return ATA_PIO3; } if (ap->mwdmamodes & 0x04) return ATA_PIO4; if (ap->mwdmamodes & 0x02) return ATA_PIO3; if (ap->mwdmamodes & 0x01) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) return ATA_PIO2; if ((ap->retired_piomode & 
ATA_RETIRED_PIO_MASK) == 0x100) return ATA_PIO1; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) return ATA_PIO0; return ATA_PIO0; } int ata_wmode(struct ata_params *ap) { if (ap->mwdmamodes & 0x04) return ATA_WDMA2; if (ap->mwdmamodes & 0x02) return ATA_WDMA1; if (ap->mwdmamodes & 0x01) return ATA_WDMA0; return -1; } int ata_umode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_88) { if (ap->udmamodes & 0x40) return ATA_UDMA6; if (ap->udmamodes & 0x20) return ATA_UDMA5; if (ap->udmamodes & 0x10) return ATA_UDMA4; if (ap->udmamodes & 0x08) return ATA_UDMA3; if (ap->udmamodes & 0x04) return ATA_UDMA2; if (ap->udmamodes & 0x02) return ATA_UDMA1; if (ap->udmamodes & 0x01) return ATA_UDMA0; } return -1; } int ata_limit_mode(device_t dev, int mode, int maxmode) { struct ata_device *atadev = device_get_softc(dev); if (maxmode && mode > maxmode) mode = maxmode; if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0) return min(mode, ata_umode(&atadev->param)); if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0) return min(mode, ata_wmode(&atadev->param)); if (mode > ata_pmode(&atadev->param)) return min(mode, ata_pmode(&atadev->param)); return mode; } static void bswap(int8_t *buf, int len) { u_int16_t *ptr = (u_int16_t*)(buf + len); while (--ptr >= (u_int16_t*)buf) *ptr = ntohs(*ptr); } static void btrim(int8_t *buf, int len) { int8_t *ptr; for (ptr = buf; ptr < buf+len; ++ptr) if (!*ptr || *ptr == '_') *ptr = ' '; for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) *ptr = 0; } static void bpack(int8_t *src, int8_t *dst, int len) { int i, j, blank; for (i = j = blank = 0 ; i < len; i++) { if (blank && src[i] == ' ') continue; if (blank && src[i] != ' ') { dst[j++] = src[i]; blank = 0; continue; } if (src[i] == ' ') { blank = 1; if (i == 0) continue; } dst[j++] = src[i]; } if (j < len) dst[j] = 0x00; } /* * module handeling */ static int ata_module_event_handler(module_t mod, int what, void *arg) { static struct cdev *atacdev; switch (what) { 
case MOD_LOAD: /* register controlling device */ atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata"); if (cold) { /* register boot attach to be run when interrupts are enabled */ if (!(ata_delayed_attach = (struct intr_config_hook *) malloc(sizeof(struct intr_config_hook), M_TEMP, M_NOWAIT | M_ZERO))) { printf("ata: malloc of delayed attach hook failed\n"); return EIO; } ata_delayed_attach->ich_func = (void*)ata_boot_attach; if (config_intrhook_establish(ata_delayed_attach) != 0) { printf("ata: config_intrhook_establish failed\n"); free(ata_delayed_attach, M_TEMP); } } return 0; case MOD_UNLOAD: /* deregister controlling device */ destroy_dev(atacdev); return 0; default: return EOPNOTSUPP; } } static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL }; DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(ata, 1); static void ata_init(void) { ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request), NULL, NULL, NULL, NULL, 0, 0); ata_composite_zone = uma_zcreate("ata_composite", sizeof(struct ata_composite), NULL, NULL, NULL, NULL, 0, 0); } SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL); static void ata_uninit(void) { uma_zdestroy(ata_composite_zone); uma_zdestroy(ata_request_zone); } SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL); diff --git a/sys/dev/ata/ata-all.h b/sys/dev/ata/ata-all.h index 0e5d05240987..1efb447595f4 100644 --- a/sys/dev/ata/ata-all.h +++ b/sys/dev/ata/ata-all.h @@ -1,677 +1,677 @@ /*- * Copyright (c) 1998 - 2006 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* ATA register defines */ #define ATA_DATA 0 /* (RW) data */ #define ATA_FEATURE 1 /* (W) feature */ #define ATA_F_DMA 0x01 /* enable DMA */ #define ATA_F_OVL 0x02 /* enable overlap */ #define ATA_COUNT 2 /* (W) sector count */ #define ATA_SECTOR 3 /* (RW) sector # */ #define ATA_CYL_LSB 4 /* (RW) cylinder# LSB */ #define ATA_CYL_MSB 5 /* (RW) cylinder# MSB */ #define ATA_DRIVE 6 /* (W) Sector/Drive/Head */ #define ATA_D_LBA 0x40 /* use LBA addressing */ #define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */ #define ATA_COMMAND 7 /* (W) command */ #define ATA_ERROR 8 /* (R) error */ #define ATA_E_ILI 0x01 /* illegal length */ #define ATA_E_NM 0x02 /* no media */ #define ATA_E_ABORT 0x04 /* command aborted */ #define ATA_E_MCR 0x08 /* media change request */ #define ATA_E_IDNF 0x10 /* ID not found */ #define ATA_E_MC 0x20 /* media changed */ #define ATA_E_UNC 0x40 /* uncorrectable data */ #define ATA_E_ICRC 0x80 /* UDMA crc error */ #define ATA_E_MASK 0x0f /* error mask */ #define ATA_SK_MASK 0xf0 /* sense key mask */ #define 
ATA_SK_NO_SENSE 0x00 /* no specific sense key info */ #define ATA_SK_RECOVERED_ERROR 0x10 /* command OK, data recovered */ #define ATA_SK_NOT_READY 0x20 /* no access to drive */ #define ATA_SK_MEDIUM_ERROR 0x30 /* non-recovered data error */ #define ATA_SK_HARDWARE_ERROR 0x40 /* non-recoverable HW failure */ #define ATA_SK_ILLEGAL_REQUEST 0x50 /* invalid command param(s) */ #define ATA_SK_UNIT_ATTENTION 0x60 /* media changed */ #define ATA_SK_DATA_PROTECT 0x70 /* write protect */ #define ATA_SK_BLANK_CHECK 0x80 /* blank check */ #define ATA_SK_VENDOR_SPECIFIC 0x90 /* vendor specific skey */ #define ATA_SK_COPY_ABORTED 0xa0 /* copy aborted */ #define ATA_SK_ABORTED_COMMAND 0xb0 /* command aborted, try again */ #define ATA_SK_EQUAL 0xc0 /* equal */ #define ATA_SK_VOLUME_OVERFLOW 0xd0 /* volume overflow */ #define ATA_SK_MISCOMPARE 0xe0 /* data dont match the medium */ #define ATA_SK_RESERVED 0xf0 #define ATA_IREASON 9 /* (R) interrupt reason */ #define ATA_I_CMD 0x01 /* cmd (1) | data (0) */ #define ATA_I_IN 0x02 /* read (1) | write (0) */ #define ATA_I_RELEASE 0x04 /* released bus (1) */ #define ATA_I_TAGMASK 0xf8 /* tag mask */ #define ATA_STATUS 10 /* (R) status */ #define ATA_ALTSTAT 11 /* (R) alternate status */ #define ATA_S_ERROR 0x01 /* error */ #define ATA_S_INDEX 0x02 /* index */ #define ATA_S_CORR 0x04 /* data corrected */ #define ATA_S_DRQ 0x08 /* data request */ #define ATA_S_DSC 0x10 /* drive seek completed */ #define ATA_S_SERVICE 0x10 /* drive needs service */ #define ATA_S_DWF 0x20 /* drive write fault */ #define ATA_S_DMA 0x20 /* DMA ready */ #define ATA_S_READY 0x40 /* drive ready */ #define ATA_S_BUSY 0x80 /* busy */ #define ATA_CONTROL 12 /* (W) control */ #define ATA_CTLOFFSET 0x206 /* control register offset */ #define ATA_PCCARD_CTLOFFSET 0x0e /* do for PCCARD devices */ #define ATA_PC98_CTLOFFSET 0x10c /* do for PC98 devices */ #define ATA_A_IDS 0x02 /* disable interrupts */ #define ATA_A_RESET 0x04 /* RESET controller */ #define ATA_A_4BIT 
0x08 /* 4 head bits */ #define ATA_A_HOB 0x80 /* High Order Byte enable */ /* SATA register defines */ #define ATA_SSTATUS 13 #define ATA_SS_DET_MASK 0x0000000f #define ATA_SS_DET_NO_DEVICE 0x00000000 #define ATA_SS_DET_DEV_PRESENT 0x00000001 #define ATA_SS_DET_PHY_ONLINE 0x00000003 #define ATA_SS_DET_PHY_OFFLINE 0x00000004 #define ATA_SS_SPD_MASK 0x000000f0 #define ATA_SS_SPD_NO_SPEED 0x00000000 #define ATA_SS_SPD_GEN1 0x00000010 #define ATA_SS_SPD_GEN2 0x00000020 #define ATA_SS_IPM_MASK 0x00000f00 #define ATA_SS_IPM_NO_DEVICE 0x00000000 #define ATA_SS_IPM_ACTIVE 0x00000100 #define ATA_SS_IPM_PARTIAL 0x00000200 #define ATA_SS_IPM_SLUMBER 0x00000600 #define ATA_SS_CONWELL_MASK \ (ATA_SS_DET_MASK|ATA_SS_SPD_MASK|ATA_SS_IPM_MASK) #define ATA_SS_CONWELL_GEN1 \ (ATA_SS_DET_PHY_ONLINE|ATA_SS_SPD_GEN1|ATA_SS_IPM_ACTIVE) #define ATA_SS_CONWELL_GEN2 \ (ATA_SS_DET_PHY_ONLINE|ATA_SS_SPD_GEN2|ATA_SS_IPM_ACTIVE) #define ATA_SERROR 14 #define ATA_SE_DATA_CORRECTED 0x00000001 #define ATA_SE_COMM_CORRECTED 0x00000002 #define ATA_SE_DATA_ERR 0x00000100 #define ATA_SE_COMM_ERR 0x00000200 #define ATA_SE_PROT_ERR 0x00000400 #define ATA_SE_HOST_ERR 0x00000800 #define ATA_SE_PHY_CHANGED 0x00010000 #define ATA_SE_PHY_IERROR 0x00020000 #define ATA_SE_COMM_WAKE 0x00040000 #define ATA_SE_DECODE_ERR 0x00080000 #define ATA_SE_PARITY_ERR 0x00100000 #define ATA_SE_CRC_ERR 0x00200000 #define ATA_SE_HANDSHAKE_ERR 0x00400000 #define ATA_SE_LINKSEQ_ERR 0x00800000 #define ATA_SE_TRANSPORT_ERR 0x01000000 #define ATA_SE_UNKNOWN_FIS 0x02000000 #define ATA_SCONTROL 15 #define ATA_SC_DET_MASK 0x0000000f #define ATA_SC_DET_IDLE 0x00000000 #define ATA_SC_DET_RESET 0x00000001 #define ATA_SC_DET_DISABLE 0x00000004 #define ATA_SC_SPD_MASK 0x000000f0 #define ATA_SC_SPD_NO_SPEED 0x00000000 #define ATA_SC_SPD_SPEED_GEN1 0x00000010 #define ATA_SC_SPD_SPEED_GEN2 0x00000020 #define ATA_SC_IPM_MASK 0x00000f00 #define ATA_SC_IPM_NONE 0x00000000 #define ATA_SC_IPM_DIS_PARTIAL 0x00000100 #define ATA_SC_IPM_DIS_SLUMBER 
0x00000200 #define ATA_SACTIVE 16 /* SATA AHCI v1.0 register defines */ #define ATA_AHCI_CAP 0x00 #define ATA_AHCI_NPMASK 0x1f #define ATA_AHCI_GHC 0x04 #define ATA_AHCI_GHC_AE 0x80000000 #define ATA_AHCI_GHC_IE 0x00000002 #define ATA_AHCI_GHC_HR 0x80000001 #define ATA_AHCI_IS 0x08 #define ATA_AHCI_PI 0x0c #define ATA_AHCI_VS 0x10 #define ATA_AHCI_OFFSET 0x80 #define ATA_AHCI_P_CLB 0x100 #define ATA_AHCI_P_CLBU 0x104 #define ATA_AHCI_P_FB 0x108 #define ATA_AHCI_P_FBU 0x10c #define ATA_AHCI_P_IS 0x110 #define ATA_AHCI_P_IE 0x114 #define ATA_AHCI_P_IX_DHR 0x00000001 #define ATA_AHCI_P_IX_PS 0x00000002 #define ATA_AHCI_P_IX_DS 0x00000004 #define ATA_AHCI_P_IX_SDB 0x00000008 #define ATA_AHCI_P_IX_UF 0x00000010 #define ATA_AHCI_P_IX_DP 0x00000020 #define ATA_AHCI_P_IX_PC 0x00000040 #define ATA_AHCI_P_IX_DI 0x00000080 #define ATA_AHCI_P_IX_PRC 0x00400000 #define ATA_AHCI_P_IX_IPM 0x00800000 #define ATA_AHCI_P_IX_OF 0x01000000 #define ATA_AHCI_P_IX_INF 0x04000000 #define ATA_AHCI_P_IX_IF 0x08000000 #define ATA_AHCI_P_IX_HBD 0x10000000 #define ATA_AHCI_P_IX_HBF 0x20000000 #define ATA_AHCI_P_IX_TFE 0x40000000 #define ATA_AHCI_P_IX_CPD 0x80000000 #define ATA_AHCI_P_CMD 0x118 #define ATA_AHCI_P_CMD_ST 0x00000001 #define ATA_AHCI_P_CMD_SUD 0x00000002 #define ATA_AHCI_P_CMD_POD 0x00000004 #define ATA_AHCI_P_CMD_CLO 0x00000008 #define ATA_AHCI_P_CMD_FRE 0x00000010 #define ATA_AHCI_P_CMD_CCS_MASK 0x00001f00 #define ATA_AHCI_P_CMD_ISS 0x00002000 #define ATA_AHCI_P_CMD_FR 0x00004000 #define ATA_AHCI_P_CMD_CR 0x00008000 #define ATA_AHCI_P_CMD_CPS 0x00010000 #define ATA_AHCI_P_CMD_PMA 0x00020000 #define ATA_AHCI_P_CMD_HPCP 0x00040000 #define ATA_AHCI_P_CMD_ISP 0x00080000 #define ATA_AHCI_P_CMD_CPD 0x00100000 #define ATA_AHCI_P_CMD_ATAPI 0x01000000 #define ATA_AHCI_P_CMD_DLAE 0x02000000 #define ATA_AHCI_P_CMD_ALPE 0x04000000 #define ATA_AHCI_P_CMD_ASP 0x08000000 #define ATA_AHCI_P_CMD_ICC_MASK 0xf0000000 #define ATA_AHCI_P_CMD_NOOP 0x00000000 #define ATA_AHCI_P_CMD_ACTIVE 0x10000000 
#define ATA_AHCI_P_CMD_PARTIAL 0x20000000 #define ATA_AHCI_P_CMD_SLUMPER 0x60000000 #define ATA_AHCI_P_TFD 0x120 #define ATA_AHCI_P_SIG 0x124 #define ATA_AHCI_P_SSTS 0x128 #define ATA_AHCI_P_SCTL 0x12c #define ATA_AHCI_P_SERR 0x130 #define ATA_AHCI_P_SACT 0x134 #define ATA_AHCI_P_CI 0x138 #define ATA_AHCI_CL_SIZE 32 #define ATA_AHCI_CL_OFFSET 0 #define ATA_AHCI_FB_OFFSET 1024 #define ATA_AHCI_CT_OFFSET 1024+256 #define ATA_AHCI_CT_SG_OFFSET 128 #define ATA_AHCI_CT_SIZE 256 /* DMA register defines */ #define ATA_DMA_ENTRIES 256 #define ATA_DMA_EOT 0x80000000 #define ATA_BMCMD_PORT 17 #define ATA_BMCMD_START_STOP 0x01 #define ATA_BMCMD_WRITE_READ 0x08 #define ATA_BMDEVSPEC_0 18 #define ATA_BMSTAT_PORT 19 #define ATA_BMSTAT_ACTIVE 0x01 #define ATA_BMSTAT_ERROR 0x02 #define ATA_BMSTAT_INTERRUPT 0x04 #define ATA_BMSTAT_MASK 0x07 #define ATA_BMSTAT_DMA_MASTER 0x20 #define ATA_BMSTAT_DMA_SLAVE 0x40 #define ATA_BMSTAT_DMA_SIMPLEX 0x80 #define ATA_BMDEVSPEC_1 20 #define ATA_BMDTP_PORT 21 #define ATA_IDX_ADDR 22 #define ATA_IDX_DATA 23 #define ATA_MAX_RES 24 /* misc defines */ #define ATA_PRIMARY 0x1f0 #define ATA_SECONDARY 0x170 #define ATA_PC98_BANK 0x432 #define ATA_IOSIZE 0x08 #define ATA_PC98_IOSIZE 0x10 #define ATA_CTLIOSIZE 0x01 #define ATA_BMIOSIZE 0x08 #define ATA_PC98_BANKIOSIZE 0x01 #define ATA_IOADDR_RID 0 #define ATA_CTLADDR_RID 1 #define ATA_BMADDR_RID 0x20 #define ATA_PC98_CTLADDR_RID 8 #define ATA_PC98_BANKADDR_RID 9 #define ATA_IRQ_RID 0 #define ATA_DEV(device) ((device == ATA_MASTER) ? 
0 : 1) #define ATA_CFA_MAGIC 0x848A #define ATAPI_MAGIC_LSB 0x14 #define ATAPI_MAGIC_MSB 0xeb #define ATAPI_P_READ (ATA_S_DRQ | ATA_I_IN) #define ATAPI_P_WRITE (ATA_S_DRQ) #define ATAPI_P_CMDOUT (ATA_S_DRQ | ATA_I_CMD) #define ATAPI_P_DONEDRQ (ATA_S_DRQ | ATA_I_CMD | ATA_I_IN) #define ATAPI_P_DONE (ATA_I_CMD | ATA_I_IN) #define ATAPI_P_ABORT 0 #define ATA_INTR_FLAGS (INTR_MPSAFE|INTR_TYPE_BIO|INTR_ENTROPY) #define ATA_OP_CONTINUES 0 #define ATA_OP_FINISHED 1 #define ATA_MAX_28BIT_LBA 268435455UL /* ATAPI request sense structure */ struct atapi_sense { u_int8_t error_code :7; /* current or deferred errors */ u_int8_t valid :1; /* follows ATAPI spec */ u_int8_t segment; /* Segment number */ u_int8_t sense_key :4; /* sense key */ u_int8_t reserved2_4 :1; /* reserved */ u_int8_t ili :1; /* incorrect length indicator */ u_int8_t eom :1; /* end of medium */ u_int8_t filemark :1; /* filemark */ u_int32_t cmd_info __packed; /* cmd information */ u_int8_t sense_length; /* additional sense len (n-7) */ u_int32_t cmd_specific_info __packed; /* additional cmd spec info */ u_int8_t asc; /* additional sense code */ u_int8_t ascq; /* additional sense code qual */ u_int8_t replaceable_unit_code; /* replaceable unit code */ u_int8_t sk_specific :7; /* sense key specific */ u_int8_t sksv :1; /* sense key specific info OK */ u_int8_t sk_specific1; /* sense key specific */ u_int8_t sk_specific2; /* sense key specific */ }; /* structure used for composite atomic operations */ -#define MAX_COMPOSITES 32 /* u_int32_t bits */ +#define MAX_COMPOSITES 32 /* u_int32_t bits */ struct ata_composite { struct mtx lock; /* control lock */ u_int32_t rd_needed; /* needed read subdisks */ u_int32_t rd_done; /* done read subdisks */ u_int32_t wr_needed; /* needed write subdisks */ u_int32_t wr_depend; /* write depends on subdisks */ u_int32_t wr_done; /* done write subdisks */ struct ata_request *request[MAX_COMPOSITES]; - u_int32_t residual; /* bytes still to transfer */ + u_int32_t residual; /* 
bytes still to transfer */ caddr_t data_1; caddr_t data_2; }; /* structure used to queue an ATA/ATAPI request */ struct ata_request { device_t dev; /* device handle */ union { struct { u_int8_t command; /* command reg */ u_int16_t feature; /* feature reg */ u_int16_t count; /* count reg */ u_int64_t lba; /* lba reg */ } ata; struct { u_int8_t ccb[16]; /* ATAPI command block */ struct atapi_sense sense_data; /* ATAPI request sense data */ u_int8_t sense_key; /* ATAPI request sense key */ u_int8_t sense_cmd; /* ATAPI saved command */ } atapi; } u; u_int32_t bytecount; /* bytes to transfer */ u_int32_t transfersize; /* bytes pr transfer */ caddr_t data; /* pointer to data buf */ int flags; #define ATA_R_CONTROL 0x00000001 #define ATA_R_READ 0x00000002 #define ATA_R_WRITE 0x00000004 #define ATA_R_ATAPI 0x00000008 #define ATA_R_DMA 0x00000010 #define ATA_R_QUIET 0x00000020 #define ATA_R_TIMEOUT 0x00000040 #define ATA_R_ORDERED 0x00000100 #define ATA_R_AT_HEAD 0x00000200 #define ATA_R_REQUEUE 0x00000400 #define ATA_R_THREAD 0x00000800 #define ATA_R_DIRECT 0x00001000 #define ATA_R_DEBUG 0x10000000 u_int8_t status; /* ATA status */ u_int8_t error; /* ATA error */ u_int8_t dmastat; /* DMA status */ u_int32_t donecount; /* bytes transferred */ int result; /* result error code */ void (*callback)(struct ata_request *request); struct sema done; /* request done sema */ int retries; /* retry count */ int timeout; /* timeout for this cmd */ struct callout callout; /* callout management */ struct task task; /* task management */ struct bio *bio; /* bio for this request */ int this; /* this request ID */ struct ata_composite *composite; /* for composite atomic ops */ void *driver; /* driver specific */ TAILQ_ENTRY(ata_request) chain; /* list management */ }; /* define this for debugging request processing */ #if 0 #define ATA_DEBUG_RQ(request, string) \ { \ if (request->flags & ATA_R_DEBUG) \ device_printf(request->dev, "req=%p %s " string "\n", \ request, ata_cmd2str(request)); \ 
} #else #define ATA_DEBUG_RQ(request, string) #endif /* structure describing an ATA/ATAPI device */ struct ata_device { device_t dev; /* device handle */ int unit; /* physical unit */ #define ATA_MASTER 0x00 #define ATA_SLAVE 0x10 struct ata_params param; /* ata param structure */ int mode; /* current transfermode */ u_int32_t max_iosize; /* max IO size */ int cmd; /* last cmd executed */ int flags; #define ATA_D_USE_CHS 0x0001 #define ATA_D_MEDIA_CHANGED 0x0002 #define ATA_D_ENC_PRESENT 0x0004 #define ATA_D_48BIT_ACTIVE 0x0008 }; /* structure for holding DMA Physical Region Descriptors (PRD) entries */ struct ata_dma_prdentry { u_int32_t addr; u_int32_t count; }; /* structure used by the setprd function */ struct ata_dmasetprd_args { void *dmatab; int nsegs; int error; }; /* structure holding DMA related information */ struct ata_dma { bus_dma_tag_t dmatag; /* parent DMA tag */ bus_dma_tag_t sg_tag; /* SG list DMA tag */ bus_dmamap_t sg_map; /* SG list DMA map */ void *sg; /* DMA transfer table */ bus_addr_t sg_bus; /* bus address of dmatab */ bus_dma_tag_t data_tag; /* data DMA tag */ bus_dmamap_t data_map; /* data DMA map */ bus_dma_tag_t work_tag; /* workspace DMA tag */ bus_dmamap_t work_map; /* workspace DMA map */ u_int8_t *work; /* workspace */ bus_addr_t work_bus; /* bus address of dmatab */ u_int32_t alignment; /* DMA SG list alignment */ u_int32_t boundary; /* DMA SG list boundary */ u_int32_t segsize; /* DMA SG list segment size */ u_int32_t max_iosize; /* DMA data max IO size */ u_int32_t cur_iosize; /* DMA data current IO size */ int flags; #define ATA_DMA_READ 0x01 /* transaction is a read */ #define ATA_DMA_LOADED 0x02 /* DMA tables etc loaded */ #define ATA_DMA_ACTIVE 0x04 /* DMA transfer in progress */ void (*alloc)(device_t dev); void (*free)(device_t dev); void (*setprd)(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); int (*load)(device_t dev, caddr_t data, int32_t count, int dir, void *addr, int *nsegs); int (*unload)(device_t dev); 
int (*start)(device_t dev); int (*stop)(device_t dev); void (*reset)(device_t dev); }; /* structure holding lowlevel functions */ struct ata_lowlevel { int (*status)(device_t dev); int (*begin_transaction)(struct ata_request *request); int (*end_transaction)(struct ata_request *request); int (*command)(struct ata_request *request); }; /* structure holding resources for an ATA channel */ struct ata_resource { struct resource *res; int offset; }; /* structure describing an ATA channel */ struct ata_channel { device_t dev; /* device handle */ int unit; /* physical channel */ struct ata_resource r_io[ATA_MAX_RES];/* I/O resources */ struct resource *r_irq; /* interrupt of this channel */ void *ih; /* interrupt handle */ struct ata_lowlevel hw; /* lowlevel HW functions */ struct ata_dma *dma; /* DMA data / functions */ int flags; /* channel flags */ #define ATA_NO_SLAVE 0x01 #define ATA_USE_16BIT 0x02 #define ATA_ATAPI_DMA_RO 0x04 #define ATA_NO_48BIT_DMA 0x08 -#define ATA_ALWAYS_DMASTAT 0x10 +#define ATA_ALWAYS_DMASTAT 0x10 int devices; /* what is present */ #define ATA_ATA_MASTER 0x01 #define ATA_ATA_SLAVE 0x02 #define ATA_ATAPI_MASTER 0x04 #define ATA_ATAPI_SLAVE 0x08 struct mtx state_mtx; /* state lock */ int state; /* ATA channel state */ #define ATA_IDLE 0x0000 #define ATA_ACTIVE 0x0001 #define ATA_STALL_QUEUE 0x0002 struct mtx queue_mtx; /* queue lock */ TAILQ_HEAD(, ata_request) ata_queue; /* head of ATA queue */ struct ata_request *freezepoint; /* composite freezepoint */ struct ata_request *running; /* currently running request */ }; /* disk bay/enclosure related */ #define ATA_LED_OFF 0x00 #define ATA_LED_RED 0x01 #define ATA_LED_GREEN 0x02 #define ATA_LED_ORANGE 0x03 #define ATA_LED_MASK 0x03 /* externs */ extern int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data); extern devclass_t ata_devclass; extern int ata_wc; /* public prototypes */ /* ata-all.c: */ int ata_probe(device_t dev); int ata_attach(device_t dev); int ata_detach(device_t dev); int 
ata_reinit(device_t dev); int ata_suspend(device_t dev); int ata_resume(device_t dev); int ata_interrupt(void *data); int ata_device_ioctl(device_t dev, u_long cmd, caddr_t data); int ata_identify(device_t dev); void ata_default_registers(device_t dev); void ata_modify_if_48bit(struct ata_request *request); void ata_udelay(int interval); char *ata_mode2str(int mode); int ata_pmode(struct ata_params *ap); int ata_wmode(struct ata_params *ap); int ata_umode(struct ata_params *ap); int ata_limit_mode(device_t dev, int mode, int maxmode); /* ata-queue.c: */ int ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature, u_int64_t lba, u_int16_t count); int ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data, int count, int flags, int timeout); void ata_queue_request(struct ata_request *request); void ata_start(device_t dev); void ata_finish(struct ata_request *request); void ata_timeout(struct ata_request *); void ata_catch_inflight(device_t dev); void ata_fail_requests(device_t dev); char *ata_cmd2str(struct ata_request *request); /* ata-lowlevel.c: */ void ata_generic_hw(device_t dev); int ata_begin_transaction(struct ata_request *); int ata_end_transaction(struct ata_request *); void ata_generic_reset(device_t dev); int ata_generic_command(struct ata_request *request); /* macros for alloc/free of struct ata_request */ extern uma_zone_t ata_request_zone; #define ata_alloc_request() uma_zalloc(ata_request_zone, M_NOWAIT | M_ZERO) #define ata_free_request(request) uma_zfree(ata_request_zone, request) /* macros for alloc/free of struct ata_composite */ extern uma_zone_t ata_composite_zone; #define ata_alloc_composite() uma_zalloc(ata_composite_zone, M_NOWAIT | M_ZERO) #define ata_free_composite(composite) uma_zfree(ata_composite_zone, composite) MALLOC_DECLARE(M_ATA); /* misc newbus defines */ #define GRANDPARENT(dev) device_get_parent(device_get_parent(dev)) /* macros to hide busspace uglyness */ #define ATA_INB(res, offset) \ 
bus_space_read_1(rman_get_bustag((res)), \ rman_get_bushandle((res)), (offset)) #define ATA_INW(res, offset) \ bus_space_read_2(rman_get_bustag((res)), \ rman_get_bushandle((res)), (offset)) #define ATA_INL(res, offset) \ bus_space_read_4(rman_get_bustag((res)), \ rman_get_bushandle((res)), (offset)) #define ATA_INSW(res, offset, addr, count) \ bus_space_read_multi_2(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_INSW_STRM(res, offset, addr, count) \ bus_space_read_multi_stream_2(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_INSL(res, offset, addr, count) \ bus_space_read_multi_4(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_INSL_STRM(res, offset, addr, count) \ bus_space_read_multi_stream_4(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_OUTB(res, offset, value) \ bus_space_write_1(rman_get_bustag((res)), \ rman_get_bushandle((res)), (offset), (value)) #define ATA_OUTW(res, offset, value) \ bus_space_write_2(rman_get_bustag((res)), \ rman_get_bushandle((res)), (offset), (value)) #define ATA_OUTL(res, offset, value) \ bus_space_write_4(rman_get_bustag((res)), \ rman_get_bushandle((res)), (offset), (value)) #define ATA_OUTSW(res, offset, addr, count) \ bus_space_write_multi_2(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_OUTSW_STRM(res, offset, addr, count) \ bus_space_write_multi_stream_2(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_OUTSL(res, offset, addr, count) \ bus_space_write_multi_4(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_OUTSL_STRM(res, offset, addr, count) \ bus_space_write_multi_stream_4(rman_get_bustag((res)), \ rman_get_bushandle((res)), \ (offset), (addr), (count)) #define ATA_IDX_INB(ch, idx) \ 
ATA_INB(ch->r_io[idx].res, ch->r_io[idx].offset) #define ATA_IDX_INW(ch, idx) \ ATA_INW(ch->r_io[idx].res, ch->r_io[idx].offset) #define ATA_IDX_INL(ch, idx) \ ATA_INL(ch->r_io[idx].res, ch->r_io[idx].offset) #define ATA_IDX_INSW(ch, idx, addr, count) \ ATA_INSW(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_INSW_STRM(ch, idx, addr, count) \ ATA_INSW_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_INSL(ch, idx, addr, count) \ ATA_INSL(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_INSL_STRM(ch, idx, addr, count) \ ATA_INSL_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_OUTB(ch, idx, value) \ ATA_OUTB(ch->r_io[idx].res, ch->r_io[idx].offset, value) #define ATA_IDX_OUTW(ch, idx, value) \ ATA_OUTW(ch->r_io[idx].res, ch->r_io[idx].offset, value) #define ATA_IDX_OUTL(ch, idx, value) \ ATA_OUTL(ch->r_io[idx].res, ch->r_io[idx].offset, value) #define ATA_IDX_OUTSW(ch, idx, addr, count) \ ATA_OUTSW(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_OUTSW_STRM(ch, idx, addr, count) \ ATA_OUTSW_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_OUTSL(ch, idx, addr, count) \ ATA_OUTSL(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) #define ATA_IDX_OUTSL_STRM(ch, idx, addr, count) \ ATA_OUTSL_STRM(ch->r_io[idx].res, ch->r_io[idx].offset, addr, count) diff --git a/sys/dev/ata/ata-chipset.c b/sys/dev/ata/ata-chipset.c index a5979c82c36f..be5909895ac4 100644 --- a/sys/dev/ata/ata-chipset.c +++ b/sys/dev/ata/ata-chipset.c @@ -1,4804 +1,4806 @@ /*- * Copyright (c) 1998 - 2006 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* local prototypes */ /* ata-chipset.c */ static int ata_generic_chipinit(device_t dev); static void ata_generic_intr(void *data); static void ata_generic_setmode(device_t dev, int mode); static void ata_sata_phy_enable(struct ata_channel *ch); static void ata_sata_phy_event(void *context, int dummy); static int ata_sata_connect(struct ata_channel *ch); static void ata_sata_setmode(device_t dev, int mode); static int ata_ahci_allocate(device_t dev); static int ata_ahci_status(device_t dev); static int ata_ahci_begin_transaction(struct ata_request *request); static int ata_ahci_end_transaction(struct ata_request *request); static void ata_ahci_reset(device_t dev); static void ata_ahci_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ata_ahci_dmainit(device_t dev); static int ata_ahci_setup_fis(u_int8_t *fis, struct ata_request *request); static int ata_acard_chipinit(device_t dev); static int ata_acard_allocate(device_t dev); static int ata_acard_status(device_t dev); static void ata_acard_850_setmode(device_t dev, int mode); static void ata_acard_86X_setmode(device_t dev, int mode); static int ata_ali_chipinit(device_t dev); static int ata_ali_allocate(device_t dev); static int ata_ali_sata_allocate(device_t dev); static void ata_ali_reset(device_t dev); static void ata_ali_setmode(device_t dev, int mode); static int ata_amd_chipinit(device_t dev); static int ata_ati_chipinit(device_t dev); static void ata_ati_setmode(device_t dev, int mode); static int ata_cyrix_chipinit(device_t dev); static void ata_cyrix_setmode(device_t dev, int mode); static int ata_cypress_chipinit(device_t dev); static void ata_cypress_setmode(device_t dev, int mode); static int ata_highpoint_chipinit(device_t dev); 
static int ata_highpoint_allocate(device_t dev); static void ata_highpoint_setmode(device_t dev, int mode); static int ata_highpoint_check_80pin(device_t dev, int mode); static int ata_intel_chipinit(device_t dev); static int ata_intel_allocate(device_t dev); static void ata_intel_reset(device_t dev); static void ata_intel_old_setmode(device_t dev, int mode); static void ata_intel_new_setmode(device_t dev, int mode); static int ata_intel_31244_allocate(device_t dev); static int ata_intel_31244_status(device_t dev); static int ata_intel_31244_command(struct ata_request *request); static void ata_intel_31244_reset(device_t dev); static int ata_ite_chipinit(device_t dev); static void ata_ite_setmode(device_t dev, int mode); static int ata_marvell_chipinit(device_t dev); static int ata_marvell_allocate(device_t dev); static int ata_marvell_status(device_t dev); static int ata_marvell_begin_transaction(struct ata_request *request); static int ata_marvell_end_transaction(struct ata_request *request); static void ata_marvell_reset(device_t dev); static void ata_marvell_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ata_marvell_dmainit(device_t dev); static int ata_national_chipinit(device_t dev); static void ata_national_setmode(device_t dev, int mode); static int ata_nvidia_chipinit(device_t dev); static int ata_nvidia_allocate(device_t dev); static int ata_nvidia_status(device_t dev); static void ata_nvidia_reset(device_t dev); static int ata_promise_chipinit(device_t dev); static int ata_promise_allocate(device_t dev); static int ata_promise_status(device_t dev); static int ata_promise_dmastart(device_t dev); static int ata_promise_dmastop(device_t dev); static void ata_promise_dmareset(device_t dev); static void ata_promise_dmainit(device_t dev); static void ata_promise_setmode(device_t dev, int mode); static int ata_promise_tx2_allocate(device_t dev); static int ata_promise_tx2_status(device_t dev); static int 
ata_promise_mio_allocate(device_t dev); static void ata_promise_mio_intr(void *data); static int ata_promise_mio_status(device_t dev); static int ata_promise_mio_command(struct ata_request *request); static void ata_promise_mio_reset(device_t dev); static void ata_promise_mio_dmainit(device_t dev); static void ata_promise_mio_setmode(device_t dev, int mode); static void ata_promise_sx4_intr(void *data); static int ata_promise_sx4_command(struct ata_request *request); static int ata_promise_apkt(u_int8_t *bytep, struct ata_request *request); static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt); static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr); static int ata_serverworks_chipinit(device_t dev); static void ata_serverworks_setmode(device_t dev, int mode); static int ata_sii_chipinit(device_t dev); static int ata_cmd_allocate(device_t dev); static int ata_cmd_status(device_t dev); static void ata_cmd_setmode(device_t dev, int mode); static int ata_sii_allocate(device_t dev); static int ata_sii_status(device_t dev); static void ata_sii_reset(device_t dev); static void ata_sii_setmode(device_t dev, int mode); static int ata_sis_chipinit(device_t dev); static int ata_sis_allocate(device_t dev); static void ata_sis_reset(device_t dev); static void ata_sis_setmode(device_t dev, int mode); static int ata_via_chipinit(device_t dev); static int ata_via_allocate(device_t dev); static void ata_via_reset(device_t dev); static void ata_via_southbridge_fixup(device_t dev); static void ata_via_family_setmode(device_t dev, int mode); static struct ata_chip_id *ata_match_chip(device_t dev, struct ata_chip_id *index); static struct ata_chip_id *ata_find_chip(device_t dev, struct ata_chip_id *index, int slot); static int ata_setup_interrupt(device_t dev); static int ata_serialize(device_t dev, int flags); static void ata_print_cable(device_t dev, u_int8_t *who); static int ata_atapi(device_t dev); static int ata_check_80pin(device_t dev, 
int mode);
static int ata_mode2idx(int mode);

/*
 * generic ATA support functions
 */
int
ata_generic_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    device_set_desc(dev, "GENERIC ATA controller");
    ctlr->chipinit = ata_generic_chipinit;
    return 0;
}

static int
ata_generic_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
        return ENXIO;
    ctlr->setmode = ata_generic_setmode;
    return 0;
}

/* fan the controller interrupt out to every channel that registered one */
static void
ata_generic_intr(void *data)
{
    struct ata_pci_controller *ctlr = data;
    struct ata_channel *ch;
    int unit;

    for (unit = 0; unit < ctlr->channels; unit++) {
        if ((ch = ctlr->interrupt[unit].argument))
            ctlr->interrupt[unit].function(ch);
    }
}

/* set transfer mode, capped at UDMA2 and by cable detection */
static void
ata_generic_setmode(device_t dev, int mode)
{
    struct ata_device *atadev = device_get_softc(dev);

    mode = ata_limit_mode(dev, mode, ATA_UDMA2);
    mode = ata_check_80pin(dev, mode);
    if (!ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
        atadev->mode = mode;
}

/*
 * SATA support functions
 */
/* bring the PHY online via SControl DET reset/idle cycles, then probe */
static void
ata_sata_phy_enable(struct ata_channel *ch)
{
    int loop, retry;

    /* already idle (no reset in progress) -> just probe for a device */
    if ((ATA_IDX_INL(ch, ATA_SCONTROL) & ATA_SC_DET_MASK) ==
        ATA_SC_DET_IDLE) {
        ata_sata_connect(ch);
        return;
    }
    for (retry = 0; retry < 10; retry++) {
        /* request COMRESET and wait for it to take */
        for (loop = 0; loop < 10; loop++) {
            ATA_IDX_OUTL(ch, ATA_SCONTROL, ATA_SC_DET_RESET);
            ata_udelay(100);
            if ((ATA_IDX_INL(ch, ATA_SCONTROL) & ATA_SC_DET_MASK) ==
                ATA_SC_DET_RESET)
                break;
        }
        ata_udelay(5000);
        /* release reset; DET==0 means the PHY accepted it */
        for (loop = 0; loop < 10; loop++) {
            ATA_IDX_OUTL(ch, ATA_SCONTROL, ATA_SC_DET_IDLE);
            ata_udelay(100);
            if ((ATA_IDX_INL(ch, ATA_SCONTROL) & ATA_SC_DET_MASK) == 0) {
                ata_sata_connect(ch);
                return;
            }
        }
    }
}

/*
 * taskqueue handler for deferred hotplug events queued by the interrupt
 * code; tp is malloc'ed by the enqueuer and freed here.
 */
static void
ata_sata_phy_event(void *context, int dummy)
{
    struct ata_connect_task *tp = (struct ata_connect_task *)context;
    struct ata_channel *ch = device_get_softc(tp->dev);
    device_t *children;
    int nchildren, i;

    mtx_lock(&Giant);   /* newbus suckage it needs Giant */
    if (tp->action == ATA_C_ATTACH) {
        if (bootverbose)
            device_printf(tp->dev, "CONNECTED\n");
        ata_sata_connect(ch);
        ata_identify(tp->dev);
    }
    if (tp->action == ATA_C_DETACH) {
        /* tear down all child devices on this channel */
        if (!device_get_children(tp->dev, &children, &nchildren)) {
            for (i = 0; i < nchildren; i++)
                if (children[i])
                    device_delete_child(tp->dev, children[i]);
            free(children, M_TEMP);
        }
        mtx_lock(&ch->state_mtx);
        ch->state = ATA_IDLE;
        mtx_unlock(&ch->state_mtx);
        if (bootverbose)
            device_printf(tp->dev, "DISCONNECTED\n");
    }
    mtx_unlock(&Giant); /* suckage code dealt with, release Giant */
    free(tp, M_ATA);
}

/*
 * wait for a "connect well" on the link, then classify the attached
 * device (ATA vs ATAPI) via the signature in the cylinder registers.
 * Returns 1 on an established link, 0 on timeout.
 */
static int
ata_sata_connect(struct ata_channel *ch)
{
    u_int32_t status;
    int timeout;

    /* wait up to 1 second for "connect well" */
    for (timeout = 0; timeout < 100 ; timeout++) {
        status = ATA_IDX_INL(ch, ATA_SSTATUS);
        if ((status & ATA_SS_CONWELL_MASK) == ATA_SS_CONWELL_GEN1 ||
            (status & ATA_SS_CONWELL_MASK) == ATA_SS_CONWELL_GEN2)
            break;
        ata_udelay(10000);
    }
    if (timeout >= 100) {
        if (bootverbose)
            device_printf(ch->dev, "SATA connect status=%08x\n", status);
        return 0;
    }

    /* clear SATA error register */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* find out what type device we got poll for spec'd 31 seconds */
    /* XXX SOS 10 secs for now as I have little patience */
    ch->devices = 0;
    for (timeout = 0; timeout < 1000; timeout++) {
        if (ATA_IDX_INB(ch, ATA_STATUS) & ATA_S_BUSY)
            DELAY(10000);
        else
            break;
    }
    if (bootverbose)
        device_printf(ch->dev, "SATA connect ready time=%dms\n",
                      timeout * 10);
    if (timeout < 1000) {
        /* ATAPI devices leave the 0x14/0xeb signature after reset */
        if ((ATA_IDX_INB(ch, ATA_CYL_LSB) == ATAPI_MAGIC_LSB) &&
            (ATA_IDX_INB(ch, ATA_CYL_MSB) == ATAPI_MAGIC_MSB))
            ch->devices = ATA_ATAPI_MASTER;
        else
            ch->devices = ATA_ATA_MASTER;
    }
    if (bootverbose)
        device_printf(ch->dev, "sata_connect devices=0x%b\n",
                      ch->devices, "\20\3ATAPI_MASTER\1ATA_MASTER");
    return 1;
}

static void
ata_sata_setmode(device_t dev, int mode)
{
    struct ata_device *atadev = device_get_softc(dev);

    /*
     * if we detect that the device isn't a real SATA device we limit
     * the transfer mode to UDMA5/ATA100.
     * this works around the problems some devices has with the
     * Marvell 88SX8030 SATA->PATA converters and UDMA6/ATA133.
     */
    if (atadev->param.satacapabilities != 0x0000 &&
        atadev->param.satacapabilities != 0xffff) {
        struct ata_channel *ch = device_get_softc(device_get_parent(dev));
        int status;

        /* on some drives we need to set the transfer mode */
        ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0,
                       ata_limit_mode(dev, mode, ATA_UDMA6));

        /* query SATA STATUS for the speed */
        status = ATA_IDX_INL(ch, ATA_SSTATUS);
        if ((status & ATA_SS_CONWELL_MASK) == ATA_SS_CONWELL_GEN2)
            atadev->mode = ATA_SA300;
        else
            atadev->mode = ATA_SA150;
    }
    else {
        mode = ata_limit_mode(dev, mode, ATA_UDMA5);
        if (!ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
            atadev->mode = mode;
    }
}

/*
 * AHCI v1.0 compliant SATA chipset support functions
 */
struct ata_ahci_dma_prd {
    u_int64_t                   dba;
    u_int32_t                   reserved;
    u_int32_t                   dbc;            /* 0 based */
#define ATA_AHCI_PRD_MASK       0x003fffff      /* max 4MB */
#define ATA_AHCI_PRD_IPC        (1<<31)
} __packed;

struct ata_ahci_cmd_tab {
    u_int8_t                    cfis[64];
    u_int8_t                    acmd[32];
    u_int8_t                    reserved[32];
    struct ata_ahci_dma_prd     prd_tab[16];
} __packed;

struct ata_ahci_cmd_list {
    u_int16_t                   cmd_flags;
    u_int16_t                   prd_length;     /* PRD entries */
    u_int32_t                   bytecount;
    u_int64_t                   cmd_table_phys; /* 128byte aligned */
} __packed;

/*
 * per-channel AHCI setup: map legacy + SATA register indexes onto the
 * AHCI port register block (each port block is 0x80 bytes -> unit << 7),
 * install the AHCI hw vector, program work areas and start the port.
 */
static int
ata_ahci_allocate(device_t dev)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int offset = (ch->unit << 7);

    /* setup legacy cruft we need */
    ch->r_io[ATA_CYL_LSB].res = ctlr->r_res2;
    ch->r_io[ATA_CYL_LSB].offset = ATA_AHCI_P_SIG + 1 + offset;
    ch->r_io[ATA_CYL_MSB].res = ctlr->r_res2;
    ch->r_io[ATA_CYL_MSB].offset = ATA_AHCI_P_SIG + 3 + offset;
    ch->r_io[ATA_STATUS].res = ctlr->r_res2;
    ch->r_io[ATA_STATUS].offset = ATA_AHCI_P_TFD + offset;
    ch->r_io[ATA_ALTSTAT].res = ctlr->r_res2;
    ch->r_io[ATA_ALTSTAT].offset = ATA_AHCI_P_TFD + offset;

    /* set the SATA resources */
    ch->r_io[ATA_SSTATUS].res = ctlr->r_res2;
    ch->r_io[ATA_SSTATUS].offset = ATA_AHCI_P_SSTS + offset;
    ch->r_io[ATA_SERROR].res = ctlr->r_res2;
    ch->r_io[ATA_SERROR].offset = ATA_AHCI_P_SERR + offset;
    ch->r_io[ATA_SCONTROL].res = ctlr->r_res2;
    ch->r_io[ATA_SCONTROL].offset = ATA_AHCI_P_SCTL + offset;
    ch->r_io[ATA_SACTIVE].res = ctlr->r_res2;
    ch->r_io[ATA_SACTIVE].offset = ATA_AHCI_P_SACT + offset;

    ch->hw.status = ata_ahci_status;
    ch->hw.begin_transaction = ata_ahci_begin_transaction;
    ch->hw.end_transaction = ata_ahci_end_transaction;
    ch->hw.command = NULL;      /* not used here */

    /* setup the work areas */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CLB + offset,
             ch->dma->work_bus + ATA_AHCI_CL_OFFSET);
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CLBU + offset, 0x00000000);
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_FB + offset,
             ch->dma->work_bus + ATA_AHCI_FB_OFFSET);
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_FBU + offset, 0x00000000);

    /* enable wanted port interrupts */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_IE + offset,
             (ATA_AHCI_P_IX_CPD | ATA_AHCI_P_IX_TFE | ATA_AHCI_P_IX_HBF |
              ATA_AHCI_P_IX_HBD | ATA_AHCI_P_IX_IF | ATA_AHCI_P_IX_OF |
              ATA_AHCI_P_IX_PRC | ATA_AHCI_P_IX_PC | ATA_AHCI_P_IX_DP |
              ATA_AHCI_P_IX_UF | ATA_AHCI_P_IX_SDB | ATA_AHCI_P_IX_DS |
              ATA_AHCI_P_IX_PS | ATA_AHCI_P_IX_DHR));

    /* start operations on this channel */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CMD + offset,
             (ATA_AHCI_P_CMD_ACTIVE | ATA_AHCI_P_CMD_FRE |
              ATA_AHCI_P_CMD_POD | ATA_AHCI_P_CMD_SUD | ATA_AHCI_P_CMD_ST));
    return 0;
}

/*
 * interrupt/status handler for an AHCI port: ack SERR/IS, queue
 * connect/disconnect events to the taskqueue, and report whether the
 * single outstanding command (tag 0) has completed.
 * Returns 1 when the command slot is no longer issued, 0 otherwise.
 */
static int
ata_ahci_status(device_t dev)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_connect_task *tp;
    u_int32_t action, status, error, issued;
    int offset = (ch->unit << 7);
    int tag = 0;

    action = ATA_INL(ctlr->r_res2, ATA_AHCI_IS) & (1 << ch->unit);
    if (action) {
        error = ATA_INL(ctlr->r_res2, ATA_AHCI_P_SERR + offset);
        status = ATA_INL(ctlr->r_res2, ATA_AHCI_P_IS + offset);
        issued = ATA_INL(ctlr->r_res2, ATA_AHCI_P_CI + offset);
        /* write back to ack the error/interrupt bits we just read */
        ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_SERR + offset, error);
        ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_IS + offset, status);

        /* do we have cold connect surprise */
        if (status & ATA_AHCI_P_IX_CPD) {
            printf("ata_ahci_intr status=%08x error=%08x issued=%08x\n",
                   status, error, issued);
        }

        /* check for and handle connect events */
        if (((status & (ATA_AHCI_P_IX_PRC | ATA_AHCI_P_IX_PC)) ==
             ATA_AHCI_P_IX_PC) &&
            (tp = (struct ata_connect_task *)
                  malloc(sizeof(struct ata_connect_task),
                         M_ATA, M_NOWAIT | M_ZERO))) {
            if (bootverbose)
                device_printf(ch->dev, "CONNECT requested\n");
            tp->action = ATA_C_ATTACH;
            tp->dev = ch->dev;
            TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp);
            taskqueue_enqueue(taskqueue_thread, &tp->task);
        }

        /* check for and handle disconnect events */
        if (((status & (ATA_AHCI_P_IX_PRC | ATA_AHCI_P_IX_PC)) ==
             ATA_AHCI_P_IX_PRC) &&
            (tp = (struct ata_connect_task *)
                  malloc(sizeof(struct ata_connect_task),
                         M_ATA, M_NOWAIT | M_ZERO))) {
            if (bootverbose)
                device_printf(ch->dev, "DISCONNECT requested\n");
            tp->action = ATA_C_DETACH;
            tp->dev = ch->dev;
            TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp);
            taskqueue_enqueue(taskqueue_thread, &tp->task);
        }

        /* clear interrupt */
        ATA_OUTL(ctlr->r_res2, ATA_AHCI_IS, action);

        /* do we have any device action ? */
        if (!(issued & (1 << tag)))
            return 1;
    }
    return 0;
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_ahci_begin_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch =
        device_get_softc(device_get_parent(request->dev));
    struct ata_ahci_cmd_tab *ctp;
    struct ata_ahci_cmd_list *clp;
    int fis_size, entries;
    int tag = 0;

    /* get a piece of the workspace for this request */
    ctp = (struct ata_ahci_cmd_tab *)
          (ch->dma->work + ATA_AHCI_CT_OFFSET + (ATA_AHCI_CT_SIZE * tag));

    /* setup the FIS for this request */
    /* XXX SOS ATAPI missing still */
    if (!(fis_size = ata_ahci_setup_fis(&ctp->cfis[0], request))) {
        device_printf(request->dev, "setting up SATA FIS failed\n");
        request->result = EIO;
        return ATA_OP_FINISHED;
    }

    /* if request moves data setup and load SG list */
    if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {
        if (ch->dma->load(ch->dev, request->data, request->bytecount,
                          request->flags & ATA_R_READ,
                          ctp->prd_tab, &entries)) {
            device_printf(request->dev, "setting up DMA failed\n");
            request->result = EIO;
            return ATA_OP_FINISHED;
        }
    }

    /* setup the command list entry */
    /* NOTE(review): for non-data commands 'entries' is never assigned
     * before being stored into prd_length below — confirm intended */
    clp = (struct ata_ahci_cmd_list *)
          (ch->dma->work + ATA_AHCI_CL_OFFSET + (ATA_AHCI_CL_SIZE * tag));
    clp->prd_length = entries;
    clp->cmd_flags = (request->flags & ATA_R_WRITE ? (1<<6) : 0) |
                     (request->flags & ATA_R_ATAPI ? (1<<5) : 0) |
                     (fis_size / sizeof(u_int32_t));
    clp->bytecount = 0;
    clp->cmd_table_phys = htole64(ch->dma->work_bus + ATA_AHCI_CT_OFFSET +
                                  (ATA_AHCI_CT_SIZE * tag));

    /* clear eventual ACTIVE bit */
    /* NOTE(review): mask looks inverted for a "clear" — & ~(1 << tag)
     * would be expected; confirm against AHCI PxSACT semantics */
    ATA_IDX_OUTL(ch, ATA_SACTIVE,
                 ATA_IDX_INL(ch, ATA_SACTIVE) & (1 << tag));

    /* issue the command */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CI + (ch->unit << 7), (1 << tag));

    /* start the timeout */
    callout_reset(&request->callout, request->timeout * hz,
                  (timeout_t*)ata_timeout, request);
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_ahci_end_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch =
        device_get_softc(device_get_parent(request->dev));
    struct ata_ahci_cmd_list *clp;
    u_int32_t tf_data;
    int tag = 0;

    /* kill the timeout */
    callout_stop(&request->callout);

    /* get status */
    tf_data = ATA_INL(ctlr->r_res2, ATA_AHCI_P_TFD + (ch->unit << 7));
    request->status = tf_data;

    /* if error status get details */
    if (request->status & ATA_S_ERROR)
        request->error = tf_data >> 8;

    /* record how much data we actually moved */
    clp = (struct ata_ahci_cmd_list *)
          (ch->dma->work + ATA_AHCI_CL_OFFSET + (ATA_AHCI_CL_SIZE * tag));
    request->donecount = clp->bytecount;

    /* release SG list etc */
    ch->dma->unload(ch->dev);

    return ATA_OP_FINISHED;
}

/* stop all engines on the port, spin up and re-enable it from scratch */
static void
ata_ahci_reset(device_t dev)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int32_t cmd;
    int offset = (ch->unit << 7);

    /* kill off all activity on this channel */
    cmd = ATA_INL(ctlr->r_res2, ATA_AHCI_P_CMD + offset);
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CMD + offset,
             cmd & ~(ATA_AHCI_P_CMD_CR | ATA_AHCI_P_CMD_FR |
                     ATA_AHCI_P_CMD_FRE | ATA_AHCI_P_CMD_ST));

    DELAY(500000);      /* XXX SOS this is not entirely wrong */

    /* spin up device */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CMD + offset, ATA_AHCI_P_CMD_SUD);

    ata_sata_phy_enable(ch);

    /* clear any interrupts pending on this channel */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_IS + offset,
             ATA_INL(ctlr->r_res2, ATA_AHCI_P_IS + offset));

    /* start operations on this channel */
    ATA_OUTL(ctlr->r_res2, ATA_AHCI_P_CMD + offset,
             (ATA_AHCI_P_CMD_ACTIVE | ATA_AHCI_P_CMD_FRE |
              ATA_AHCI_P_CMD_POD | ATA_AHCI_P_CMD_SUD | ATA_AHCI_P_CMD_ST));
}

/* busdma callback: fill the AHCI PRD table from the SG segment list */
static void
ata_ahci_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct ata_dmasetprd_args *args = xsc;
    struct ata_ahci_dma_prd *prd = args->dmatab;
    int i;

    if (!(args->error = error)) {
        for (i = 0; i < nsegs; i++) {
            prd[i].dba = htole64(segs[i].ds_addr);
            /* dbc is 0-based, hence the -1 */
            prd[i].dbc = htole32((segs[i].ds_len - 1) & ATA_AHCI_PRD_MASK);
        }
    }
    args->nsegs = nsegs;
}

static void
ata_ahci_dmainit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ata_dmainit(dev);
    if (ch->dma) {
        /* note start and stop are not used here */
        ch->dma->setprd = ata_ahci_dmasetprd;
        ch->dma->max_iosize = 8192 * DEV_BSIZE;
    }
}

/*
 * build a host-to-device register FIS for the request in 'fis';
 * returns the FIS length in bytes (always 20 here), 0 on failure.
 */
static int
ata_ahci_setup_fis(u_int8_t *fis, struct ata_request *request)
{
    struct ata_device *atadev = device_get_softc(request->dev);
    int idx = 0;

    /* XXX SOS add ATAPI commands support later */
    ata_modify_if_48bit(request);

    fis[idx++] = 0x27;  /* host to device */
    fis[idx++] = 0x80;  /* command FIS (note PM goes here) */
    fis[idx++] = request->u.ata.command;
    fis[idx++] = request->u.ata.feature;

    fis[idx++] = request->u.ata.lba;
    fis[idx++] = request->u.ata.lba >> 8;
    fis[idx++] = request->u.ata.lba >> 16;
    /* 48-bit commands keep LBA bits 24-27 out of the device register */
    fis[idx] = ATA_D_LBA | atadev->unit;
    if (atadev->flags & ATA_D_48BIT_ACTIVE)
        idx++;
    else
        fis[idx++] |= (request->u.ata.lba >> 24 & 0x0f);

    fis[idx++] = request->u.ata.lba >> 24;
    fis[idx++] = request->u.ata.lba >> 32;
    fis[idx++] = request->u.ata.lba >> 40;
    fis[idx++] = request->u.ata.feature >> 8;

    fis[idx++] = request->u.ata.count;
    fis[idx++] = request->u.ata.count >> 8;
    fis[idx++] = 0x00;
    fis[idx++] = ATA_A_4BIT;

    fis[idx++] = 0x00;
    fis[idx++] = 0x00;
    fis[idx++] = 0x00;
    fis[idx++] = 0x00;
    return idx;
}

/*
 * Acard chipset
support functions
 */

/* identify Acard controllers and hook up the chip-specific init */
int
ata_acard_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_ATP850R, 0, ATPOLD, 0x00, ATA_UDMA2, "ATP850" },
     { ATA_ATP860A, 0, 0,      0x00, ATA_UDMA4, "ATP860A" },
     { ATA_ATP860R, 0, 0,      0x00, ATA_UDMA4, "ATP860R" },
     { ATA_ATP865A, 0, 0,      0x00, ATA_UDMA6, "ATP865A" },
     { ATA_ATP865R, 0, 0,      0x00, ATA_UDMA6, "ATP865R" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "Acard %s %s controller",
	    idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_acard_chipinit;
    return 0;
}

static int
ata_acard_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    ctlr->allocate = ata_acard_allocate;
    if (ctlr->chip->cfg1 == ATPOLD) {
	/* the old ATP850 needs channel serialization */
	ctlr->setmode = ata_acard_850_setmode;
	ctlr->locking = ata_serialize;
    }
    else
	ctlr->setmode = ata_acard_86X_setmode;
    return 0;
}

static int
ata_acard_allocate(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* setup the usual register normal pci style */
    if (ata_pci_allocate(dev))
	return ENXIO;

    ch->hw.status = ata_acard_status;
    return 0;
}

/* interrupt status check; returns 1 when the channel has work to handle */
static int
ata_acard_status(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* on serialized chips only the channel holding the lock may proceed */
    if (ctlr->chip->cfg1 == ATPOLD &&
	ATA_LOCKING(ch->dev, ATA_LF_WHICH) != ch->unit)
	return 0;
    if (ch->dma && (ch->dma->flags & ATA_DMA_ACTIVE)) {
	int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;

	if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) !=
	    ATA_BMSTAT_INTERRUPT)
	    return 0;
	ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
	DELAY(1);
	ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
		     ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
	DELAY(1);
    }
    if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
	DELAY(100);
	if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
	    return 0;
    }
    return 1;
}

/* set transfer mode on the old ATP850 */
static void
ata_acard_850_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    int error;

    mode = ata_limit_mode(dev, mode,
			  ata_atapi(dev) ? ATA_PIO_MAX : ctlr->chip->max_dma);

    /* XXX SOS missing WDMA0+1 + PIO modes */
    if (mode >= ATA_WDMA2) {
	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
	if (bootverbose)
	    device_printf(dev, "%ssetting %s on %s chip\n",
			  (error) ? "FAILURE " : "",
			  ata_mode2str(mode), ctlr->chip->text);
	if (!error) {
	    u_int8_t reg54 = pci_read_config(gparent, 0x54, 1);

	    reg54 &= ~(0x03 << (devno << 1));
	    if (mode >= ATA_UDMA0)
		reg54 |= (((mode & ATA_MODE_MASK) + 1) << (devno << 1));
	    pci_write_config(gparent, 0x54, reg54, 1);
	    pci_write_config(gparent, 0x4a, 0xa6, 1);
	    pci_write_config(gparent, 0x40 + (devno << 1), 0x0301, 2);
	    atadev->mode = mode;
	    return;
	}
    }
    /* we could set PIO mode timings, but we assume the BIOS did that */
}

/* set transfer mode on ATP860/865 family chips */
static void
ata_acard_86X_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    int error;

    mode = ata_limit_mode(dev, mode,
			  ata_atapi(dev) ? ATA_PIO_MAX : ctlr->chip->max_dma);

    mode = ata_check_80pin(dev, mode);

    /* XXX SOS missing WDMA0+1 + PIO modes */
    if (mode >= ATA_WDMA2) {
	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
	if (bootverbose)
	    device_printf(dev, "%ssetting %s on %s chip\n",
			  (error) ? "FAILURE " : "",
			  ata_mode2str(mode), ctlr->chip->text);
	if (!error) {
	    u_int16_t reg44 = pci_read_config(gparent, 0x44, 2);

	    reg44 &= ~(0x000f << (devno << 2));
	    if (mode >= ATA_UDMA0)
		reg44 |= (((mode & ATA_MODE_MASK) + 1) << (devno << 2));
	    pci_write_config(gparent, 0x44, reg44, 2);
	    pci_write_config(gparent, 0x4a, 0xa6, 1);
	    pci_write_config(gparent, 0x40 + devno, 0x31, 1);
	    atadev->mode = mode;
	    return;
	}
    }
    /* we could set PIO mode timings, but we assume the BIOS did that */
}

/*
 * Acer Labs Inc (ALI) chipset support functions
 */
int
ata_ali_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_ALI_5289, 0x00, 2, ALISATA, ATA_SA150, "M5289" },
     { ATA_ALI_5287, 0x00, 4, ALISATA, ATA_SA150, "M5287" },
     { ATA_ALI_5281, 0x00, 2, ALISATA, ATA_SA150, "M5281" },
     { ATA_ALI_5229, 0xc5, 0, ALINEW,  ATA_UDMA6, "M5229" },
     { ATA_ALI_5229, 0xc4, 0, ALINEW,  ATA_UDMA5, "M5229" },
     { ATA_ALI_5229, 0xc2, 0, ALINEW,  ATA_UDMA4, "M5229" },
     { ATA_ALI_5229, 0x20, 0, ALIOLD,  ATA_UDMA2, "M5229" },
     { ATA_ALI_5229, 0x00, 0, ALIOLD,  ATA_WDMA2, "M5229" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "AcerLabs %s %s controller",
	    idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_ali_chipinit;
    return 0;
}

static int
ata_ali_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    switch (ctlr->chip->cfg2) {
    case ALISATA:
	pci_write_config(dev, PCIR_COMMAND,
			 pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
	ctlr->channels = ctlr->chip->cfg1;
	ctlr->allocate = ata_ali_sata_allocate;
	ctlr->setmode = ata_sata_setmode;
	break;

    case ALINEW:
	/* use device interrupt as byte count end */
	pci_write_config(dev, 0x4a, pci_read_config(dev, 0x4a, 1) | 0x20, 1);

	/* enable cable detection and UDMA support on newer chips */
	pci_write_config(dev, 0x4b, pci_read_config(dev, 0x4b, 1) | 0x09, 1);

	/* enable ATAPI UDMA mode */
	pci_write_config(dev, 0x53, pci_read_config(dev, 0x53, 1) | 0x01, 1);

	/* only chips with revision > 0xc4 can do 48bit DMA */
	if (ctlr->chip->chiprev <= 0xc4)
	    device_printf(dev,
			  "using PIO transfers above 137GB as workaround for "
			  "48bit DMA access bug, expect reduced performance\n");
	ctlr->allocate = ata_ali_allocate;
	ctlr->reset = ata_ali_reset;
	ctlr->setmode = ata_ali_setmode;
	break;

    case ALIOLD:
	/* deactivate the ATAPI FIFO and enable ATAPI UDMA */
	pci_write_config(dev, 0x53, pci_read_config(dev, 0x53, 1) | 0x03, 1);
	ctlr->setmode = ata_ali_setmode;
	break;
    }
    return 0;
}

static int
ata_ali_allocate(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* setup the usual register normal pci style */
    if (ata_pci_allocate(dev))
	return ENXIO;

    /* older chips can't do 48bit DMA transfers */
    if (ctlr->chip->chiprev <= 0xc4)
	ch->flags |= ATA_NO_48BIT_DMA;

    return 0;
}

/* allocate I/O resources for an ALI SATA channel (non-standard BAR layout) */
static int
ata_ali_sata_allocate(device_t dev)
{
    device_t parent = device_get_parent(dev);
    struct ata_pci_controller *ctlr = device_get_softc(parent);
    struct ata_channel *ch = device_get_softc(dev);
    struct resource *io = NULL, *ctlio = NULL;
    int unit01 = (ch->unit & 1), unit10 = (ch->unit & 2);
    int i, rid;

    rid = PCIR_BAR(0) + (unit01 ? 8 : 0);
    io = bus_alloc_resource_any(parent, SYS_RES_IOPORT, &rid, RF_ACTIVE);
    if (!io)
	return ENXIO;

    rid = PCIR_BAR(1) + (unit01 ? 8 : 0);
    ctlio = bus_alloc_resource_any(parent, SYS_RES_IOPORT, &rid, RF_ACTIVE);
    if (!ctlio) {
	/*
	 * release io on the device and with the rid it was allocated with
	 * (previously this passed dev/ATA_IOADDR_RID which do not match
	 * the bus_alloc_resource_any() call above)
	 */
	bus_release_resource(parent, SYS_RES_IOPORT,
			     PCIR_BAR(0) + (unit01 ? 8 : 0), io);
	return ENXIO;
    }

    for (i = ATA_DATA; i <= ATA_COMMAND; i ++) {
	ch->r_io[i].res = io;
	ch->r_io[i].offset = i + (unit10 ? 8 : 0);
    }
    ch->r_io[ATA_CONTROL].res = ctlio;
    ch->r_io[ATA_CONTROL].offset = 2 + (unit10 ? 4 : 0);
    ch->r_io[ATA_IDX_ADDR].res = io;
    ata_default_registers(dev);
    if (ctlr->r_res1) {
	for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) {
	    ch->r_io[i].res = ctlr->r_res1;
	    ch->r_io[i].offset = (i - ATA_BMCMD_PORT)+(ch->unit * ATA_BMIOSIZE);
	}
    }
    ch->flags |= ATA_NO_SLAVE;

    /* XXX SOS PHY handling awkward in ALI chip not supported yet */
    ata_pci_hw(dev);
    return 0;
}

static void
ata_ali_reset(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    device_t *children;
    int nchildren, i;

    ata_generic_reset(dev);

    /*
     * workaround for datacorruption bug found on at least SUN Blade-100
     * find the ISA function on the southbridge and disable then enable
     * the ATA channel tristate buffer
     */
    if (ctlr->chip->chiprev == 0xc3 || ctlr->chip->chiprev == 0xc2) {
	if (!device_get_children(GRANDPARENT(dev), &children, &nchildren)) {
	    for (i = 0; i < nchildren; i++) {
		if (pci_get_devid(children[i]) == ATA_ALI_1533) {
		    pci_write_config(children[i], 0x58,
				     pci_read_config(children[i], 0x58, 1) &
				     ~(0x04 << ch->unit), 1);
		    pci_write_config(children[i], 0x58,
				     pci_read_config(children[i], 0x58, 1) |
				     (0x04 << ch->unit), 1);
		    break;
		}
	    }
	    free(children, M_TEMP);
	}
    }
}

static void
ata_ali_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    int error;

    mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);

    if (ctlr->chip->cfg2 & ALINEW) {
	if (mode > ATA_UDMA2 &&
	    pci_read_config(gparent, 0x4a, 1) & (1 << ch->unit)) {
	    ata_print_cable(dev, "controller");
	    mode = ATA_UDMA2;
	}
    }
    else
	mode = ata_check_80pin(dev, mode);

    if (ctlr->chip->cfg2 & ALIOLD) {
	/* doesn't support ATAPI DMA on write */
	ch->flags |= ATA_ATAPI_DMA_RO;
	if (ch->devices & ATA_ATAPI_MASTER && ch->devices & ATA_ATAPI_SLAVE) {
	    /* doesn't support ATAPI DMA on two ATAPI devices */
	    device_printf(dev, "two atapi devices on this channel, no DMA\n");
	    mode = ata_limit_mode(dev, mode, ATA_PIO_MAX);
	}
    }

    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);

    if (bootverbose)
	device_printf(dev, "%ssetting %s on %s chip\n",
		      (error) ? "FAILURE " : "",
		      ata_mode2str(mode), ctlr->chip->text);
    if (!error) {
	if (mode >= ATA_UDMA0) {
	    u_int8_t udma[] = {0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0d};
	    u_int32_t word54 = pci_read_config(gparent, 0x54, 4);

	    word54 &= ~(0x000f000f << (devno << 2));
	    word54 |= (((udma[mode&ATA_MODE_MASK]<<16)|0x05)<<(devno<<2));
	    pci_write_config(gparent, 0x54, word54, 4);
	    pci_write_config(gparent, 0x58 + (ch->unit << 2), 0x00310001, 4);
	}
	else {
	    u_int32_t piotimings[] =
		{ 0x006d0003, 0x00580002, 0x00440001, 0x00330001,
		  0x00310001, 0x00440001, 0x00330001, 0x00310001};

	    pci_write_config(gparent, 0x54,
			     pci_read_config(gparent, 0x54, 4) &
			     ~(0x0008000f << (devno << 2)), 4);
	    pci_write_config(gparent, 0x58 + (ch->unit << 2),
			     piotimings[ata_mode2idx(mode)], 4);
	}
	atadev->mode = mode;
    }
}

/*
 * American Micro Devices (AMD) chipset support functions
 */
int
ata_amd_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_AMD756,  0x00, AMDNVIDIA, 0x00,            ATA_UDMA4, "756" },
     { ATA_AMD766,  0x00, AMDNVIDIA, AMDCABLE|AMDBUG, ATA_UDMA5, "766" },
     { ATA_AMD768,  0x00, AMDNVIDIA, AMDCABLE,        ATA_UDMA5, "768" },
     { ATA_AMD8111, 0x00, AMDNVIDIA, AMDCABLE,        ATA_UDMA6, "8111" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "AMD %s %s controller",
	    idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_amd_chipinit;
    return 0;
}

static int
ata_amd_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    /* disable/set prefetch, postwrite */
    if (ctlr->chip->cfg2 & AMDBUG)
	pci_write_config(dev, 0x41, pci_read_config(dev, 0x41, 1) & 0x0f, 1);
    else
	pci_write_config(dev, 0x41, pci_read_config(dev, 0x41, 1) | 0xf0, 1);

    ctlr->setmode = ata_via_family_setmode;
    return 0;
}

/*
 * ATI chipset support functions
 */
int
ata_ati_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_ATI_IXP200,    0x00, 0,        0, ATA_UDMA5, "IXP200" },
     { ATA_ATI_IXP300,    0x00, 0,        0, ATA_UDMA6, "IXP300" },
     { ATA_ATI_IXP400,    0x00, 0,        0, ATA_UDMA6, "IXP400" },
     { ATA_ATI_IXP300_S1, 0x00, SIIMEMIO, 0, ATA_SA150, "IXP300" },
     { ATA_ATI_IXP400_S1, 0x00, SIIMEMIO, 0, ATA_SA150, "IXP400" },
     { ATA_ATI_IXP400_S2, 0x00, SIIMEMIO, 0, ATA_SA150, "IXP400" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "ATI %s %s controller",
	    idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;

    /* the ATI SATA controller is actually a SiI 3112 controller*/
    if (ctlr->chip->cfg1 & SIIMEMIO)
	ctlr->chipinit = ata_sii_chipinit;
    else
	ctlr->chipinit = ata_ati_chipinit;
    return 0;
}

static int
ata_ati_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    ctlr->setmode = ata_ati_setmode;
    return 0;
}

static void
ata_ati_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    int offset = (devno ^ 0x01) << 3;
    int error;
    u_int8_t piotimings[] = { 0x5d, 0x47, 0x34, 0x22, 0x20, 0x34, 0x22, 0x20,
			      0x20, 0x20, 0x20, 0x20, 0x20, 0x20 };
    u_int8_t dmatimings[] = { 0x77, 0x21, 0x20 };

    mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);

    mode = ata_check_80pin(dev, mode);

    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);

    if (bootverbose)
	device_printf(dev, "%ssetting %s on %s chip\n",
		      (error) ? "FAILURE " : "",
		      ata_mode2str(mode), ctlr->chip->text);
    if (!error) {
	if (mode >= ATA_UDMA0) {
	    pci_write_config(gparent, 0x56,
			     (pci_read_config(gparent, 0x56, 2) &
			      ~(0xf << (devno << 2))) |
			     ((mode & ATA_MODE_MASK) << (devno << 2)), 2);
	    pci_write_config(gparent, 0x54,
			     pci_read_config(gparent, 0x54, 1) |
			     (0x01 << devno), 1);
	    pci_write_config(gparent, 0x44,
			     (pci_read_config(gparent, 0x44, 4) &
			      ~(0xff << offset)) |
			     (dmatimings[2] << offset), 4);
	}
	else if (mode >= ATA_WDMA0) {
	    pci_write_config(gparent, 0x54,
			     pci_read_config(gparent, 0x54, 1) &
			     ~(0x01 << devno), 1);
	    pci_write_config(gparent, 0x44,
			     (pci_read_config(gparent, 0x44, 4) &
			      ~(0xff << offset)) |
			     (dmatimings[mode & ATA_MODE_MASK] << offset), 4);
	}
	else
	    pci_write_config(gparent, 0x54,
			     pci_read_config(gparent, 0x54, 1) &
			     ~(0x01 << devno), 1);

	/* PIO timings are programmed for all modes */
	pci_write_config(gparent, 0x4a,
			 (pci_read_config(gparent, 0x4a, 2) &
			  ~(0xf << (devno << 2))) |
			 (((mode - ATA_PIO0) & ATA_MODE_MASK) << (devno<<2)),2);
	pci_write_config(gparent, 0x40,
			 (pci_read_config(gparent, 0x40, 4) &
			  ~(0xff << offset)) |
			 (piotimings[ata_mode2idx(mode)] << offset), 4);
	atadev->mode = mode;
    }
}

/*
 * Cyrix chipset support functions
 */
int
ata_cyrix_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (pci_get_devid(dev) == ATA_CYRIX_5530) {
	device_set_desc(dev, "Cyrix 5530 ATA33 controller");
	ctlr->chipinit = ata_cyrix_chipinit;
	return 0;
    }
    return ENXIO;
}

static int
ata_cyrix_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    if (ctlr->r_res1)
	ctlr->setmode = ata_cyrix_setmode;
    else
	ctlr->setmode = ata_generic_setmode;
    return 0;
}

static void
ata_cyrix_setmode(device_t dev, int mode)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    u_int32_t piotiming[] =
	{ 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 };
    u_int32_t dmatiming[] = { 0x00077771, 0x00012121, 0x00002020 };
    u_int32_t udmatiming[] = { 0x00921250, 0x00911140, 0x00911030 };
    int error;

    ch->dma->alignment = 16;
    ch->dma->max_iosize = 126 * DEV_BSIZE;

    mode = ata_limit_mode(dev, mode, ATA_UDMA2);

    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);

    if (bootverbose)
	device_printf(dev, "%ssetting %s on Cyrix chip\n",
		      (error) ? "FAILURE " : "", ata_mode2str(mode));
    if (!error) {
	/* timing registers live in the busmaster I/O range */
	if (mode >= ATA_UDMA0) {
	    ATA_OUTL(ch->r_io[ATA_BMCMD_PORT].res, 0x24 + (devno << 3),
		     udmatiming[mode & ATA_MODE_MASK]);
	}
	else if (mode >= ATA_WDMA0) {
	    ATA_OUTL(ch->r_io[ATA_BMCMD_PORT].res, 0x24 + (devno << 3),
		     dmatiming[mode & ATA_MODE_MASK]);
	}
	else {
	    ATA_OUTL(ch->r_io[ATA_BMCMD_PORT].res, 0x20 + (devno << 3),
		     piotiming[mode & ATA_MODE_MASK]);
	}
	atadev->mode = mode;
    }
}

/*
 * Cypress chipset support functions
 */
int
ata_cypress_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    /*
     * the Cypress chip is a mess, it contains two ATA functions, but
     * both channels are visible on the first one.
     * simply ignore the second function for now, as the right
     * solution (ignoring the second channel on the first function)
     * doesn't work with the crappy ATA interrupt setup on the alpha.
     */
    if (pci_get_devid(dev) == ATA_CYPRESS_82C693 &&
	pci_get_function(dev) == 1 &&
	pci_get_subclass(dev) == PCIS_STORAGE_IDE) {
	device_set_desc(dev, "Cypress 82C693 ATA controller");
	ctlr->chipinit = ata_cypress_chipinit;
	return 0;
    }
    return ENXIO;
}

static int
ata_cypress_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    ctlr->setmode = ata_cypress_setmode;
    return 0;
}

static void
ata_cypress_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int error;

    mode = ata_limit_mode(dev, mode, ATA_WDMA2);

    /* XXX SOS missing WDMA0+1 + PIO modes */
    if (mode == ATA_WDMA2) {
	error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
	if (bootverbose)
	    device_printf(dev, "%ssetting WDMA2 on Cypress chip\n",
			  error ? "FAILURE " : "");
	if (!error) {
	    pci_write_config(gparent, ch->unit ? 0x4e : 0x4c, 0x2020, 2);
	    atadev->mode = mode;
	    return;
	}
    }
    /* we could set PIO mode timings, but we assume the BIOS did that */
}

/*
 * HighPoint chipset support functions
 */
int
ata_highpoint_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_HPT374, 0x07, HPT374, 0x00,   ATA_UDMA6, "HPT374" },
     { ATA_HPT372, 0x02, HPT372, 0x00,   ATA_UDMA6, "HPT372N" },
     { ATA_HPT372, 0x01, HPT372, 0x00,   ATA_UDMA6, "HPT372" },
     { ATA_HPT371, 0x01, HPT372, 0x00,   ATA_UDMA6, "HPT371" },
     { ATA_HPT366, 0x05, HPT372, 0x00,   ATA_UDMA6, "HPT372" },
     { ATA_HPT366, 0x03, HPT370, 0x00,   ATA_UDMA5, "HPT370" },
     { ATA_HPT366, 0x02, HPT366, 0x00,   ATA_UDMA4, "HPT368" },
     { ATA_HPT366, 0x00, HPT366, HPTOLD, ATA_UDMA4, "HPT366" },
     { ATA_HPT302, 0x01, HPT372, 0x00,   ATA_UDMA6, "HPT302" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];
    const char *channel = "";

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    if (idx->cfg1 == HPT374) {
	if (pci_get_function(dev) == 0)
	    channel = " (channel 0+1)";
	else if (pci_get_function(dev) == 1)
	    channel = " (channel 2+3)";
    }
    /*
     * build the description in one pass; the old code sprintf'ed buffer
     * into itself ("%s", buffer) which is undefined behavior (overlapping
     * source and destination)
     */
    snprintf(buffer, sizeof(buffer), "HighPoint %s%s %s controller",
	     idx->text, channel, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_highpoint_chipinit;
    return 0;
}

static int
ata_highpoint_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    if (ctlr->chip->cfg2 == HPTOLD) {
	/* disable interrupt prediction */
	pci_write_config(dev, 0x51, (pci_read_config(dev, 0x51, 1) & ~0x80), 1);
    }
    else {
	/* disable interrupt prediction */
	pci_write_config(dev, 0x51, (pci_read_config(dev, 0x51, 1) & ~0x03), 1);
	pci_write_config(dev, 0x55, (pci_read_config(dev, 0x55, 1) & ~0x03), 1);

	/* enable interrupts */
	pci_write_config(dev, 0x5a, (pci_read_config(dev, 0x5a, 1) & ~0x10), 1);

	/* set clocks etc */
	if (ctlr->chip->cfg1 < HPT372)
	    pci_write_config(dev, 0x5b, 0x22, 1);
	else
	    pci_write_config(dev, 0x5b,
			     (pci_read_config(dev, 0x5b, 1) & 0x01) | 0x20, 1);
    }
    ctlr->allocate = ata_highpoint_allocate;
    ctlr->setmode = ata_highpoint_setmode;
    return 0;
}

static int
ata_highpoint_allocate(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* setup the usual register normal pci style */
    if (ata_pci_allocate(dev))
	return ENXIO;

    ch->flags |= ATA_ALWAYS_DMASTAT;
    return 0;
}

static void
ata_highpoint_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    int error;
    u_int32_t timings33[][4] = {
    /*    HPT366      HPT370      HPT372      HPT374               mode */
	{ 0x40d0a7aa, 0x06914e57, 0x0d029d5e, 0x0ac1f48a },     /* PIO 0 */
	{ 0x40d0a7a3, 0x06914e43, 0x0d029d26, 0x0ac1f465 },     /* PIO 1 */
	{ 0x40d0a753, 0x06514e33, 0x0c829ca6, 0x0a81f454 },     /* PIO 2 */
	{ 0x40c8a742, 0x06514e22, 0x0c829c84, 0x0a81f443 },     /* PIO 3 */
	{ 0x40c8a731, 0x06514e21, 0x0c829c62, 0x0a81f442 },     /* PIO 4 */
	{ 0x20c8a797, 0x26514e97, 0x2c82922e, 0x228082ea },     /* MWDMA 0 */
	{ 0x20c8a732, 0x26514e33, 0x2c829266, 0x22808254 },     /* MWDMA 1 */
	{ 0x20c8a731, 0x26514e21, 0x2c829262, 0x22808242 },     /* MWDMA 2 */
	{ 0x10c8a731, 0x16514e31, 0x1c829c62, 0x121882ea },     /* UDMA 0 */
	{ 0x10cba731, 0x164d4e31, 0x1c9a9c62, 0x12148254 },     /* UDMA 1 */
	{ 0x10caa731, 0x16494e31, 0x1c929c62, 0x120c8242 },     /* UDMA 2 */
	{ 0x10cfa731, 0x166d4e31, 0x1c8e9c62, 0x128c8242 },     /* UDMA 3 */
	{ 0x10c9a731, 0x16454e31, 0x1c8a9c62, 0x12ac8242 },     /* UDMA 4 */
	{ 0,          0x16454e31, 0x1c8a9c62, 0x12848242 },     /* UDMA 5 */
	{ 0,          0,          0x1c869c62, 0x12808242 }      /* UDMA 6 */
    };

    mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);

    if (ctlr->chip->cfg1 == HPT366 && ata_atapi(dev))
	mode = ata_limit_mode(dev, mode, ATA_PIO_MAX);

    mode = ata_highpoint_check_80pin(dev, mode);

    /*
     * most if not all HPT chips cant really handle that the device is
     * running at ATA_UDMA6/ATA133 speed, so we cheat at set the device to
     * a max of ATA_UDMA5/ATA100 to guard against suboptimal performance
     */
    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0,
			   ata_limit_mode(dev, mode, ATA_UDMA5));

    if (bootverbose)
	device_printf(dev, "%ssetting %s on HighPoint chip\n",
		      (error) ? "FAILURE " : "", ata_mode2str(mode));
    if (!error)
	pci_write_config(gparent, 0x40 + (devno << 2),
			 timings33[ata_mode2idx(mode)][ctlr->chip->cfg1], 4);
    atadev->mode = mode;
}

/* probe the cable type; downgrade to UDMA2 when no 80pin cable is seen */
static int
ata_highpoint_check_80pin(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    u_int8_t reg, val, res;

    if (ctlr->chip->cfg1 == HPT374 && pci_get_function(gparent) == 1) {
	reg = ch->unit ? 0x57 : 0x53;
	val = pci_read_config(gparent, reg, 1);
	pci_write_config(gparent, reg, val | 0x80, 1);
    }
    else {
	reg = 0x5b;
	val = pci_read_config(gparent, reg, 1);
	pci_write_config(gparent, reg, val & 0xfe, 1);
    }
    res = pci_read_config(gparent, 0x5a, 1) & (ch->unit ? 0x1:0x2);
    pci_write_config(gparent, reg, val, 1);

    if (mode > ATA_UDMA2 && res) {
	ata_print_cable(dev, "controller");
	mode = ATA_UDMA2;
    }
    return mode;
}

/*
 * Intel chipset support functions
 */
int
ata_intel_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    struct ata_chip_id *idx;
    static struct ata_chip_id ids[] =
    {{ ATA_I82371FB,     0, 0,    0x00, ATA_WDMA2, "PIIX" },
     { ATA_I82371SB,     0, 0,    0x00, ATA_WDMA2, "PIIX3" },
     { ATA_I82371AB,     0, 0,    0x00, ATA_UDMA2, "PIIX4" },
     { ATA_I82443MX,     0, 0,    0x00, ATA_UDMA2, "PIIX4" },
     { ATA_I82451NX,     0, 0,    0x00, ATA_UDMA2, "PIIX4" },
     { ATA_I82801AB,     0, 0,    0x00, ATA_UDMA2, "ICH0" },
     { ATA_I82801AA,     0, 0,    0x00, ATA_UDMA4, "ICH" },
     { ATA_I82372FB,     0, 0,    0x00, ATA_UDMA4, "ICH" },
     { ATA_I82801BA,     0, 0,    0x00, ATA_UDMA5, "ICH2" },
     { ATA_I82801BA_1,   0, 0,    0x00, ATA_UDMA5, "ICH2" },
     { ATA_I82801CA,     0, 0,    0x00, ATA_UDMA5, "ICH3" },
     { ATA_I82801CA_1,   0, 0,    0x00, ATA_UDMA5, "ICH3" },
     { ATA_I82801DB,     0, 0,    0x00, ATA_UDMA5, "ICH4" },
     { ATA_I82801DB_1,   0, 0,    0x00, ATA_UDMA5, "ICH4" },
     { ATA_I82801EB,     0, 0,    0x00, ATA_UDMA5, "ICH5" },
     { ATA_I82801EB_S1,  0, 0,    0x00, ATA_SA150, "ICH5" },
     { ATA_I82801EB_R1,  0, 0,    0x00, ATA_SA150, "ICH5" },
     { ATA_I6300ESB,     0, 0,    0x00, ATA_UDMA5, "6300ESB" },
     { ATA_I6300ESB_S1,  0, 0,    0x00, ATA_SA150, "6300ESB" },
     { ATA_I6300ESB_R1,  0, 0,    0x00, ATA_SA150, "6300ESB" },
     { ATA_I82801FB,     0, 0,    0x00, ATA_UDMA5, "ICH6" },
     { ATA_I82801FB_S1,  0, AHCI, 0x00, ATA_SA150, "ICH6" },
     { ATA_I82801FB_R1,  0, AHCI, 0x00, ATA_SA150, "ICH6" },
     { ATA_I82801FB_M,   0, AHCI, 0x00, ATA_SA150, "ICH6" },
     { ATA_I82801GB,     0, 0,    0x00, ATA_UDMA5, "ICH7" },
     { ATA_I82801GB_S1,  0, AHCI, 0x00, ATA_SA300, "ICH7" },
     { ATA_I82801GB_R1,  0, AHCI, 0x00, ATA_SA300, "ICH7" },
     { ATA_I82801GB_M,   0, AHCI, 0x00, ATA_SA300, "ICH7" },
     { ATA_I82801GB_AH,  0, AHCI, 0x00, ATA_SA300, "ICH7" },
     { ATA_I31244,       0, 0,    0x00, ATA_SA150, "31244" },
     { 0, 0, 0, 0, 0, 0}};
    char buffer[64];

    if (!(idx = ata_match_chip(dev, ids)))
	return ENXIO;

    sprintf(buffer, "Intel %s %s controller",
	    idx->text, ata_mode2str(idx->max_dma));
    device_set_desc_copy(dev, buffer);
    ctlr->chip = idx;
    ctlr->chipinit = ata_intel_chipinit;
    return 0;
}

static int
ata_intel_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev))
	return ENXIO;

    /* good old PIIX needs special treatment (not implemented) */
    if (ctlr->chip->chipid == ATA_I82371FB) {
	ctlr->setmode = ata_intel_old_setmode;
    }

    /* the intel 31244 needs special care if in DPA mode */
    else if (ctlr->chip->chipid == ATA_I31244) {
	if (pci_get_subclass(dev) != PCIS_STORAGE_IDE) {
	    ctlr->r_type2 = SYS_RES_MEMORY;
	    ctlr->r_rid2 = PCIR_BAR(0);
	    if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
							&ctlr->r_rid2,
							RF_ACTIVE)))
		return ENXIO;
	    ctlr->channels = 4;
	    ctlr->allocate = ata_intel_31244_allocate;
	    ctlr->reset = ata_intel_31244_reset;
	}
	ctlr->setmode = ata_sata_setmode;
    }

    /* non SATA intel chips goes here */
    else if (ctlr->chip->max_dma < ATA_SA150) {
	ctlr->allocate = ata_intel_allocate;
	ctlr->setmode = ata_intel_new_setmode;
    }

    /* SATA parts can be either compat or AHCI */
    else {
	/* force all ports active "the legacy way" */
	pci_write_config(dev, 0x92, pci_read_config(dev, 0x92, 2) | 0x0f,2);

	ctlr->allocate = ata_intel_allocate;
	ctlr->reset = ata_intel_reset;

	/* if we have AHCI capability and BAR(5) as a memory resource */
	if (ctlr->chip->cfg1 == AHCI) {
	    ctlr->r_type2 = SYS_RES_MEMORY;
	    ctlr->r_rid2 = PCIR_BAR(5);
	    if ((ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
						       &ctlr->r_rid2,
						       RF_ACTIVE))) {
		/* is AHCI or RAID mode enabled in BIOS ? */
		if (pci_read_config(dev, 0x90, 1) & 0xc0) {
		    /* enable AHCI mode */
		    ATA_OUTL(ctlr->r_res2, ATA_AHCI_GHC, ATA_AHCI_GHC_AE);

		    /* get the number of HW channels */
		    ctlr->channels = (ATA_INL(ctlr->r_res2, ATA_AHCI_CAP) &
				      ATA_AHCI_NPMASK) + 1;

		    /* enable AHCI interrupts */
		    ATA_OUTL(ctlr->r_res2, ATA_AHCI_GHC,
			     ATA_INL(ctlr->r_res2, ATA_AHCI_GHC) |
			     ATA_AHCI_GHC_IE);

		    ctlr->allocate = ata_ahci_allocate;
		    ctlr->reset = ata_ahci_reset;
		    ctlr->dmainit = ata_ahci_dmainit;
		}
	    }
	}
	ctlr->setmode = ata_sata_setmode;

	/* enable PCI interrupt (clear the INTx disable bit) */
	pci_write_config(dev, PCIR_COMMAND,
			 pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
    }
    return 0;
}

static int
ata_intel_allocate(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* setup the usual register normal pci style */
    if (ata_pci_allocate(dev))
	return ENXIO;

    ch->flags |= ATA_ALWAYS_DMASTAT;
    return 0;
}

static void
ata_intel_reset(device_t dev)
{
    device_t parent = device_get_parent(dev);
    struct ata_pci_controller *ctlr = device_get_softc(parent);
    struct ata_channel *ch = device_get_softc(dev);
    int mask, timeout;

    /* ICH6 & ICH7 in compat mode has 4 SATA ports as master/slave on 2 ch's */
    if (ctlr->chip->cfg1) {
	mask = (0x0005 << ch->unit);
    }
    else {
	/* ICH5 in compat mode has SATA ports as master/slave on 1 channel */
	if (pci_read_config(parent, 0x90, 1) & 0x04)
	    mask = 0x0003;
	else {
	    mask = (0x0001 << ch->unit);
	    /* XXX SOS should be in intel_allocate if we grow it */
	    ch->flags |= ATA_NO_SLAVE;
	}
    }
    /* pulse the port enable bits to reset the ports */
    pci_write_config(parent, 0x92, pci_read_config(parent, 0x92, 2) & ~mask, 2);
    DELAY(10);
    pci_write_config(parent, 0x92, pci_read_config(parent, 0x92, 2) | mask, 2);

    /* wait up to 1 sec for "connect well" */
    for (timeout = 0; timeout < 100 ; timeout++) {
	if (((pci_read_config(parent, 0x92, 2) & (mask << 4)) == (mask << 4)) &&
	    (ATA_IDX_INB(ch, ATA_STATUS) != 0xff))
	    break;
	ata_udelay(10000);
    }
    ata_generic_reset(dev);
}

static void
ata_intel_old_setmode(device_t dev, int mode)
{
    /* NOT YET */
}

static void
ata_intel_new_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int devno = (ch->unit << 1) + ATA_DEV(atadev->unit);
    u_int32_t reg40 = pci_read_config(gparent, 0x40, 4);
    u_int8_t reg44 = pci_read_config(gparent, 0x44, 1);
    u_int8_t reg48 = pci_read_config(gparent, 0x48, 1);
    u_int16_t reg4a = pci_read_config(gparent, 0x4a, 2);
    u_int16_t reg54 = pci_read_config(gparent, 0x54, 2);
    u_int32_t mask40 = 0, new40 = 0;
    u_int8_t mask44 = 0, new44 = 0;
    int error;
    u_int8_t timings[] = { 0x00, 0x00, 0x10, 0x21, 0x23, 0x10, 0x21, 0x23,
			   0x23, 0x23, 0x23, 0x23, 0x23, 0x23 };

    mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);

    if ( mode > ATA_UDMA2 && !(reg54 & (0x10 << devno))) {
	ata_print_cable(dev, "controller");
	mode = ATA_UDMA2;
    }

    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);

    if (bootverbose)
	device_printf(dev, "%ssetting %s on %s chip\n",
		      (error) ? "FAILURE " : "",
		      ata_mode2str(mode), ctlr->chip->text);
    if (error)
	return;

    if (mode >= ATA_UDMA0) {
	pci_write_config(gparent, 0x48, reg48 | (0x0001 << devno), 2);
	pci_write_config(gparent, 0x4a,
			 (reg4a & ~(0x3 << (devno << 2))) |
			 ((0x01 + !(mode & 0x01)) << (devno << 2)), 2);
    }
    else {
	pci_write_config(gparent, 0x48, reg48 & ~(0x0001 << devno), 2);
	pci_write_config(gparent, 0x4a, (reg4a & ~(0x3 << (devno << 2))), 2);
    }
    reg54 |= 0x0400;
    if (mode >= ATA_UDMA2)
	pci_write_config(gparent, 0x54, reg54 | (0x1 << devno), 2);
    else
	pci_write_config(gparent, 0x54, reg54 & ~(0x1 << devno), 2);

    if (mode >= ATA_UDMA5)
	pci_write_config(gparent, 0x54, reg54 | (0x1000 << devno), 2);
    else
	pci_write_config(gparent, 0x54, reg54 & ~(0x1000 << devno), 2);

    reg40 &= ~0x00ff00ff;
    reg40 |= 0x40774077;

    if (atadev->unit == ATA_MASTER) {
	mask40 = 0x3300;
	new40 = timings[ata_mode2idx(mode)] << 8;
    }
    else {
	mask44 = 0x0f;
	new44 = ((timings[ata_mode2idx(mode)] & 0x30) >> 2) |
		(timings[ata_mode2idx(mode)] & 0x03);
    }
    /* secondary channel uses the upper halves of the timing registers */
    if (ch->unit) {
	mask40 <<= 16;
	new40 <<= 16;
	mask44 <<= 4;
	new44 <<= 4;
    }
    pci_write_config(gparent, 0x40, (reg40 & ~mask40) | new40, 4);
    pci_write_config(gparent, 0x44, (reg44 & ~mask44) | new44, 1);

    atadev->mode = mode;
}

static int
ata_intel_31244_allocate(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int i;
    int ch_offset;

    ch_offset = 0x200 + ch->unit * 0x200;

    for (i = ATA_DATA; i < ATA_MAX_RES; i++)
	ch->r_io[i].res = ctlr->r_res2;

    ch->r_io[ATA_DATA].offset = ch_offset + 0x00;
    ch->r_io[ATA_FEATURE].offset = ch_offset + 0x06;
    ch->r_io[ATA_COUNT].offset = ch_offset + 0x08;
    ch->r_io[ATA_SECTOR].offset = ch_offset + 0x0c;
    ch->r_io[ATA_CYL_LSB].offset = ch_offset + 0x10;
    ch->r_io[ATA_CYL_MSB].offset = ch_offset + 0x14;
    ch->r_io[ATA_DRIVE].offset = ch_offset + 0x18;
    ch->r_io[ATA_COMMAND].offset = ch_offset + 0x1d;
    ch->r_io[ATA_ERROR].offset = ch_offset + 0x04;
    ch->r_io[ATA_STATUS].offset = ch_offset
+ 0x1c; ch->r_io[ATA_ALTSTAT].offset = ch_offset + 0x28; ch->r_io[ATA_CONTROL].offset = ch_offset + 0x29; ch->r_io[ATA_SSTATUS].offset = ch_offset + 0x100; ch->r_io[ATA_SERROR].offset = ch_offset + 0x104; ch->r_io[ATA_SCONTROL].offset = ch_offset + 0x108; ch->r_io[ATA_BMCMD_PORT].offset = ch_offset + 0x70; ch->r_io[ATA_BMSTAT_PORT].offset = ch_offset + 0x72; ch->r_io[ATA_BMDTP_PORT].offset = ch_offset + 0x74; ch->flags |= ATA_NO_SLAVE; ata_pci_hw(dev); ch->hw.status = ata_intel_31244_status; ch->hw.command = ata_intel_31244_command; /* enable PHY state change interrupt */ ATA_OUTL(ctlr->r_res2, 0x4, ATA_INL(ctlr->r_res2, 0x04) | (0x01 << (ch->unit << 3))); return 0; } static int ata_intel_31244_status(device_t dev) { struct ata_channel *ch = device_get_softc(dev); u_int32_t status = ATA_IDX_INL(ch, ATA_SSTATUS); u_int32_t error = ATA_IDX_INL(ch, ATA_SERROR); struct ata_connect_task *tp; /* check for PHY related interrupts on SATA capable HW */ if (error) { /* clear error bits/interrupt */ ATA_IDX_OUTL(ch, ATA_SERROR, error); /* if we have a connection event deal with it */ if ((error & ATA_SE_PHY_CHANGED) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if ((status & ATA_SS_CONWELL_MASK) == ATA_SS_CONWELL_GEN1) { if (bootverbose) device_printf(ch->dev, "CONNECT requested\n"); tp->action = ATA_C_ATTACH; } else { if (bootverbose) device_printf(ch->dev, "DISCONNECT requested\n"); tp->action = ATA_C_DETACH; } tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } } /* any drive action to take care of ? 
*/ return 1; } static int ata_intel_31244_command(struct ata_request *request) { struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); struct ata_device *atadev = device_get_softc(request->dev); u_int64_t lba; if (!(atadev->flags & ATA_D_48BIT_ACTIVE)) return (ata_generic_command(request)); lba = request->u.ata.lba; ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | atadev->unit); /* enable interrupt */ ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT); ATA_IDX_OUTW(ch, ATA_FEATURE, request->u.ata.feature); ATA_IDX_OUTW(ch, ATA_COUNT, request->u.ata.count); ATA_IDX_OUTW(ch, ATA_SECTOR, ((lba >> 16) & 0xff00) | (lba & 0x00ff)); ATA_IDX_OUTW(ch, ATA_CYL_LSB, ((lba >> 24) & 0xff00) | ((lba >> 8) & 0x00ff)); ATA_IDX_OUTW(ch, ATA_CYL_MSB, ((lba >> 32) & 0xff00) | ((lba >> 16) & 0x00ff)); /* issue command to controller */ ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command); return 0; } static void ata_intel_31244_reset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_sata_phy_enable(ch); } /* * Integrated Technology Express Inc. 
(ITE) chipset support functions */ int ata_ite_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_IT8212F, 0x00, 0x00, 0x00, ATA_UDMA6, "IT8212F" }, { ATA_IT8211F, 0x00, 0x00, 0x00, ATA_UDMA6, "IT8211F" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "ITE %s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_ite_chipinit; return 0; } static int ata_ite_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; ctlr->setmode = ata_ite_setmode; /* set PCI mode and 66Mhz reference clock */ pci_write_config(dev, 0x50, pci_read_config(dev, 0x50, 1) & ~0x83, 1); /* set default active & recover timings */ pci_write_config(dev, 0x54, 0x31, 1); pci_write_config(dev, 0x56, 0x31, 1); return 0; } static void ata_ite_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); int error; /* correct the mode for what the HW supports */ mode = ata_limit_mode(dev, mode, ATA_UDMA6); /* check the CBLID bits for 80 conductor cable detection */ if (mode > ATA_UDMA2 && (pci_read_config(gparent, 0x40, 2) & (ch->unit ? (1<<3) : (1<<2)))) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } /* set the wanted mode on the device */ error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%s setting %s on ITE8212F chip\n", (error) ? 
"failed" : "success", ata_mode2str(mode)); /* if the device accepted the mode change, setup the HW accordingly */ if (!error) { if (mode >= ATA_UDMA0) { u_int8_t udmatiming[] = { 0x44, 0x42, 0x31, 0x21, 0x11, 0xa2, 0x91 }; /* enable UDMA mode */ pci_write_config(gparent, 0x50, pci_read_config(gparent, 0x50, 1) & ~(1 << (devno + 3)), 1); /* set UDMA timing */ pci_write_config(gparent, 0x56 + (ch->unit << 2) + ATA_DEV(atadev->unit), udmatiming[mode & ATA_MODE_MASK], 1); } else { u_int8_t chtiming[] = { 0xaa, 0xa3, 0xa1, 0x33, 0x31, 0x88, 0x32, 0x31 }; /* disable UDMA mode */ pci_write_config(gparent, 0x50, pci_read_config(gparent, 0x50, 1) | (1 << (devno + 3)), 1); /* set active and recover timing (shared between master & slave) */ if (pci_read_config(gparent, 0x54 + (ch->unit << 2), 1) < chtiming[ata_mode2idx(mode)]) pci_write_config(gparent, 0x54 + (ch->unit << 2), chtiming[ata_mode2idx(mode)], 1); } atadev->mode = mode; } } /* * Marvell chipset support functions */ #define ATA_MV_HOST_BASE(ch) \ ((ch->unit & 3) * 0x0100) + (ch->unit > 3 ? 0x30000 : 0x20000) #define ATA_MV_EDMA_BASE(ch) \ ((ch->unit & 3) * 0x2000) + (ch->unit > 3 ? 
0x30000 : 0x20000) struct ata_marvell_response { - u_int16_t tag; - u_int8_t edma_status; - u_int8_t dev_status; - u_int32_t timestamp; + u_int16_t tag; + u_int8_t edma_status; + u_int8_t dev_status; + u_int32_t timestamp; }; struct ata_marvell_dma_prdentry { u_int32_t addrlo; u_int32_t count; u_int32_t addrhi; u_int32_t reserved; }; int ata_marvell_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_M88SX5040, 0, 4, MV5XXX, ATA_SA150, "88SX5040" }, { ATA_M88SX5041, 0, 4, MV5XXX, ATA_SA150, "88SX5041" }, { ATA_M88SX5080, 0, 8, MV5XXX, ATA_SA150, "88SX5080" }, { ATA_M88SX5081, 0, 8, MV5XXX, ATA_SA150, "88SX5081" }, { ATA_M88SX6041, 0, 4, MV6XXX, ATA_SA300, "88SX6041" }, { ATA_M88SX6081, 0, 8, MV6XXX, ATA_SA300, "88SX6081" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "Marvell %s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_marvell_chipinit; return 0; } static int ata_marvell_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; ctlr->r_type1 = SYS_RES_MEMORY; ctlr->r_rid1 = PCIR_BAR(0); if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE))) return ENXIO; /* mask all host controller interrupts */ ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000); /* mask all PCI interrupts */ ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000); ctlr->allocate = ata_marvell_allocate; ctlr->reset = ata_marvell_reset; ctlr->dmainit = ata_marvell_dmainit; ctlr->setmode = ata_sata_setmode; ctlr->channels = ctlr->chip->cfg1; /* clear host controller interrupts */ ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000); if (ctlr->chip->cfg1 > 4) ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000); /* clear PCI interrupts */ ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000); /* unmask 
PCI interrupts we want */ ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff); /* unmask host controller interrupts we want */ ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ | /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25)); pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2); return 0; } static int ata_marvell_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); bus_addr_t wordp = ch->dma->work_bus; int i; /* set legacy ATA resources */ for (i = ATA_DATA; i <= ATA_COMMAND; i++) { ch->r_io[i].res = ctlr->r_res1; ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch); } ch->r_io[ATA_CONTROL].res = ctlr->r_res1; ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch); ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1; ata_default_registers(dev); /* set SATA resources */ switch (ctlr->chip->cfg2) { case MV5XXX: ch->r_io[ATA_SSTATUS].res = ctlr->r_res1; ch->r_io[ATA_SSTATUS].offset = 0x00100 + ATA_MV_HOST_BASE(ch); ch->r_io[ATA_SERROR].res = ctlr->r_res1; ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch); ch->r_io[ATA_SCONTROL].res = ctlr->r_res1; ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch); break; case MV6XXX: ch->r_io[ATA_SSTATUS].res = ctlr->r_res1; ch->r_io[ATA_SSTATUS].offset = 0x02300 + ATA_MV_EDMA_BASE(ch); ch->r_io[ATA_SERROR].res = ctlr->r_res1; ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch); ch->r_io[ATA_SCONTROL].res = ctlr->r_res1; ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch); ch->r_io[ATA_SACTIVE].res = ctlr->r_res1; ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch); break; } ch->flags |= ATA_NO_SLAVE; ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? 
*/ ata_generic_hw(dev); ch->hw.begin_transaction = ata_marvell_begin_transaction; ch->hw.end_transaction = ata_marvell_end_transaction; ch->hw.status = ata_marvell_status; /* disable the EDMA machinery */ ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002); DELAY(100000); /* SOS should poll for disabled */ /* set configuration to non-queued 128b read transfers stop on error */ ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13)); /* request queue base high */ ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), (wordp >> 16) >> 16); /* request queue in ptr */ ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), wordp & 0xffffffff); /* request queue out ptr */ ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0); /* response queue base high */ wordp += 1024; ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), (wordp >> 16) >> 16); /* response queue in ptr */ ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0); /* response queue out ptr */ ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), wordp & 0xffffffff); /* clear SATA error register */ ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR)); /* clear any outstanding error interrupts */ ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0); /* unmask all error interrupts */ ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0); /* enable EDMA machinery */ ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001); return 0; } static int ata_marvell_status(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60); int shift = (ch->unit << 1) + (ch->unit > 3); /* do we have any errors flagged ? 
*/ if (cause & (1 << shift)) { struct ata_connect_task *tp; u_int32_t error = ATA_INL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch)); /* check for and handle disconnect events */ if ((error & 0x00000008) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if (bootverbose) device_printf(ch->dev, "DISCONNECT requested\n"); tp->action = ATA_C_DETACH; tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } /* check for and handle connect events */ if ((error & 0x00000010) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if (bootverbose) device_printf(ch->dev, "CONNECT requested\n"); tp->action = ATA_C_ATTACH; tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } /* clear SATA error register */ ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR)); /* clear any outstanding error interrupts */ ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0); } /* do we have any device action ? 
*/ return (cause & (2 << shift)); } /* must be called with ATA channel locked and state_mtx held */ static int ata_marvell_begin_transaction(struct ata_request *request) { struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev)); struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); u_int32_t req_in; u_int8_t *bytep; u_int16_t *wordp; u_int32_t *quadp; int i, tag = 0x07; int dummy, error, slot; /* only DMA R/W goes through the EMDA machine */ if (request->u.ata.command != ATA_READ_DMA && request->u.ata.command != ATA_WRITE_DMA) { - /* disable the EDMA machinery */ - if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001) + /* disable the EDMA machinery */ + if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001) ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002); return ata_begin_transaction(request); } /* check for 48 bit access and convert if needed */ ata_modify_if_48bit(request); /* check sanity, setup SG list and DMA engine */ if ((error = ch->dma->load(ch->dev, request->data, request->bytecount, - request->flags & ATA_R_READ, ch->dma->sg, - &dummy))) { - device_printf(request->dev, "setting up DMA failed\n"); - request->result = error; - return ATA_OP_FINISHED; + request->flags & ATA_R_READ, ch->dma->sg, + &dummy))) { + device_printf(request->dev, "setting up DMA failed\n"); + request->result = error; + return ATA_OP_FINISHED; } /* get next free request queue slot */ req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch)); slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f; bytep = (u_int8_t *)(ch->dma->work); bytep += (slot << 5); wordp = (u_int16_t *)bytep; quadp = (u_int32_t *)bytep; /* fill in this request */ quadp[0] = (long)ch->dma->sg_bus & 0xffffffff; quadp[1] = (ch->dma->sg_bus & 0xffffffff00000000) >> 32; wordp[4] = (request->flags & ATA_R_READ ? 
0x01 : 0x00) | (tag<<1); i = 10; bytep[i++] = (request->u.ata.count >> 8) & 0xff; bytep[i++] = 0x10 | ATA_COUNT; bytep[i++] = request->u.ata.count & 0xff; bytep[i++] = 0x10 | ATA_COUNT; bytep[i++] = (request->u.ata.lba >> 24) & 0xff; bytep[i++] = 0x10 | ATA_SECTOR; bytep[i++] = request->u.ata.lba & 0xff; bytep[i++] = 0x10 | ATA_SECTOR; bytep[i++] = (request->u.ata.lba >> 32) & 0xff; bytep[i++] = 0x10 | ATA_CYL_LSB; bytep[i++] = (request->u.ata.lba >> 8) & 0xff; bytep[i++] = 0x10 | ATA_CYL_LSB; bytep[i++] = (request->u.ata.lba >> 40) & 0xff; bytep[i++] = 0x10 | ATA_CYL_MSB; bytep[i++] = (request->u.ata.lba >> 16) & 0xff; bytep[i++] = 0x10 | ATA_CYL_MSB; bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf); bytep[i++] = 0x10 | ATA_DRIVE; bytep[i++] = request->u.ata.command; bytep[i++] = 0x90 | ATA_COMMAND; /* enable EDMA machinery if needed */ if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) { ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001); while (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) DELAY(10); } /* tell EDMA it has a new request */ slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f; req_in &= 0xfffffc00; req_in += (slot << 5); ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in); return ATA_OP_CONTINUES; } /* must be called with ATA channel locked and state_mtx held */ static int ata_marvell_end_transaction(struct ata_request *request) { struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev)); struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); int offset = (ch->unit > 3 ? 
0x30014 : 0x20014); u_int32_t icr = ATA_INL(ctlr->r_res1, offset); int res; /* EDMA interrupt */ if ((icr & (0x0001 << (ch->unit & 3)))) { struct ata_marvell_response *response; u_int32_t rsp_in, rsp_out; int slot; /* unload SG list */ ch->dma->unload(ch->dev); /* stop timeout */ callout_stop(&request->callout); /* get response ptr's */ rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch)); - rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch)); + rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch)); slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f; rsp_out &= 0xffffff00; rsp_out += (slot << 3); response = (struct ata_marvell_response *) (int8_t *)(ch->dma->work) + 1024 + (slot << 3); /* record status for this request */ request->status = response->dev_status; request->error = 0; /* ack response */ - ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out); + ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out); /* update progress */ if (!(request->status & ATA_S_ERROR) && !(request->flags & ATA_R_TIMEOUT)) request->donecount = request->bytecount; res = ATA_OP_FINISHED; } /* legacy ATA interrupt */ else { res = ata_end_transaction(request); } /* ack interrupt */ ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3)))); return res; } static void ata_marvell_reset(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* disable the EDMA machinery */ ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002); while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) DELAY(10); /* clear SATA error register */ ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR)); /* clear any outstanding error interrupts */ ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0); /* unmask all error interrupts */ ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0); /* enable channel and test for 
devices */ ata_sata_phy_enable(ch); /* enable EDMA machinery */ ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001); } static void ata_marvell_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ata_dmasetprd_args *args = xsc; struct ata_marvell_dma_prdentry *prd = args->dmatab; int i; if ((args->error = error)) return; for (i = 0; i < nsegs; i++) { prd[i].addrlo = htole32(segs[i].ds_addr); prd[i].addrhi = 0; prd[i].count = htole32(segs[i].ds_len); } prd[i - 1].count |= htole32(ATA_DMA_EOT); } static void ata_marvell_dmainit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_dmainit(dev); if (ch->dma) { /* note start and stop are not used here */ - ch->dma->setprd = ata_marvell_dmasetprd; + ch->dma->setprd = ata_marvell_dmasetprd; } } /* * National chipset support functions */ int ata_national_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); /* this chip is a clone of the Cyrix chip, bugs and all */ if (pci_get_devid(dev) == ATA_SC1100) { device_set_desc(dev, "National Geode SC1100 ATA33 controller"); ctlr->chipinit = ata_national_chipinit; return 0; } return ENXIO; } static int ata_national_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; ctlr->setmode = ata_national_setmode; return 0; } static void ata_national_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); u_int32_t piotiming[] = { 0x9172d132, 0x21717121, 0x00803020, 0x20102010, 0x00100010, 0x00803020, 0x20102010, 0x00100010, 0x00100010, 0x00100010, 0x00100010 }; u_int32_t dmatiming[] = { 0x80077771, 0x80012121, 0x80002020 }; u_int32_t udmatiming[] = { 0x80921250, 0x80911140, 0x80911030 }; int error; ch->dma->alignment = 16; ch->dma->max_iosize = 126 * 
DEV_BSIZE; mode = ata_limit_mode(dev, mode, ATA_UDMA2); error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%s setting %s on National chip\n", (error) ? "failed" : "success", ata_mode2str(mode)); if (!error) { if (mode >= ATA_UDMA0) { pci_write_config(gparent, 0x44 + (devno << 3), udmatiming[mode & ATA_MODE_MASK], 4); } else if (mode >= ATA_WDMA0) { pci_write_config(gparent, 0x44 + (devno << 3), dmatiming[mode & ATA_MODE_MASK], 4); } else { pci_write_config(gparent, 0x44 + (devno << 3), pci_read_config(gparent, 0x44 + (devno << 3), 4) | 0x80000000, 4); } pci_write_config(gparent, 0x40 + (devno << 3), piotiming[ata_mode2idx(mode)], 4); atadev->mode = mode; } } /* * nVidia chipset support functions */ int ata_nvidia_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_NFORCE1, 0, AMDNVIDIA, NVIDIA, ATA_UDMA5, "nForce" }, { ATA_NFORCE2, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce2" }, { ATA_NFORCE2_PRO, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce2 Pro" }, { ATA_NFORCE2_PRO_S1, 0, 0, 0, ATA_SA150, "nForce2 Pro" }, { ATA_NFORCE3, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce3" }, { ATA_NFORCE3_PRO, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce3 Pro" }, { ATA_NFORCE3_PRO_S1, 0, 0, 0, ATA_SA150, "nForce3 Pro" }, { ATA_NFORCE3_PRO_S2, 0, 0, 0, ATA_SA150, "nForce3 Pro" }, { ATA_NFORCE_MCP04, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce MCP" }, { ATA_NFORCE_MCP04_S1, 0, 0, NV4OFF, ATA_SA150, "nForce MCP" }, { ATA_NFORCE_MCP04_S2, 0, 0, NV4OFF, ATA_SA150, "nForce MCP" }, { ATA_NFORCE_CK804, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce CK804" }, { ATA_NFORCE_CK804_S1, 0, 0, NV4OFF, ATA_SA300, "nForce CK804" }, { ATA_NFORCE_CK804_S2, 0, 0, NV4OFF, ATA_SA300, "nForce CK804" }, { ATA_NFORCE_MCP51, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce MCP51" }, { ATA_NFORCE_MCP51_S1, 0, 0, NV4OFF, ATA_SA300, "nForce MCP51" }, { ATA_NFORCE_MCP51_S2, 0, 0, NV4OFF, 
ATA_SA300, "nForce MCP51" }, { ATA_NFORCE_MCP55, 0, AMDNVIDIA, NVIDIA, ATA_UDMA6, "nForce MCP55" }, { ATA_NFORCE_MCP55_S1, 0, 0, NV4OFF, ATA_SA300, "nForce MCP55" }, { ATA_NFORCE_MCP55_S2, 0, 0, NV4OFF, ATA_SA300, "nForce MCP55" }, { 0, 0, 0, 0, 0, 0}} ; char buffer[64] ; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "nVidia %s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_nvidia_chipinit; return 0; } static int ata_nvidia_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; if (ctlr->chip->max_dma >= ATA_SA150) { if (pci_read_config(dev, PCIR_BAR(5), 1) & 1) ctlr->r_type2 = SYS_RES_IOPORT; else ctlr->r_type2 = SYS_RES_MEMORY; ctlr->r_rid2 = PCIR_BAR(5); if ((ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) { int offset = ctlr->chip->cfg2 & NV4OFF ? 0x0440 : 0x0010; ctlr->allocate = ata_nvidia_allocate; ctlr->reset = ata_nvidia_reset; /* enable control access */ pci_write_config(dev, 0x50, pci_read_config(dev, 0x50, 1) | 0x04,1); /* clear interrupt status */ ATA_OUTB(ctlr->r_res2, offset, 0xff); /* enable device and PHY state change interrupts */ ATA_OUTB(ctlr->r_res2, offset + 1, 0xdd); /* enable PCI interrupt */ pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400,2); } ctlr->setmode = ata_sata_setmode; } else { /* disable prefetch, postwrite */ pci_write_config(dev, 0x51, pci_read_config(dev, 0x51, 1) & 0x0f, 1); ctlr->setmode = ata_via_family_setmode; } return 0; } static int ata_nvidia_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* setup the usual register normal pci style */ if (ata_pci_allocate(dev)) return ENXIO; ch->r_io[ATA_SSTATUS].res = ctlr->r_res2; ch->r_io[ATA_SSTATUS].offset = (ch->unit << 6); 
ch->r_io[ATA_SERROR].res = ctlr->r_res2; ch->r_io[ATA_SERROR].offset = 0x04 + (ch->unit << 6); ch->r_io[ATA_SCONTROL].res = ctlr->r_res2; ch->r_io[ATA_SCONTROL].offset = 0x08 + (ch->unit << 6); ch->hw.status = ata_nvidia_status; ch->flags |= ATA_NO_SLAVE; return 0; } static int ata_nvidia_status(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int offset = ctlr->chip->cfg2 & NV4OFF ? 0x0440 : 0x0010; struct ata_connect_task *tp; int shift = ch->unit << 2; u_int8_t status; /* get interrupt status */ status = ATA_INB(ctlr->r_res2, offset); /* check for and handle connect events */ if (((status & (0x0c << shift)) == (0x04 << shift)) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if (bootverbose) device_printf(ch->dev, "CONNECT requested\n"); tp->action = ATA_C_ATTACH; tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } /* check for and handle disconnect events */ if ((status & (0x08 << shift)) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if (bootverbose) device_printf(ch->dev, "DISCONNECT requested\n"); tp->action = ATA_C_DETACH; tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } /* clear interrupt status */ ATA_OUTB(ctlr->r_res2, offset, (0x0f << shift)); /* do we have any device action ? 
*/ return (status & (0x01 << shift)); } static void ata_nvidia_reset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_sata_phy_enable(ch); } /* * Promise chipset support functions */ #define ATA_PDC_APKT_OFFSET 0x00000010 #define ATA_PDC_HPKT_OFFSET 0x00000040 #define ATA_PDC_ASG_OFFSET 0x00000080 #define ATA_PDC_LSG_OFFSET 0x000000c0 #define ATA_PDC_HSG_OFFSET 0x00000100 #define ATA_PDC_CHN_OFFSET 0x00000400 #define ATA_PDC_BUF_BASE 0x00400000 #define ATA_PDC_BUF_OFFSET 0x00100000 #define ATA_PDC_MAX_HPKT 8 #define ATA_PDC_WRITE_REG 0x00 #define ATA_PDC_WRITE_CTL 0x0e #define ATA_PDC_WRITE_END 0x08 #define ATA_PDC_WAIT_NBUSY 0x10 #define ATA_PDC_WAIT_READY 0x18 #define ATA_PDC_1B 0x20 #define ATA_PDC_2B 0x40 struct host_packet { u_int32_t addr; TAILQ_ENTRY(host_packet) chain; }; struct ata_promise_sx4 { struct mtx mtx; TAILQ_HEAD(, host_packet) queue; int busy; }; int ata_promise_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_PDC20246, 0, PROLD, 0x00, ATA_UDMA2, "PDC20246" }, { ATA_PDC20262, 0, PRNEW, 0x00, ATA_UDMA4, "PDC20262" }, { ATA_PDC20263, 0, PRNEW, 0x00, ATA_UDMA4, "PDC20263" }, { ATA_PDC20265, 0, PRNEW, 0x00, ATA_UDMA5, "PDC20265" }, { ATA_PDC20267, 0, PRNEW, 0x00, ATA_UDMA5, "PDC20267" }, { ATA_PDC20268, 0, PRTX, PRTX4, ATA_UDMA5, "PDC20268" }, { ATA_PDC20269, 0, PRTX, 0x00, ATA_UDMA6, "PDC20269" }, { ATA_PDC20270, 0, PRTX, PRTX4, ATA_UDMA5, "PDC20270" }, { ATA_PDC20271, 0, PRTX, 0x00, ATA_UDMA6, "PDC20271" }, { ATA_PDC20275, 0, PRTX, 0x00, ATA_UDMA6, "PDC20275" }, { ATA_PDC20276, 0, PRTX, PRSX6K, ATA_UDMA6, "PDC20276" }, { ATA_PDC20277, 0, PRTX, 0x00, ATA_UDMA6, "PDC20277" }, { ATA_PDC20318, 0, PRMIO, PRSATA, ATA_SA150, "PDC20318" }, { ATA_PDC20319, 0, PRMIO, PRSATA, ATA_SA150, "PDC20319" }, { ATA_PDC20371, 0, PRMIO, PRCMBO, ATA_SA150, "PDC20371" }, { ATA_PDC20375, 0, PRMIO, PRCMBO, ATA_SA150, "PDC20375" }, { ATA_PDC20376, 0, PRMIO, 
PRCMBO, ATA_SA150, "PDC20376" }, { ATA_PDC20377, 0, PRMIO, PRCMBO, ATA_SA150, "PDC20377" }, { ATA_PDC20378, 0, PRMIO, PRCMBO, ATA_SA150, "PDC20378" }, { ATA_PDC20379, 0, PRMIO, PRCMBO, ATA_SA150, "PDC20379" }, { ATA_PDC20571, 0, PRMIO, PRCMBO2, ATA_SA150, "PDC20571" }, { ATA_PDC20575, 0, PRMIO, PRCMBO2, ATA_SA150, "PDC20575" }, { ATA_PDC20579, 0, PRMIO, PRCMBO2, ATA_SA150, "PDC20579" }, - { ATA_PDC20580, 0, PRMIO, PRCMBO2, ATA_SA150, "PDC20580" }, + { ATA_PDC20771, 0, PRMIO, PRCMBO2, ATA_SA300, "PDC20771" }, + { ATA_PDC40775, 0, PRMIO, PRCMBO2, ATA_SA300, "PDC40775" }, { ATA_PDC20617, 0, PRMIO, PRPATA, ATA_UDMA6, "PDC20617" }, { ATA_PDC20618, 0, PRMIO, PRPATA, ATA_UDMA6, "PDC20618" }, { ATA_PDC20619, 0, PRMIO, PRPATA, ATA_UDMA6, "PDC20619" }, { ATA_PDC20620, 0, PRMIO, PRPATA, ATA_UDMA6, "PDC20620" }, { ATA_PDC20621, 0, PRMIO, PRSX4X, ATA_UDMA5, "PDC20621" }, { ATA_PDC20622, 0, PRMIO, PRSX4X, ATA_SA150, "PDC20622" }, { ATA_PDC40518, 0, PRMIO, PRSATA2, ATA_SA150, "PDC40518" }, { ATA_PDC40519, 0, PRMIO, PRSATA2, ATA_SA150, "PDC40519" }, { ATA_PDC40718, 0, PRMIO, PRSATA2, ATA_SA300, "PDC40718" }, { ATA_PDC40719, 0, PRMIO, PRSATA2, ATA_SA300, "PDC40719" }, + { ATA_PDC40779, 0, PRMIO, PRSATA2, ATA_SA300, "PDC40779" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; uintptr_t devid = 0; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; /* if we are on a SuperTrak SX6000 dont attach */ if ((idx->cfg2 & PRSX6K) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE && !BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)), GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) && devid == ATA_I960RM) return ENXIO; strcpy(buffer, "Promise "); strcat(buffer, idx->text); /* if we are on a FastTrak TX4, adjust the interrupt resource */ if ((idx->cfg2 & PRTX4) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE && !BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)), GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) && ((devid == ATA_DEC_21150) || (devid == ATA_DEC_21150_1))) { static long start = 0, end = 0; if 
(pci_get_slot(dev) == 1) { bus_get_resource(dev, SYS_RES_IRQ, 0, &start, &end); strcat(buffer, " (channel 0+1)"); } else if (pci_get_slot(dev) == 2 && start && end) { bus_set_resource(dev, SYS_RES_IRQ, 0, start, end); start = end = 0; strcat(buffer, " (channel 2+3)"); } else { start = end = 0; } } sprintf(buffer, "%s %s controller", buffer, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_promise_chipinit; return 0; } static int ata_promise_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; switch (ctlr->chip->cfg1) { case PRNEW: /* setup clocks */ ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) | 0x0a); ctlr->dmainit = ata_promise_dmainit; /* FALLTHROUGH */ case PROLD: /* enable burst mode */ ATA_OUTB(ctlr->r_res1, 0x1f, ATA_INB(ctlr->r_res1, 0x1f) | 0x01); ctlr->allocate = ata_promise_allocate; ctlr->setmode = ata_promise_setmode; return 0; case PRTX: ctlr->allocate = ata_promise_tx2_allocate; ctlr->setmode = ata_promise_setmode; return 0; case PRMIO: ctlr->r_type1 = SYS_RES_MEMORY; ctlr->r_rid1 = PCIR_BAR(4); if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE))) goto failnfree; ctlr->r_type2 = SYS_RES_MEMORY; ctlr->r_rid2 = PCIR_BAR(3); if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) goto failnfree; switch (ctlr->chip->cfg2) { case PRSX4X: { struct ata_promise_sx4 *hpkt; u_int32_t dimm = ATA_INL(ctlr->r_res2, 0x000c0080); if (bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle) || - bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, - ata_promise_sx4_intr, ctlr, &ctlr->handle)) { - device_printf(dev, "unable to setup interrupt\n"); - goto failnfree; - } + bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, + ata_promise_sx4_intr, ctlr, &ctlr->handle)) { + device_printf(dev, "unable to setup interrupt\n"); + goto failnfree; + } /* print info about cache 
memory */ device_printf(dev, "DIMM size %dMB @ 0x%08x%s\n", (((dimm >> 16) & 0xff)-((dimm >> 24) & 0xff)+1) << 4, ((dimm >> 24) & 0xff), ATA_INL(ctlr->r_res2, 0x000c0088) & (1<<16) ? " ECC enabled" : "" ); /* adjust cache memory parameters */ ATA_OUTL(ctlr->r_res2, 0x000c000c, (ATA_INL(ctlr->r_res2, 0x000c000c) & 0xffff0000)); /* setup host packet controls */ hpkt = malloc(sizeof(struct ata_promise_sx4), M_TEMP, M_NOWAIT | M_ZERO); mtx_init(&hpkt->mtx, "ATA promise HPKT lock", NULL, MTX_DEF); TAILQ_INIT(&hpkt->queue); hpkt->busy = 0; device_set_ivars(dev, hpkt); ctlr->allocate = ata_promise_mio_allocate; ctlr->reset = ata_promise_mio_reset; ctlr->dmainit = ata_promise_mio_dmainit; ctlr->setmode = ata_promise_setmode; ctlr->channels = 4; return 0; } case PRPATA: case PRCMBO: case PRSATA: /* * older "mio" type controllers need an interrupt intercept * function to compensate for the "reset on read" type interrupt * status register they have. */ if (bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle) || - bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, - ata_promise_mio_intr, ctlr, &ctlr->handle)) { - device_printf(dev, "unable to setup interrupt\n"); - goto failnfree; - } + bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, + ata_promise_mio_intr, ctlr, &ctlr->handle)) { + device_printf(dev, "unable to setup interrupt\n"); + goto failnfree; + } /* prime fake interrupt register */ ATA_OUTL(ctlr->r_res2, 0x060, 0xffffffff); break; } ctlr->allocate = ata_promise_mio_allocate; ctlr->reset = ata_promise_mio_reset; ctlr->dmainit = ata_promise_mio_dmainit; ctlr->setmode = ata_promise_mio_setmode; switch (ctlr->chip->cfg2) { case PRPATA: ctlr->channels = ((ATA_INL(ctlr->r_res2, 0x48) & 0x01) > 0) + ((ATA_INL(ctlr->r_res2, 0x48) & 0x02) > 0) + 2; return 0; case PRCMBO: ATA_OUTL(ctlr->r_res2, 0x06c, 0x000000ff); ctlr->channels = ((ATA_INL(ctlr->r_res2, 0x48) & 0x02) > 0) + 3; return 0; case PRSATA: ATA_OUTL(ctlr->r_res2, 0x06c, 0x000000ff); ctlr->channels = 4; return 0; case 
PRCMBO2: ATA_OUTL(ctlr->r_res2, 0x060, 0x000000ff); ctlr->channels = 3; return 0; case PRSATA2: ATA_OUTL(ctlr->r_res2, 0x060, 0x000000ff); ctlr->channels = 4; return 0; } } failnfree: if (ctlr->r_res2) bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2); if (ctlr->r_res1) bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); return ENXIO; } static int ata_promise_allocate(device_t dev) { struct ata_channel *ch = device_get_softc(dev); if (ata_pci_allocate(dev)) return ENXIO; ch->hw.status = ata_promise_status; return 0; } static int ata_promise_status(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); if (ATA_INL(ctlr->r_res1, 0x1c) & (ch->unit ? 0x00004000 : 0x00000400)) { return ata_pci_status(dev); } return 0; } static int ata_promise_dmastart(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev)); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); if (atadev->flags & ATA_D_48BIT_ACTIVE) { ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) | (ch->unit ? 0x08 : 0x02)); ATA_OUTL(ctlr->r_res1, ch->unit ? 0x24 : 0x20, ((ch->dma->flags & ATA_DMA_READ) ? 0x05000000 : 0x06000000) | (ch->dma->cur_iosize >> 1)); } ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) | (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR))); ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->sg_bus); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ((ch->dma->flags & ATA_DMA_READ) ? 
ATA_BMCMD_WRITE_READ : 0) | ATA_BMCMD_START_STOP); ch->flags |= ATA_DMA_ACTIVE; return 0; } static int ata_promise_dmastop(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev)); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int error; if (atadev->flags & ATA_D_48BIT_ACTIVE) { ATA_OUTB(ctlr->r_res1, 0x11, ATA_INB(ctlr->r_res1, 0x11) & ~(ch->unit ? 0x08 : 0x02)); ATA_OUTL(ctlr->r_res1, ch->unit ? 0x24 : 0x20, 0); } error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); ch->flags &= ~ATA_DMA_ACTIVE; return error; } static void ata_promise_dmareset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); ch->flags &= ~ATA_DMA_ACTIVE; } static void ata_promise_dmainit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_dmainit(dev); if (ch->dma) { ch->dma->start = ata_promise_dmastart; ch->dma->stop = ata_promise_dmastop; ch->dma->reset = ata_promise_dmareset; } } static void ata_promise_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); int error; u_int32_t timings[][2] = { /* PROLD PRNEW mode */ { 0x004ff329, 0x004fff2f }, /* PIO 0 */ { 0x004fec25, 0x004ff82a }, /* PIO 1 */ { 0x004fe823, 0x004ff026 }, /* PIO 2 */ { 0x004fe622, 0x004fec24 }, /* PIO 3 */ { 0x004fe421, 0x004fe822 }, /* PIO 4 */ { 0x004567f3, 0x004acef6 }, /* MWDMA 0 */ { 0x004467f3, 0x0048cef6 }, /* MWDMA 1 */ { 
0x004367f3, 0x0046cef6 }, /* MWDMA 2 */ { 0x004367f3, 0x0046cef6 }, /* UDMA 0 */ { 0x004247f3, 0x00448ef6 }, /* UDMA 1 */ { 0x004127f3, 0x00436ef6 }, /* UDMA 2 */ { 0, 0x00424ef6 }, /* UDMA 3 */ { 0, 0x004127f3 }, /* UDMA 4 */ { 0, 0x004127f3 } /* UDMA 5 */ }; mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma); switch (ctlr->chip->cfg1) { case PROLD: case PRNEW: if (mode > ATA_UDMA2 && (pci_read_config(gparent, 0x50, 2) & (ch->unit ? 1 << 11 : 1 << 10))) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } if (ata_atapi(dev) && mode > ATA_PIO_MAX) mode = ata_limit_mode(dev, mode, ATA_PIO_MAX); break; case PRTX: ATA_IDX_OUTB(ch, ATA_BMDEVSPEC_0, 0x0b); if (mode > ATA_UDMA2 && ATA_IDX_INB(ch, ATA_BMDEVSPEC_1) & 0x04) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } break; case PRMIO: if (mode > ATA_UDMA2 && (ATA_INL(ctlr->r_res2, (ctlr->chip->cfg2 & PRSX4X ? 0x000c0260 : 0x0260) + (ch->unit << 7)) & 0x01000000)) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } break; } error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%ssetting %s on %s chip\n", (error) ? 
"FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { if (ctlr->chip->cfg1 < PRTX) pci_write_config(gparent, 0x60 + (devno << 2), timings[ata_mode2idx(mode)][ctlr->chip->cfg1], 4); atadev->mode = mode; } return; } static int ata_promise_tx2_allocate(device_t dev) { struct ata_channel *ch = device_get_softc(dev); if (ata_pci_allocate(dev)) return ENXIO; ch->hw.status = ata_promise_tx2_status; return 0; } static int ata_promise_tx2_status(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ATA_IDX_OUTB(ch, ATA_BMDEVSPEC_0, 0x0b); if (ATA_IDX_INB(ch, ATA_BMDEVSPEC_1) & 0x20) { return ata_pci_status(dev); } return 0; } static int ata_promise_mio_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int offset = (ctlr->chip->cfg2 & PRSX4X) ? 0x000c0000 : 0; int i; for (i = ATA_DATA; i <= ATA_COMMAND; i++) { ch->r_io[i].res = ctlr->r_res2; ch->r_io[i].offset = offset + 0x0200 + (i << 2) + (ch->unit << 7); } ch->r_io[ATA_CONTROL].res = ctlr->r_res2; ch->r_io[ATA_CONTROL].offset = offset + 0x0238 + (ch->unit << 7); ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res2; ata_default_registers(dev); if ((ctlr->chip->cfg2 & (PRSATA | PRSATA2)) || ((ctlr->chip->cfg2 & (PRCMBO | PRCMBO2)) && ch->unit < 2)) { ch->r_io[ATA_SSTATUS].res = ctlr->r_res2; ch->r_io[ATA_SSTATUS].offset = 0x400 + (ch->unit << 8); ch->r_io[ATA_SERROR].res = ctlr->r_res2; ch->r_io[ATA_SERROR].offset = 0x404 + (ch->unit << 8); ch->r_io[ATA_SCONTROL].res = ctlr->r_res2; ch->r_io[ATA_SCONTROL].offset = 0x408 + (ch->unit << 8); ch->flags |= ATA_NO_SLAVE; } ch->flags |= ATA_USE_16BIT; ata_generic_hw(dev); if (ctlr->chip->cfg2 & PRSX4X) { ch->hw.command = ata_promise_sx4_command; } else { ch->hw.command = ata_promise_mio_command; ch->hw.status = ata_promise_mio_status; } return 0; } static void ata_promise_mio_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; 
/* * since reading interrupt status register on early "mio" chips * clears the status bits we cannot read it for each channel later on * in the generic interrupt routine. * store the bits in an unused register in the chip so we can read * it from there safely to get around this "feature". */ ATA_OUTL(ctlr->r_res2, 0x060, ATA_INL(ctlr->r_res2, 0x040)); for (unit = 0; unit < ctlr->channels; unit++) { - if ((ch = ctlr->interrupt[unit].argument)) - ctlr->interrupt[unit].function(ch); + if ((ch = ctlr->interrupt[unit].argument)) + ctlr->interrupt[unit].function(ch); } ATA_OUTL(ctlr->r_res2, 0x060, 0xffffffff); } static int ata_promise_mio_status(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); struct ata_connect_task *tp; u_int32_t vector, status; switch (ctlr->chip->cfg2) { case PRPATA: case PRSATA: case PRCMBO: /* read and acknowledge interrupt */ vector = ATA_INL(ctlr->r_res2, 0x0060); /* read and clear interface status */ status = ATA_INL(ctlr->r_res2, 0x006c); ATA_OUTL(ctlr->r_res2, 0x006c, status & (0x00000011 << ch->unit)); break; case PRSATA2: case PRCMBO2: critical_enter(); /* read and acknowledge interrupt */ vector = ATA_INL(ctlr->r_res2, 0x0040); ATA_OUTL(ctlr->r_res2, 0x0040, (1 << (ch->unit + 1))); /* read and clear interface status */ status = ATA_INL(ctlr->r_res2, 0x0060); ATA_OUTL(ctlr->r_res2, 0x0060, status & (0x00000011 << ch->unit)); critical_exit(); break; default: return 0; } /* check for and handle disconnect events */ if ((status & (0x00000001 << ch->unit)) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if (bootverbose) device_printf(ch->dev, "DISCONNECT requested\n"); tp->action = ATA_C_DETACH; tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } /* check for and handle connect events */ if ((status & (0x00000010 << ch->unit)) && 
(tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if (bootverbose) device_printf(ch->dev, "CONNECT requested\n"); tp->action = ATA_C_ATTACH; tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } /* do we have any device action ? */ return (vector & (1 << (ch->unit + 1))); } static int ata_promise_mio_command(struct ata_request *request) { struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev)); struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); u_int32_t *wordp = (u_int32_t *)ch->dma->work; ATA_OUTL(ctlr->r_res2, (ch->unit + 1) << 2, 0x00000001); /* XXX SOS add ATAPI commands support later */ switch (request->u.ata.command) { default: return ata_generic_command(request); case ATA_READ_DMA: case ATA_READ_DMA48: wordp[0] = htole32(0x04 | ((ch->unit + 1) << 16) | (0x00 << 24)); break; case ATA_WRITE_DMA: case ATA_WRITE_DMA48: wordp[0] = htole32(0x00 | ((ch->unit + 1) << 16) | (0x00 << 24)); break; } wordp[1] = htole32(ch->dma->sg_bus); wordp[2] = 0; ata_promise_apkt((u_int8_t*)wordp, request); ATA_OUTL(ctlr->r_res2, 0x0240 + (ch->unit << 7), ch->dma->work_bus); return 0; } static void ata_promise_mio_reset(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); struct ata_promise_sx4 *hpktp; switch (ctlr->chip->cfg2) { case PRSX4X: /* softreset channel ATA module */ hpktp = device_get_ivars(ctlr->dev); ATA_OUTL(ctlr->r_res2, 0xc0260 + (ch->unit << 7), ch->unit + 1); ata_udelay(1000); ATA_OUTL(ctlr->r_res2, 0xc0260 + (ch->unit << 7), (ATA_INL(ctlr->r_res2, 0xc0260 + (ch->unit << 7)) & ~0x00003f9f) | (ch->unit + 1)); /* softreset HOST module */ /* XXX SOS what about other outstandings */ mtx_lock(&hpktp->mtx); ATA_OUTL(ctlr->r_res2, 0xc012c, (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f) | (1 << 11)); DELAY(10); 
ATA_OUTL(ctlr->r_res2, 0xc012c, (ATA_INL(ctlr->r_res2, 0xc012c) & ~0x00000f9f)); hpktp->busy = 0; mtx_unlock(&hpktp->mtx); ata_generic_reset(dev); break; case PRPATA: case PRCMBO: case PRSATA: if ((ctlr->chip->cfg2 == PRSATA) || ((ctlr->chip->cfg2 == PRCMBO) && (ch->unit < 2))) { /* mask plug/unplug intr */ ATA_OUTL(ctlr->r_res2, 0x06c, (0x00110000 << ch->unit)); } /* softreset channels ATA module */ ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (1 << 11)); ata_udelay(10000); ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (ATA_INL(ctlr->r_res2, 0x0260 + (ch->unit << 7)) & ~0x00003f9f) | (ch->unit + 1)); if ((ctlr->chip->cfg2 == PRSATA) || ((ctlr->chip->cfg2 == PRCMBO) && (ch->unit < 2))) { ata_sata_phy_enable(ch); /* reset and enable plug/unplug intr */ ATA_OUTL(ctlr->r_res2, 0x06c, (0x00000011 << ch->unit)); } else ata_generic_reset(dev); break; case PRCMBO2: case PRSATA2: if ((ctlr->chip->cfg2 == PRSATA2) || ((ctlr->chip->cfg2 == PRCMBO2) && (ch->unit < 2))) { /* set portmultiplier port */ ATA_OUTL(ctlr->r_res2, 0x4e8 + (ch->unit << 8), 0x0f); /* mask plug/unplug intr */ ATA_OUTL(ctlr->r_res2, 0x060, (0x00110000 << ch->unit)); } /* softreset channels ATA module */ ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (1 << 11)); ata_udelay(10000); ATA_OUTL(ctlr->r_res2, 0x0260 + (ch->unit << 7), (ATA_INL(ctlr->r_res2, 0x0260 + (ch->unit << 7)) & ~0x00003f9f) | (ch->unit + 1)); if ((ctlr->chip->cfg2 == PRSATA2) || ((ctlr->chip->cfg2 == PRCMBO2) && (ch->unit < 2))) { /* set PHY mode to "improved" */ ATA_OUTL(ctlr->r_res2, 0x414 + (ch->unit << 8), (ATA_INL(ctlr->r_res2, 0x414 + (ch->unit << 8)) & ~0x00000003) | 0x00000001); ata_sata_phy_enable(ch); /* reset and enable plug/unplug intr */ ATA_OUTL(ctlr->r_res2, 0x060, (0x00000011 << ch->unit)); /* set portmultiplier port */ ATA_OUTL(ctlr->r_res2, 0x4e8 + (ch->unit << 8), 0x00); } else ata_generic_reset(dev); break; } } static void ata_promise_mio_dmainit(device_t dev) { /* note start and stop are not used here */ 
ata_dmainit(dev); } static void ata_promise_mio_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); if ( (ctlr->chip->cfg2 == PRSATA) || ((ctlr->chip->cfg2 == PRCMBO) && (ch->unit < 2)) || (ctlr->chip->cfg2 == PRSATA2) || ((ctlr->chip->cfg2 == PRCMBO2) && (ch->unit < 2))) ata_sata_setmode(dev, mode); else ata_promise_setmode(dev, mode); } static void ata_promise_sx4_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; u_int32_t vector = ATA_INL(ctlr->r_res2, 0x000c0480); int unit; for (unit = 0; unit < ctlr->channels; unit++) { if (vector & (1 << (unit + 1))) if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); if (vector & (1 << (unit + 5))) if ((ch = ctlr->interrupt[unit].argument)) ata_promise_queue_hpkt(ctlr, htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET)); if (vector & (1 << (unit + 9))) { ata_promise_next_hpkt(ctlr); if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); } if (vector & (1 << (unit + 13))) { ata_promise_next_hpkt(ctlr); if ((ch = ctlr->interrupt[unit].argument)) ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7), htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET)); } } } static int ata_promise_sx4_command(struct ata_request *request) { device_t gparent = GRANDPARENT(request->dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); struct ata_dma_prdentry *prd = ch->dma->sg; caddr_t window = rman_get_virtual(ctlr->r_res1); u_int32_t *wordp; int i, idx, length = 0; /* XXX SOS add ATAPI commands support later */ switch (request->u.ata.command) { default: return -1; case ATA_ATA_IDENTIFY: case ATA_READ: case ATA_READ48: case ATA_READ_MUL: case ATA_READ_MUL48: case ATA_WRITE: case ATA_WRITE48: case ATA_WRITE_MUL: 
case ATA_WRITE_MUL48: ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001); return ata_generic_command(request); case ATA_SETFEATURES: case ATA_FLUSHCACHE: case ATA_FLUSHCACHE48: case ATA_SLEEP: case ATA_SET_MULTI: wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET); wordp[0] = htole32(0x08 | ((ch->unit + 1)<<16) | (0x00 << 24)); wordp[1] = 0; wordp[2] = 0; ata_promise_apkt((u_int8_t *)wordp, request); ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit + 1) << 2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7), htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_APKT_OFFSET)); return 0; case ATA_READ_DMA: case ATA_READ_DMA48: case ATA_WRITE_DMA: case ATA_WRITE_DMA48: wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HSG_OFFSET); i = idx = 0; do { wordp[idx++] = prd[i].addr; wordp[idx++] = prd[i].count; length += (prd[i].count & ~ATA_DMA_EOT); } while (!(prd[i++].count & ATA_DMA_EOT)); wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_LSG_OFFSET); wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE); wordp[1] = htole32(request->bytecount | ATA_DMA_EOT); wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_ASG_OFFSET); wordp[0] = htole32((ch->unit * ATA_PDC_BUF_OFFSET) + ATA_PDC_BUF_BASE); wordp[1] = htole32(request->bytecount | ATA_DMA_EOT); wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET); if (request->flags & ATA_R_READ) wordp[0] = htole32(0x14 | ((ch->unit+9)<<16) | ((ch->unit+5)<<24)); if (request->flags & ATA_R_WRITE) wordp[0] = htole32(0x00 | ((ch->unit+13)<<16) | (0x00<<24)); wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_HSG_OFFSET); wordp[2] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_LSG_OFFSET); wordp[3] = 0; wordp = (u_int32_t *) (window + (ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET); if 
(request->flags & ATA_R_READ) wordp[0] = htole32(0x04 | ((ch->unit+5)<<16) | (0x00<<24)); if (request->flags & ATA_R_WRITE) wordp[0] = htole32(0x10 | ((ch->unit+1)<<16) | ((ch->unit+13)<<24)); wordp[1] = htole32((ch->unit * ATA_PDC_CHN_OFFSET)+ATA_PDC_ASG_OFFSET); wordp[2] = 0; ata_promise_apkt((u_int8_t *)wordp, request); ATA_OUTL(ctlr->r_res2, 0x000c0484, 0x00000001); if (request->flags & ATA_R_READ) { ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+5)<<2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+9)<<2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0240 + (ch->unit << 7), htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_APKT_OFFSET)); } if (request->flags & ATA_R_WRITE) { ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+1)<<2), 0x00000001); ATA_OUTL(ctlr->r_res2, 0x000c0400 + ((ch->unit+13)<<2), 0x00000001); ata_promise_queue_hpkt(ctlr, htole32((ch->unit * ATA_PDC_CHN_OFFSET) + ATA_PDC_HPKT_OFFSET)); } return 0; } } static int ata_promise_apkt(u_int8_t *bytep, struct ata_request *request) { struct ata_device *atadev = device_get_softc(request->dev); int i = 12; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_PDC_WAIT_NBUSY|ATA_DRIVE; bytep[i++] = ATA_D_IBM | ATA_D_LBA | atadev->unit; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_CTL; bytep[i++] = ATA_A_4BIT; if (atadev->flags & ATA_D_48BIT_ACTIVE) { bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_FEATURE; bytep[i++] = request->u.ata.feature >> 8; bytep[i++] = request->u.ata.feature; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_COUNT; bytep[i++] = request->u.ata.count >> 8; bytep[i++] = request->u.ata.count; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_SECTOR; bytep[i++] = request->u.ata.lba >> 24; bytep[i++] = request->u.ata.lba; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_LSB; bytep[i++] = request->u.ata.lba >> 32; bytep[i++] = request->u.ata.lba >> 8; bytep[i++] = ATA_PDC_2B | ATA_PDC_WRITE_REG | ATA_CYL_MSB; bytep[i++] = request->u.ata.lba >> 40; bytep[i++] = request->u.ata.lba 
>> 16; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE; bytep[i++] = ATA_D_LBA | atadev->unit; } else { bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_FEATURE; bytep[i++] = request->u.ata.feature; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_COUNT; bytep[i++] = request->u.ata.count; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_SECTOR; bytep[i++] = request->u.ata.lba; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_LSB; bytep[i++] = request->u.ata.lba >> 8; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_CYL_MSB; bytep[i++] = request->u.ata.lba >> 16; bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_REG | ATA_DRIVE; bytep[i++] = (atadev->flags & ATA_D_USE_CHS ? 0 : ATA_D_LBA) | ATA_D_IBM | atadev->unit | ((request->u.ata.lba >> 24)&0xf); } bytep[i++] = ATA_PDC_1B | ATA_PDC_WRITE_END | ATA_COMMAND; bytep[i++] = request->u.ata.command; return i; } static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hpkt) { struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev); mtx_lock(&hpktp->mtx); if (hpktp->busy) { struct host_packet *hp = malloc(sizeof(struct host_packet), M_TEMP, M_NOWAIT | M_ZERO); hp->addr = hpkt; TAILQ_INSERT_TAIL(&hpktp->queue, hp, chain); } else { hpktp->busy = 1; ATA_OUTL(ctlr->r_res2, 0x000c0100, hpkt); } mtx_unlock(&hpktp->mtx); } static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr) { struct ata_promise_sx4 *hpktp = device_get_ivars(ctlr->dev); struct host_packet *hp; mtx_lock(&hpktp->mtx); if ((hp = TAILQ_FIRST(&hpktp->queue))) { TAILQ_REMOVE(&hpktp->queue, hp, chain); ATA_OUTL(ctlr->r_res2, 0x000c0100, hp->addr); free(hp, M_TEMP); } else hpktp->busy = 0; mtx_unlock(&hpktp->mtx); } /* * ServerWorks chipset support functions */ int ata_serverworks_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_ROSB4, 0x00, SWKS33, 0x00, ATA_UDMA2, "ROSB4" }, { ATA_CSB5, 0x92, SWKS100, 0x00, ATA_UDMA5, "CSB5" }, 
{ ATA_CSB5, 0x00, SWKS66, 0x00, ATA_UDMA4, "CSB5" }, { ATA_CSB6, 0x00, SWKS100, 0x00, ATA_UDMA5, "CSB6" }, { ATA_CSB6_1, 0x00, SWKS66, 0x00, ATA_UDMA4, "CSB6" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return ENXIO; sprintf(buffer, "ServerWorks %s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_serverworks_chipinit; return 0; } static int ata_serverworks_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; if (ctlr->chip->cfg1 == SWKS33) { device_t *children; int nchildren, i; /* locate the ISA part in the southbridge and enable UDMA33 */ if (!device_get_children(device_get_parent(dev), &children,&nchildren)){ for (i = 0; i < nchildren; i++) { if (pci_get_devid(children[i]) == ATA_ROSB4_ISA) { pci_write_config(children[i], 0x64, (pci_read_config(children[i], 0x64, 4) & ~0x00002000) | 0x00004000, 4); break; } } free(children, M_TEMP); } } else { pci_write_config(dev, 0x5a, (pci_read_config(dev, 0x5a, 1) & ~0x40) | (ctlr->chip->cfg1 == SWKS100) ? 0x03 : 0x02, 1); } ctlr->setmode = ata_serverworks_setmode; return 0; } static void ata_serverworks_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); int offset = (devno ^ 0x01) << 3; int error; u_int8_t piotimings[] = { 0x5d, 0x47, 0x34, 0x22, 0x20, 0x34, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 }; u_int8_t dmatimings[] = { 0x77, 0x21, 0x20 }; mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma); mode = ata_check_80pin(dev, mode); error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%ssetting %s on %s chip\n", (error) ? 
"FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { if (mode >= ATA_UDMA0) { pci_write_config(gparent, 0x56, (pci_read_config(gparent, 0x56, 2) & ~(0xf << (devno << 2))) | ((mode & ATA_MODE_MASK) << (devno << 2)), 2); pci_write_config(gparent, 0x54, pci_read_config(gparent, 0x54, 1) | (0x01 << devno), 1); pci_write_config(gparent, 0x44, (pci_read_config(gparent, 0x44, 4) & ~(0xff << offset)) | (dmatimings[2] << offset), 4); } else if (mode >= ATA_WDMA0) { pci_write_config(gparent, 0x54, pci_read_config(gparent, 0x54, 1) & ~(0x01 << devno), 1); pci_write_config(gparent, 0x44, (pci_read_config(gparent, 0x44, 4) & ~(0xff << offset)) | (dmatimings[mode & ATA_MODE_MASK] << offset), 4); } else pci_write_config(gparent, 0x54, pci_read_config(gparent, 0x54, 1) & ~(0x01 << devno), 1); pci_write_config(gparent, 0x40, (pci_read_config(gparent, 0x40, 4) & ~(0xff << offset)) | (piotimings[ata_mode2idx(mode)] << offset), 4); atadev->mode = mode; } } /* * Silicon Image Inc. (SiI) (former CMD) chipset support functions */ int ata_sii_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_SII3114, 0x00, SIIMEMIO, SII4CH, ATA_SA150, "SiI 3114" }, { ATA_SII3512, 0x02, SIIMEMIO, 0, ATA_SA150, "SiI 3512" }, { ATA_SII3112, 0x02, SIIMEMIO, 0, ATA_SA150, "SiI 3112" }, { ATA_SII3112_1, 0x02, SIIMEMIO, 0, ATA_SA150, "SiI 3112" }, { ATA_SII3512, 0x00, SIIMEMIO, SIIBUG, ATA_SA150, "SiI 3512" }, { ATA_SII3112, 0x00, SIIMEMIO, SIIBUG, ATA_SA150, "SiI 3112" }, { ATA_SII3112_1, 0x00, SIIMEMIO, SIIBUG, ATA_SA150, "SiI 3112" }, { ATA_SII0680, 0x00, SIIMEMIO, SIISETCLK, ATA_UDMA6, "SiI 0680" }, { ATA_CMD649, 0x00, 0, SIIINTR, ATA_UDMA5, "CMD 649" }, { ATA_CMD648, 0x00, 0, SIIINTR, ATA_UDMA4, "CMD 648" }, { ATA_CMD646, 0x07, 0, 0, ATA_UDMA2, "CMD 646U2" }, { ATA_CMD646, 0x00, 0, 0, ATA_WDMA2, "CMD 646" }, { 0, 0, 0, 0, 0, 0}}; char buffer[64]; if (!(idx = ata_match_chip(dev, ids))) return 
ENXIO; sprintf(buffer, "%s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_sii_chipinit; return 0; } static int ata_sii_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; if (ctlr->chip->cfg1 == SIIMEMIO) { ctlr->r_type2 = SYS_RES_MEMORY; ctlr->r_rid2 = PCIR_BAR(5); if (!(ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) return ENXIO; if (ctlr->chip->cfg2 & SIISETCLK) { if ((pci_read_config(dev, 0x8a, 1) & 0x30) != 0x10) pci_write_config(dev, 0x8a, (pci_read_config(dev, 0x8a, 1) & 0xcf)|0x10,1); if ((pci_read_config(dev, 0x8a, 1) & 0x30) != 0x10) device_printf(dev, "%s could not set ATA133 clock\n", ctlr->chip->text); } /* if we have 4 channels enable the second set */ if (ctlr->chip->cfg2 & SII4CH) { ATA_OUTL(ctlr->r_res2, 0x0200, 0x00000002); ctlr->channels = 4; } /* enable PCI interrupt as BIOS might not */ pci_write_config(dev, 0x8a, (pci_read_config(dev, 0x8a, 1) & 0x3f), 1); /* dont block interrupts from any channel */ pci_write_config(dev, 0x48, (pci_read_config(dev, 0x48, 4) & ~0x03c00000), 4); ctlr->allocate = ata_sii_allocate; if (ctlr->chip->max_dma >= ATA_SA150) { ctlr->reset = ata_sii_reset; ctlr->setmode = ata_sata_setmode; } else ctlr->setmode = ata_sii_setmode; } else { if ((pci_read_config(dev, 0x51, 1) & 0x08) != 0x08) { device_printf(dev, "HW has secondary channel disabled\n"); ctlr->channels = 1; } /* enable interrupt as BIOS might not */ pci_write_config(dev, 0x71, 0x01, 1); ctlr->allocate = ata_cmd_allocate; ctlr->setmode = ata_cmd_setmode; } return 0; } static int ata_cmd_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* setup the usual register normal pci style */ if (ata_pci_allocate(dev)) return ENXIO; ata_pci_allocate(dev); if (ctlr->chip->cfg2 & 
SIIINTR) ch->hw.status = ata_cmd_status; return 0; } static int ata_cmd_status(device_t dev) { struct ata_channel *ch = device_get_softc(dev); u_int8_t reg71; if (((reg71 = pci_read_config(device_get_parent(ch->dev), 0x71, 1)) & (ch->unit ? 0x08 : 0x04))) { pci_write_config(device_get_parent(ch->dev), 0x71, reg71 & ~(ch->unit ? 0x04 : 0x08), 1); return ata_pci_status(dev); } return 0; } static void ata_cmd_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); int error; mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma); mode = ata_check_80pin(dev, mode); error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { int treg = 0x54 + ((devno < 3) ? (devno << 1) : 7); int ureg = ch->unit ? 0x7b : 0x73; if (mode >= ATA_UDMA0) { int udmatimings[][2] = { { 0x31, 0xc2 }, { 0x21, 0x82 }, { 0x11, 0x42 }, { 0x25, 0x8a }, { 0x15, 0x4a }, { 0x05, 0x0a } }; u_int8_t umode = pci_read_config(gparent, ureg, 1); umode &= ~(atadev->unit == ATA_MASTER ? 0x35 : 0xca); umode |= udmatimings[mode & ATA_MODE_MASK][ATA_DEV(atadev->unit)]; pci_write_config(gparent, ureg, umode, 1); } else if (mode >= ATA_WDMA0) { int dmatimings[] = { 0x87, 0x32, 0x3f }; pci_write_config(gparent, treg, dmatimings[mode & ATA_MODE_MASK],1); pci_write_config(gparent, ureg, pci_read_config(gparent, ureg, 1) & ~(atadev->unit == ATA_MASTER ? 0x35 : 0xca), 1); } else { int piotimings[] = { 0xa9, 0x57, 0x44, 0x32, 0x3f }; pci_write_config(gparent, treg, piotimings[(mode & ATA_MODE_MASK) - ATA_PIO0], 1); pci_write_config(gparent, ureg, pci_read_config(gparent, ureg, 1) & ~(atadev->unit == ATA_MASTER ? 
0x35 : 0xca), 1); } atadev->mode = mode; } } static int ata_sii_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int unit01 = (ch->unit & 1), unit10 = (ch->unit & 2); int i; for (i = ATA_DATA; i <= ATA_COMMAND; i++) { ch->r_io[i].res = ctlr->r_res2; ch->r_io[i].offset = 0x80 + i + (unit01 << 6) + (unit10 << 8); } ch->r_io[ATA_CONTROL].res = ctlr->r_res2; ch->r_io[ATA_CONTROL].offset = 0x8a + (unit01 << 6) + (unit10 << 8); ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res2; ata_default_registers(dev); ch->r_io[ATA_BMCMD_PORT].res = ctlr->r_res2; ch->r_io[ATA_BMCMD_PORT].offset = 0x00 + (unit01 << 3) + (unit10 << 8); ch->r_io[ATA_BMSTAT_PORT].res = ctlr->r_res2; ch->r_io[ATA_BMSTAT_PORT].offset = 0x02 + (unit01 << 3) + (unit10 << 8); ch->r_io[ATA_BMDTP_PORT].res = ctlr->r_res2; ch->r_io[ATA_BMDTP_PORT].offset = 0x04 + (unit01 << 3) + (unit10 << 8); ch->r_io[ATA_BMDEVSPEC_0].res = ctlr->r_res2; ch->r_io[ATA_BMDEVSPEC_0].offset = 0xa1 + (unit01 << 6) + (unit10 << 8); if (ctlr->chip->max_dma >= ATA_SA150) { ch->r_io[ATA_SSTATUS].res = ctlr->r_res2; ch->r_io[ATA_SSTATUS].offset = 0x104 + (unit01 << 7) + (unit10 << 8); ch->r_io[ATA_SERROR].res = ctlr->r_res2; ch->r_io[ATA_SERROR].offset = 0x108 + (unit01 << 7) + (unit10 << 8); ch->r_io[ATA_SCONTROL].res = ctlr->r_res2; ch->r_io[ATA_SCONTROL].offset = 0x100 + (unit01 << 7) + (unit10 << 8); ch->flags |= ATA_NO_SLAVE; /* enable PHY state change interrupt */ ATA_OUTL(ctlr->r_res2, 0x148 + (unit01 << 7) + (unit10 << 8),(1 << 16)); } if ((ctlr->chip->cfg2 & SIIBUG) && ch->dma) { /* work around errata in early chips */ ch->dma->boundary = 16 * DEV_BSIZE; ch->dma->segsize = 15 * DEV_BSIZE; } ata_pci_hw(dev); ch->hw.status = ata_sii_status; return 0; } static int ata_sii_status(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* check for PHY related 
interrupts on SATA capable HW */ if (ctlr->chip->max_dma >= ATA_SA150) { u_int32_t status = ATA_IDX_INL(ch, ATA_SSTATUS); u_int32_t error = ATA_IDX_INL(ch, ATA_SERROR); struct ata_connect_task *tp; if (error) { /* clear error bits/interrupt */ ATA_IDX_OUTL(ch, ATA_SERROR, error); /* if we have a connection event deal with it */ if ((error & ATA_SE_PHY_CHANGED) && (tp = (struct ata_connect_task *) malloc(sizeof(struct ata_connect_task), M_ATA, M_NOWAIT | M_ZERO))) { if ((status & ATA_SS_CONWELL_MASK) == ATA_SS_CONWELL_GEN1) { if (bootverbose) device_printf(ch->dev, "CONNECT requested\n"); tp->action = ATA_C_ATTACH; } else { if (bootverbose) device_printf(ch->dev, "DISCONNECT requested\n"); tp->action = ATA_C_DETACH; } tp->dev = ch->dev; TASK_INIT(&tp->task, 0, ata_sata_phy_event, tp); taskqueue_enqueue(taskqueue_thread, &tp->task); } } } /* any drive action to take care of ? */ if (ATA_IDX_INB(ch, ATA_BMDEVSPEC_0) & 0x08) return ata_pci_status(dev); else return 0; } static void ata_sii_reset(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int offset = ((ch->unit & 1) << 7) + ((ch->unit & 2) << 8); /* disable PHY state change interrupt */ ATA_OUTL(ctlr->r_res2, 0x148 + offset, ~(1 << 16)); /* reset controller part for this channel */ ATA_OUTL(ctlr->r_res2, 0x48, ATA_INL(ctlr->r_res2, 0x48) | (0xc0 >> ch->unit)); DELAY(1000); ATA_OUTL(ctlr->r_res2, 0x48, ATA_INL(ctlr->r_res2, 0x48) & ~(0xc0 >> ch->unit)); ata_sata_phy_enable(ch); /* enable PHY state change interrupt */ ATA_OUTL(ctlr->r_res2, 0x148 + offset, (1 << 16)); } static void ata_sii_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int rego = (ch->unit << 4) + (ATA_DEV(atadev->unit) << 1); int mreg = ch->unit ? 
0x84 : 0x80; int mask = 0x03 << (ATA_DEV(atadev->unit) << 2); int mval = pci_read_config(gparent, mreg, 1) & ~mask; int error; mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg2 & SIISETCLK) { if (mode > ATA_UDMA2 && (pci_read_config(gparent, 0x79, 1) & (ch->unit ? 0x02 : 0x01))) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } } else mode = ata_check_80pin(dev, mode); error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (error) return; if (mode >= ATA_UDMA0) { u_int8_t udmatimings[] = { 0xf, 0xb, 0x7, 0x5, 0x3, 0x2, 0x1 }; u_int8_t ureg = 0xac + rego; pci_write_config(gparent, mreg, mval | (0x03 << (ATA_DEV(atadev->unit) << 2)), 1); pci_write_config(gparent, ureg, (pci_read_config(gparent, ureg, 1) & ~0x3f) | udmatimings[mode & ATA_MODE_MASK], 1); } else if (mode >= ATA_WDMA0) { u_int8_t dreg = 0xa8 + rego; u_int16_t dmatimings[] = { 0x2208, 0x10c2, 0x10c1 }; pci_write_config(gparent, mreg, mval | (0x02 << (ATA_DEV(atadev->unit) << 2)), 1); pci_write_config(gparent, dreg, dmatimings[mode & ATA_MODE_MASK], 2); } else { u_int8_t preg = 0xa4 + rego; u_int16_t piotimings[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; pci_write_config(gparent, mreg, mval | (0x01 << (ATA_DEV(atadev->unit) << 2)), 1); pci_write_config(gparent, preg, piotimings[mode & ATA_MODE_MASK], 2); } atadev->mode = mode; } /* * Silicon Integrated Systems Corp. 
(SiS) chipset support functions */ int ata_sis_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_SIS182, 0x00, SISSATA, 0, ATA_SA150, "182" }, /* south */ { ATA_SIS181, 0x00, SISSATA, 0, ATA_SA150, "181" }, /* south */ { ATA_SIS180, 0x00, SISSATA, 0, ATA_SA150, "180" }, /* south */ { ATA_SIS965, 0x00, SIS133NEW, 0, ATA_UDMA6, "965" }, /* south */ { ATA_SIS964, 0x00, SIS133NEW, 0, ATA_UDMA6, "964" }, /* south */ { ATA_SIS963, 0x00, SIS133NEW, 0, ATA_UDMA6, "963" }, /* south */ { ATA_SIS962, 0x00, SIS133NEW, 0, ATA_UDMA6, "962" }, /* south */ { ATA_SIS745, 0x00, SIS100NEW, 0, ATA_UDMA5, "745" }, /* 1chip */ { ATA_SIS735, 0x00, SIS100NEW, 0, ATA_UDMA5, "735" }, /* 1chip */ { ATA_SIS733, 0x00, SIS100NEW, 0, ATA_UDMA5, "733" }, /* 1chip */ { ATA_SIS730, 0x00, SIS100OLD, 0, ATA_UDMA5, "730" }, /* 1chip */ { ATA_SIS635, 0x00, SIS100NEW, 0, ATA_UDMA5, "635" }, /* 1chip */ { ATA_SIS633, 0x00, SIS100NEW, 0, ATA_UDMA5, "633" }, /* unknown */ { ATA_SIS630, 0x30, SIS100OLD, 0, ATA_UDMA5, "630S"}, /* 1chip */ { ATA_SIS630, 0x00, SIS66, 0, ATA_UDMA4, "630" }, /* 1chip */ { ATA_SIS620, 0x00, SIS66, 0, ATA_UDMA4, "620" }, /* 1chip */ { ATA_SIS550, 0x00, SIS66, 0, ATA_UDMA5, "550" }, { ATA_SIS540, 0x00, SIS66, 0, ATA_UDMA4, "540" }, { ATA_SIS530, 0x00, SIS66, 0, ATA_UDMA4, "530" }, { ATA_SIS5513, 0xc2, SIS33, 1, ATA_UDMA2, "5513" }, { ATA_SIS5513, 0x00, SIS33, 1, ATA_WDMA2, "5513" }, { 0, 0, 0, 0, 0, 0 }}; char buffer[64]; int found = 0; if (!(idx = ata_find_chip(dev, ids, -pci_get_slot(dev)))) return ENXIO; if (idx->cfg2 && !found) { u_int8_t reg57 = pci_read_config(dev, 0x57, 1); pci_write_config(dev, 0x57, (reg57 & 0x7f), 1); if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == ATA_SIS5518) { found = 1; idx->cfg1 = SIS133NEW; idx->max_dma = ATA_UDMA6; sprintf(buffer, "SiS 962/963 %s controller", ata_mode2str(idx->max_dma)); } pci_write_config(dev, 0x57, reg57, 1); } if (idx->cfg2 && !found) { 
u_int8_t reg4a = pci_read_config(dev, 0x4a, 1); pci_write_config(dev, 0x4a, (reg4a | 0x10), 1); if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == ATA_SIS5517) { struct ata_chip_id id[] = {{ ATA_SISSOUTH, 0x10, 0, 0, 0, "" }, { 0, 0, 0, 0, 0, 0 }}; found = 1; if (ata_find_chip(dev, id, pci_get_slot(dev))) { idx->cfg1 = SIS133OLD; idx->max_dma = ATA_UDMA6; } else { idx->cfg1 = SIS100NEW; idx->max_dma = ATA_UDMA5; } sprintf(buffer, "SiS 961 %s controller",ata_mode2str(idx->max_dma)); } pci_write_config(dev, 0x4a, reg4a, 1); } if (!found) sprintf(buffer,"SiS %s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_sis_chipinit; return 0; } static int ata_sis_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; switch (ctlr->chip->cfg1) { case SIS33: break; case SIS66: case SIS100OLD: pci_write_config(dev, 0x52, pci_read_config(dev, 0x52, 1) & ~0x04, 1); break; case SIS100NEW: case SIS133OLD: pci_write_config(dev, 0x49, pci_read_config(dev, 0x49, 1) & ~0x01, 1); break; case SIS133NEW: pci_write_config(dev, 0x50, pci_read_config(dev, 0x50, 2) | 0x0008, 2); pci_write_config(dev, 0x52, pci_read_config(dev, 0x52, 2) | 0x0008, 2); break; case SISSATA: ctlr->r_type2 = SYS_RES_IOPORT; ctlr->r_rid2 = PCIR_BAR(5); if ((ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) { pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400,2); ctlr->allocate = ata_sis_allocate; ctlr->reset = ata_sis_reset; } ctlr->setmode = ata_sata_setmode; return 0; default: return ENXIO; } ctlr->setmode = ata_sis_setmode; return 0; } static int ata_sis_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int offset = ch->unit << ((ctlr->chip->chipid == ATA_SIS182) ? 
5 : 6); /* setup the usual register normal pci style */ if (ata_pci_allocate(dev)) return ENXIO; ch->r_io[ATA_SSTATUS].res = ctlr->r_res2; ch->r_io[ATA_SSTATUS].offset = 0x00 + offset; ch->r_io[ATA_SERROR].res = ctlr->r_res2; ch->r_io[ATA_SERROR].offset = 0x04 + offset; ch->r_io[ATA_SCONTROL].res = ctlr->r_res2; ch->r_io[ATA_SCONTROL].offset = 0x08 + offset; ch->flags |= ATA_NO_SLAVE; /* XXX SOS PHY hotplug handling missing in SiS chip ?? */ /* XXX SOS unknown how to enable PHY state change interrupt */ return 0; } static void ata_sis_reset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_sata_phy_enable(ch); } static void ata_sis_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); int error; mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg1 == SIS133NEW) { if (mode > ATA_UDMA2 && pci_read_config(gparent, ch->unit ? 0x52 : 0x50,2) & 0x8000) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } } else { if (mode > ATA_UDMA2 && pci_read_config(gparent, 0x48, 1)&(ch->unit ? 0x20 : 0x10)) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } } error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%ssetting %s on %s chip\n", (error) ? 
"FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { switch (ctlr->chip->cfg1) { case SIS133NEW: { u_int32_t timings[] = { 0x28269008, 0x0c266008, 0x04263008, 0x0c0a3008, 0x05093008, 0x22196008, 0x0c0a3008, 0x05093008, 0x050939fc, 0x050936ac, 0x0509347c, 0x0509325c, 0x0509323c, 0x0509322c, 0x0509321c}; u_int32_t reg; reg = (pci_read_config(gparent, 0x57, 1)&0x40?0x70:0x40)+(devno<<2); pci_write_config(gparent, reg, timings[ata_mode2idx(mode)], 4); break; } case SIS133OLD: { u_int16_t timings[] = { 0x00cb, 0x0067, 0x0044, 0x0033, 0x0031, 0x0044, 0x0033, 0x0031, 0x8f31, 0x8a31, 0x8731, 0x8531, 0x8331, 0x8231, 0x8131 }; u_int16_t reg = 0x40 + (devno << 1); pci_write_config(gparent, reg, timings[ata_mode2idx(mode)], 2); break; } case SIS100NEW: { u_int16_t timings[] = { 0x00cb, 0x0067, 0x0044, 0x0033, 0x0031, 0x0044, 0x0033, 0x0031, 0x8b31, 0x8731, 0x8531, 0x8431, 0x8231, 0x8131 }; u_int16_t reg = 0x40 + (devno << 1); pci_write_config(gparent, reg, timings[ata_mode2idx(mode)], 2); break; } case SIS100OLD: case SIS66: case SIS33: { u_int16_t timings[] = { 0x0c0b, 0x0607, 0x0404, 0x0303, 0x0301, 0x0404, 0x0303, 0x0301, 0xf301, 0xd301, 0xb301, 0xa301, 0x9301, 0x8301 }; u_int16_t reg = 0x40 + (devno << 1); pci_write_config(gparent, reg, timings[ata_mode2idx(mode)], 2); break; } } atadev->mode = mode; } } /* VIA Technologies Inc. 
chipset support functions */ int ata_via_ident(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); struct ata_chip_id *idx; static struct ata_chip_id ids[] = {{ ATA_VIA82C586, 0x02, VIA33, 0x00, ATA_UDMA2, "82C586B" }, { ATA_VIA82C586, 0x00, VIA33, 0x00, ATA_WDMA2, "82C586" }, { ATA_VIA82C596, 0x12, VIA66, VIACLK, ATA_UDMA4, "82C596B" }, { ATA_VIA82C596, 0x00, VIA33, 0x00, ATA_UDMA2, "82C596" }, { ATA_VIA82C686, 0x40, VIA100, VIABUG, ATA_UDMA5, "82C686B"}, { ATA_VIA82C686, 0x10, VIA66, VIACLK, ATA_UDMA4, "82C686A" }, { ATA_VIA82C686, 0x00, VIA33, 0x00, ATA_UDMA2, "82C686" }, { ATA_VIA8231, 0x00, VIA100, VIABUG, ATA_UDMA5, "8231" }, { ATA_VIA8233, 0x00, VIA100, 0x00, ATA_UDMA5, "8233" }, { ATA_VIA8233C, 0x00, VIA100, 0x00, ATA_UDMA5, "8233C" }, { ATA_VIA8233A, 0x00, VIA133, 0x00, ATA_UDMA6, "8233A" }, { ATA_VIA8235, 0x00, VIA133, 0x00, ATA_UDMA6, "8235" }, { ATA_VIA8237, 0x00, VIA133, 0x00, ATA_UDMA6, "8237" }, { ATA_VIA8251, 0x00, VIA133, 0x00, ATA_UDMA6, "8251" }, { 0, 0, 0, 0, 0, 0 }}; static struct ata_chip_id new_ids[] = {{ ATA_VIA6410, 0x00, 0, 0x00, ATA_UDMA6, "6410" }, { ATA_VIA6420, 0x00, 7, 0x00, ATA_SA150, "6420" }, { ATA_VIA6421, 0x00, 6, VIABAR, ATA_SA150, "6421" }, { ATA_VIA8251, 0x00, 0, VIAAHCI, ATA_SA150, "8251" }, { 0, 0, 0, 0, 0, 0 }}; char buffer[64]; if (pci_get_devid(dev) == ATA_VIA82C571) { if (!(idx = ata_find_chip(dev, ids, -99))) return ENXIO; } else { if (!(idx = ata_match_chip(dev, new_ids))) return ENXIO; } sprintf(buffer, "VIA %s %s controller", idx->text, ata_mode2str(idx->max_dma)); device_set_desc_copy(dev, buffer); ctlr->chip = idx; ctlr->chipinit = ata_via_chipinit; return 0; } static int ata_via_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev)) return ENXIO; if (ctlr->chip->max_dma >= ATA_SA150) { ctlr->r_type2 = SYS_RES_IOPORT; ctlr->r_rid2 = PCIR_BAR(5); if ((ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2, &ctlr->r_rid2, RF_ACTIVE))) { 
pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400,2); ctlr->allocate = ata_via_allocate; ctlr->reset = ata_via_reset; } ctlr->setmode = ata_sata_setmode; return 0; } /* prepare for ATA-66 on the 82C686a and 82C596b */ if (ctlr->chip->cfg2 & VIACLK) pci_write_config(dev, 0x50, 0x030b030b, 4); /* the southbridge might need the data corruption fix */ if (ctlr->chip->cfg2 & VIABUG) ata_via_southbridge_fixup(dev); /* set fifo configuration half'n'half */ pci_write_config(dev, 0x43, (pci_read_config(dev, 0x43, 1) & 0x90) | 0x2a, 1); /* set status register read retry */ pci_write_config(dev, 0x44, pci_read_config(dev, 0x44, 1) | 0x08, 1); /* set DMA read & end-of-sector fifo flush */ pci_write_config(dev, 0x46, (pci_read_config(dev, 0x46, 1) & 0x0c) | 0xf0, 1); /* set sector size */ pci_write_config(dev, 0x60, DEV_BSIZE, 2); pci_write_config(dev, 0x68, DEV_BSIZE, 2); ctlr->setmode = ata_via_family_setmode; return 0; } static int ata_via_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* newer SATA chips has resources in one BAR for each channel */ if (ctlr->chip->cfg2 & VIABAR) { struct resource *r_io; int i, rid; rid = PCIR_BAR(ch->unit); if (!(r_io = bus_alloc_resource_any(device_get_parent(dev), - SYS_RES_IOPORT, + SYS_RES_IOPORT, &rid, RF_ACTIVE))) return ENXIO; for (i = ATA_DATA; i <= ATA_COMMAND; i ++) { ch->r_io[i].res = r_io; ch->r_io[i].offset = i; } ch->r_io[ATA_CONTROL].res = r_io; ch->r_io[ATA_CONTROL].offset = 2 + ATA_IOSIZE; ch->r_io[ATA_IDX_ADDR].res = r_io; ata_default_registers(dev); for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) { ch->r_io[i].res = ctlr->r_res1; ch->r_io[i].offset = i - ATA_BMCMD_PORT; } ata_pci_hw(dev); } else { /* setup the usual register normal pci style */ - if (ata_pci_allocate(dev)) + if (ata_pci_allocate(dev)) return ENXIO; } ch->r_io[ATA_SSTATUS].res = ctlr->r_res2; 
ch->r_io[ATA_SSTATUS].offset = (ch->unit << ctlr->chip->cfg1); ch->r_io[ATA_SERROR].res = ctlr->r_res2; ch->r_io[ATA_SERROR].offset = 0x04 + (ch->unit << ctlr->chip->cfg1); ch->r_io[ATA_SCONTROL].res = ctlr->r_res2; ch->r_io[ATA_SCONTROL].offset = 0x08 + (ch->unit << ctlr->chip->cfg1); ch->flags |= ATA_NO_SLAVE; /* XXX SOS PHY hotplug handling missing in VIA chip ?? */ /* XXX SOS unknown how to enable PHY state change interrupt */ return 0; } static void ata_via_reset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_sata_phy_enable(ch); } static void ata_via_southbridge_fixup(device_t dev) { device_t *children; int nchildren, i; if (device_get_children(device_get_parent(dev), &children, &nchildren)) return; for (i = 0; i < nchildren; i++) { if (pci_get_devid(children[i]) == ATA_VIA8363 || pci_get_devid(children[i]) == ATA_VIA8371 || pci_get_devid(children[i]) == ATA_VIA8662 || pci_get_devid(children[i]) == ATA_VIA8361) { u_int8_t reg76 = pci_read_config(children[i], 0x76, 1); if ((reg76 & 0xf0) != 0xd0) { device_printf(dev, "Correcting VIA config for southbridge data corruption bug\n"); pci_write_config(children[i], 0x75, 0x80, 1); pci_write_config(children[i], 0x76, (reg76 & 0x0f) | 0xd0, 1); } break; } } free(children, M_TEMP); } /* common code for VIA, AMD & nVidia */ static void ata_via_family_setmode(device_t dev, int mode) { device_t gparent = GRANDPARENT(dev); struct ata_pci_controller *ctlr = device_get_softc(gparent); struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); u_int8_t timings[] = { 0xa8, 0x65, 0x42, 0x22, 0x20, 0x42, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 }; int modes[][7] = { { 0xc2, 0xc1, 0xc0, 0x00, 0x00, 0x00, 0x00 }, /* VIA ATA33 */ { 0xee, 0xec, 0xea, 0xe9, 0xe8, 0x00, 0x00 }, /* VIA ATA66 */ { 0xf7, 0xf6, 0xf4, 0xf2, 0xf1, 0xf0, 0x00 }, /* VIA ATA100 */ { 0xf7, 0xf7, 0xf6, 0xf4, 0xf2, 0xf1, 0xf0 }, /* VIA ATA133 */ { 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 
0xc6, 0xc7 }}; /* AMD/nVIDIA */ int devno = (ch->unit << 1) + ATA_DEV(atadev->unit); int reg = 0x53 - devno; int error; mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma); if (ctlr->chip->cfg2 & AMDCABLE) { if (mode > ATA_UDMA2 && !(pci_read_config(gparent, 0x42, 1) & (1 << devno))) { ata_print_cable(dev, "controller"); mode = ATA_UDMA2; } } else mode = ata_check_80pin(dev, mode); if (ctlr->chip->cfg2 & NVIDIA) reg += 0x10; if (ctlr->chip->cfg1 != VIA133) pci_write_config(gparent, reg - 0x08, timings[ata_mode2idx(mode)], 1); error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode); if (bootverbose) device_printf(dev, "%ssetting %s on %s chip\n", (error) ? "FAILURE " : "", ata_mode2str(mode), ctlr->chip->text); if (!error) { if (mode >= ATA_UDMA0) pci_write_config(gparent, reg, modes[ctlr->chip->cfg1][mode & ATA_MODE_MASK], 1); else pci_write_config(gparent, reg, 0x8b, 1); atadev->mode = mode; } } /* misc functions */ static struct ata_chip_id * ata_match_chip(device_t dev, struct ata_chip_id *index) { while (index->chipid != 0) { if (pci_get_devid(dev) == index->chipid && pci_get_revid(dev) >= index->chiprev) return index; index++; } return NULL; } static struct ata_chip_id * ata_find_chip(device_t dev, struct ata_chip_id *index, int slot) { device_t *children; int nchildren, i; if (device_get_children(device_get_parent(dev), &children, &nchildren)) return 0; while (index->chipid != 0) { for (i = 0; i < nchildren; i++) { if (((slot >= 0 && pci_get_slot(children[i]) == slot) || (slot < 0 && pci_get_slot(children[i]) <= -slot)) && pci_get_devid(children[i]) == index->chipid && pci_get_revid(children[i]) >= index->chiprev) { free(children, M_TEMP); return index; } } index++; } free(children, M_TEMP); return NULL; } static int ata_setup_interrupt(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int rid = ATA_IRQ_RID; if (!ata_legacy(dev)) { if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | 
RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, ata_generic_intr, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } } return 0; } struct ata_serialize { struct mtx locked_mtx; int locked_ch; int restart_ch; }; static int ata_serialize(device_t dev, int flags) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); struct ata_serialize *serial; static int inited = 0; int res; if (!inited) { serial = malloc(sizeof(struct ata_serialize), M_TEMP, M_NOWAIT | M_ZERO); mtx_init(&serial->locked_mtx, "ATA serialize lock", NULL, MTX_DEF); serial->locked_ch = -1; serial->restart_ch = -1; device_set_ivars(ctlr->dev, serial); inited = 1; } else serial = device_get_ivars(ctlr->dev); mtx_lock(&serial->locked_mtx); switch (flags) { case ATA_LF_LOCK: if (serial->locked_ch == -1) serial->locked_ch = ch->unit; if (serial->locked_ch != ch->unit) serial->restart_ch = ch->unit; break; case ATA_LF_UNLOCK: if (serial->locked_ch == ch->unit) { serial->locked_ch = -1; if (serial->restart_ch != -1) { if ((ch = ctlr->interrupt[serial->restart_ch].argument)) { serial->restart_ch = -1; mtx_unlock(&serial->locked_mtx); ata_start(ch->dev); return -1; } } } break; case ATA_LF_WHICH: break; } res = serial->locked_ch; mtx_unlock(&serial->locked_mtx); return res; } static void ata_print_cable(device_t dev, u_int8_t *who) { device_printf(dev, "DMA limited to UDMA33, %s found non-ATA66 cable\n", who); } static int ata_atapi(device_t dev) { struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_device *atadev = device_get_softc(dev); return ((atadev->unit == ATA_MASTER && ch->devices & ATA_ATAPI_MASTER) || (atadev->unit == ATA_SLAVE && ch->devices & ATA_ATAPI_SLAVE)); } static int ata_check_80pin(device_t dev, int mode) { struct ata_device *atadev = device_get_softc(dev); if (mode > 
ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) { ata_print_cable(dev, "device"); mode = ATA_UDMA2; } return mode; } static int ata_mode2idx(int mode) { if ((mode & ATA_DMA_MASK) == ATA_UDMA0) return (mode & ATA_MODE_MASK) + 8; if ((mode & ATA_DMA_MASK) == ATA_WDMA0) return (mode & ATA_MODE_MASK) + 5; return (mode & ATA_MODE_MASK) - ATA_PIO0; } diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c index 5fa4d5575861..535a8cf1c6d4 100644 --- a/sys/dev/ata/ata-pci.c +++ b/sys/dev/ata/ata-pci.c @@ -1,674 +1,674 @@ /*- * Copyright (c) 1998 - 2006 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __alpha__ #include #endif #include #include #include #include #include #include /* local vars */ static MALLOC_DEFINE(M_ATAPCI, "ata_pci", "ATA driver PCI"); /* misc defines */ #define IOMASK 0xfffffffc -#define ATA_PROBE_OK -10 +#define ATA_PROBE_OK -10 /* prototypes */ static void ata_pci_dmainit(device_t); int ata_legacy(device_t dev) { return ((pci_read_config(dev, PCIR_PROGIF, 1)&PCIP_STORAGE_IDE_MASTERDEV) && ((pci_read_config(dev, PCIR_PROGIF, 1) & (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC)) != (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC))); } int ata_pci_probe(device_t dev) { if (pci_get_class(dev) != PCIC_STORAGE) return ENXIO; switch (pci_get_vendor(dev)) { case ATA_ACARD_ID: if (!ata_acard_ident(dev)) return ATA_PROBE_OK; break; case ATA_ACER_LABS_ID: if (!ata_ali_ident(dev)) return ATA_PROBE_OK; break; case ATA_AMD_ID: if (!ata_amd_ident(dev)) return ATA_PROBE_OK; break; case ATA_ATI_ID: if (!ata_ati_ident(dev)) return ATA_PROBE_OK; break; case ATA_CYRIX_ID: if (!ata_cyrix_ident(dev)) return ATA_PROBE_OK; break; case ATA_CYPRESS_ID: if (!ata_cypress_ident(dev)) return ATA_PROBE_OK; break; case ATA_HIGHPOINT_ID: if (!ata_highpoint_ident(dev)) return ATA_PROBE_OK; break; case ATA_INTEL_ID: if (!ata_intel_ident(dev)) return ATA_PROBE_OK; break; case ATA_ITE_ID: if (!ata_ite_ident(dev)) return ATA_PROBE_OK; break; case ATA_MARVELL_ID: if (!ata_marvell_ident(dev)) return ATA_PROBE_OK; break; case ATA_NATIONAL_ID: if (!ata_national_ident(dev)) return ATA_PROBE_OK; break; case ATA_NVIDIA_ID: if (!ata_nvidia_ident(dev)) return ATA_PROBE_OK; break; case ATA_PROMISE_ID: if (!ata_promise_ident(dev)) return ATA_PROBE_OK; break; case ATA_SERVERWORKS_ID: if (!ata_serverworks_ident(dev)) return ATA_PROBE_OK; break; case ATA_SILICON_IMAGE_ID: if (!ata_sii_ident(dev)) 
return ATA_PROBE_OK; break; case ATA_SIS_ID: if (!ata_sis_ident(dev)) return ATA_PROBE_OK; break; case ATA_VIA_ID: if (!ata_via_ident(dev)) return ATA_PROBE_OK; break; case ATA_CENATEK_ID: if (pci_get_devid(dev) == ATA_CENATEK_ROCKET) { ata_generic_ident(dev); device_set_desc(dev, "Cenatek Rocket Drive controller"); return ATA_PROBE_OK; } break; case ATA_MICRON_ID: if (pci_get_devid(dev) == ATA_MICRON_RZ1000 || pci_get_devid(dev) == ATA_MICRON_RZ1001) { ata_generic_ident(dev); device_set_desc(dev, "RZ 100? ATA controller !WARNING! data loss/corruption risk"); return ATA_PROBE_OK; } break; } /* unknown chipset, try generic DMA if it seems possible */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE)) { if (!ata_generic_ident(dev)) return ATA_PROBE_OK; } return ENXIO; } int ata_pci_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); u_int32_t cmd; int unit; /* do chipset specific setups only needed once */ if (ata_legacy(dev) || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK) ctlr->channels = 2; else ctlr->channels = 1; ctlr->allocate = ata_pci_allocate; ctlr->dmainit = ata_pci_dmainit; ctlr->dev = dev; /* if needed try to enable busmastering */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (!(cmd & PCIM_CMD_BUSMASTEREN)) { pci_write_config(dev, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN, 2); cmd = pci_read_config(dev, PCIR_COMMAND, 2); } /* if busmastering mode "stuck" use it */ if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) { ctlr->r_type1 = SYS_RES_IOPORT; ctlr->r_rid1 = ATA_BMADDR_RID; ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE); } if (ctlr->chipinit(dev)) return ENXIO; /* attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { if (unit == 0 && (pci_get_progif(dev) & 0x81) == 0x80) { device_add_child(dev, "ata", unit); continue; } if (unit == 1 && (pci_get_progif(dev) & 0x84) == 0x80) { device_add_child(dev, "ata", 
unit); continue; } device_add_child(dev, "ata", devclass_find_free_unit(ata_devclass, 2)); } bus_generic_attach(dev); return 0; } int ata_pci_detach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); device_t *children; int nchildren, i; /* detach & delete all children */ if (!device_get_children(dev, &children, &nchildren)) { for (i = 0; i < nchildren; i++) device_delete_child(dev, children[i]); free(children, M_TEMP); } if (ctlr->r_irq) { bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ctlr->r_irq); } if (ctlr->r_res2) bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2); if (ctlr->r_res1) bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); return 0; } struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int myrid; if (type == SYS_RES_IOPORT) { switch (*rid) { case ATA_IOADDR_RID: if (ata_legacy(dev)) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY); count = ATA_IOSIZE; end = start + count - 1; } myrid = PCIR_BAR(0) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; case ATA_CTLADDR_RID: if (ata_legacy(dev)) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_CTLOFFSET; count = ATA_CTLIOSIZE; end = start + count - 1; } myrid = PCIR_BAR(1) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; } } if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) { if (ata_legacy(dev)) { #ifdef __alpha__ res = alpha_platform_alloc_ide_intr(unit); #else int irq = (unit == 0 ? 
14 : 15); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, irq, irq, 1, flags); #endif } else res = controller->r_irq; } return res; } int ata_pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { int unit = ((struct ata_channel *)device_get_softc(child))->unit; if (type == SYS_RES_IOPORT) { switch (rid) { case ATA_IOADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, PCIR_BAR(0) + (unit << 3), r); break; case ATA_CTLADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, PCIR_BAR(1) + (unit << 3), r); break; default: return ENOENT; } } if (type == SYS_RES_IRQ) { if (rid != ATA_IRQ_RID) return ENOENT; if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_release_ide_intr(unit, r); #else return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, r); #endif } else return 0; } return EINVAL; } int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_intr_t *function, void *argument, void **cookiep) { if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_setup_ide_intr(child, irq, function, argument, cookiep); #else return BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, function, argument, cookiep); #endif } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; controller->interrupt[unit].function = function; controller->interrupt[unit].argument = argument; *cookiep = controller; return 0; } } int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { if (ata_legacy(dev)) { #ifdef __alpha__ return alpha_platform_teardown_ide_intr(child, irq, cookie); #else return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, cookie); #endif } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct ata_channel 
*)device_get_softc(child))->unit; controller->interrupt[unit].function = NULL; controller->interrupt[unit].argument = NULL; return 0; } } int ata_pci_allocate(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); struct resource *io = NULL, *ctlio = NULL; int i, rid; rid = ATA_IOADDR_RID; if (!(io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE))) return ENXIO; rid = ATA_CTLADDR_RID; if (!(ctlio = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,RF_ACTIVE))){ bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io); return ENXIO; } for (i = ATA_DATA; i <= ATA_COMMAND; i ++) { ch->r_io[i].res = io; ch->r_io[i].offset = i; } ch->r_io[ATA_CONTROL].res = ctlio; ch->r_io[ATA_CONTROL].offset = ata_legacy(device_get_parent(dev)) ? 0 : 2; ch->r_io[ATA_IDX_ADDR].res = io; ata_default_registers(dev); if (ctlr->r_res1) { for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) { ch->r_io[i].res = ctlr->r_res1; ch->r_io[i].offset = (i - ATA_BMCMD_PORT) + (ch->unit*ATA_BMIOSIZE); } } ata_pci_hw(dev); return 0; } void ata_pci_hw(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_generic_hw(dev); ch->hw.status = ata_pci_status; } int ata_pci_status(device_t dev) { struct ata_channel *ch = device_get_softc(dev); if (ch->dma && ((ch->flags & ATA_ALWAYS_DMASTAT) || (ch->dma->flags & ATA_DMA_ACTIVE))) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; - if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != + if ((bmstat & (ATA_BMSTAT_ACTIVE | ATA_BMSTAT_INTERRUPT)) != ATA_BMSTAT_INTERRUPT) return 0; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) { - DELAY(100); - if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) - return 0; + DELAY(100); + if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) + return 0; } return 1; } static int ata_pci_dmastart(device_t dev) { struct ata_channel *ch 
= device_get_softc(device_get_parent(dev)); ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) | (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR))); ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, ch->dma->sg_bus); ch->dma->flags |= ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, (ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_WRITE_READ) | ((ch->dma->flags & ATA_DMA_READ) ? ATA_BMCMD_WRITE_READ : 0) | ATA_BMCMD_START_STOP); return 0; } static int ata_pci_dmastop(device_t dev) { struct ata_channel *ch = device_get_softc(device_get_parent(dev)); int error; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ch->dma->flags &= ~ATA_DMA_ACTIVE; error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); return error; } static void ata_pci_dmareset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ch->dma->flags &= ~ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); ch->dma->unload(dev); } static void ata_pci_dmainit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_dmainit(dev); if (ch->dma) { ch->dma->start = ata_pci_dmastart; ch->dma->stop = ata_pci_dmastop; ch->dma->reset = ata_pci_dmareset; } } static device_method_t ata_pci_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pci_probe), DEVMETHOD(device_attach, ata_pci_attach), DEVMETHOD(device_detach, ata_pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus methods */ DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), DEVMETHOD(bus_release_resource, ata_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 
DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), { 0, 0 } }; devclass_t atapci_devclass; static driver_t ata_pci_driver = { "atapci", ata_pci_methods, sizeof(struct ata_pci_controller), }; DRIVER_MODULE(atapci, pci, ata_pci_driver, atapci_devclass, 0, 0); MODULE_VERSION(atapci, 1); MODULE_DEPEND(atapci, ata, 1, 1, 1); static int ata_pcichannel_probe(device_t dev) { struct ata_channel *ch = device_get_softc(dev); device_t *children; int count, i; char buffer[32]; /* take care of green memory */ bzero(ch, sizeof(struct ata_channel)); /* find channel number on this controller */ device_get_children(device_get_parent(dev), &children, &count); for (i = 0; i < count; i++) { if (children[i] == dev) ch->unit = i; } free(children, M_TEMP); sprintf(buffer, "ATA channel %d", ch->unit); device_set_desc_copy(dev, buffer); return ata_probe(dev); } static int ata_pcichannel_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (ctlr->dmainit) ctlr->dmainit(dev); if (ch->dma) ch->dma->alloc(dev); if ((error = ctlr->allocate(dev))) return error; return ata_attach(dev); } static int ata_pcichannel_detach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); int error; if ((error = ata_detach(dev))) return error; if (ch->dma) ch->dma->free(dev); /* XXX SOS free resources for io and ctlio ?? 
*/ return 0; } static int ata_pcichannel_locking(device_t dev, int mode) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); if (ctlr->locking) return ctlr->locking(dev, mode); else return ch->unit; } static void ata_pcichannel_reset(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* if DMA engine present reset it */ if (ch->dma) { if (ch->dma->reset) ch->dma->reset(dev); ch->dma->unload(dev); } /* reset the controller HW */ if (ctlr->reset) ctlr->reset(dev); else ata_generic_reset(dev); } static void ata_pcichannel_setmode(device_t parent, device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(GRANDPARENT(dev)); struct ata_device *atadev = device_get_softc(dev); int mode = atadev->mode; ctlr->setmode(dev, ATA_PIO_MAX); if (mode >= ATA_DMA) ctlr->setmode(dev, mode); } static device_method_t ata_pcichannel_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pcichannel_probe), DEVMETHOD(device_attach, ata_pcichannel_attach), DEVMETHOD(device_detach, ata_pcichannel_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, ata_suspend), DEVMETHOD(device_resume, ata_resume), /* ATA methods */ DEVMETHOD(ata_setmode, ata_pcichannel_setmode), DEVMETHOD(ata_locking, ata_pcichannel_locking), DEVMETHOD(ata_reset, ata_pcichannel_reset), { 0, 0 } }; driver_t ata_pcichannel_driver = { "ata", ata_pcichannel_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, atapci, ata_pcichannel_driver, ata_devclass, 0, 0); diff --git a/sys/dev/ata/ata-pci.h b/sys/dev/ata/ata-pci.h index dbef07a779f4..59bf0f8aca92 100644 --- a/sys/dev/ata/ata-pci.h +++ b/sys/dev/ata/ata-pci.h @@ -1,421 +1,425 @@ /*- * Copyright (c) 2003 - 2006 Søren Schmidt * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ /* structure holding chipset config info */ struct ata_chip_id { u_int32_t chipid; u_int8_t chiprev; int cfg1; int cfg2; u_int8_t max_dma; char *text; }; /* structure describing a PCI ATA controller */ struct ata_pci_controller { device_t dev; int r_type1; int r_rid1; struct resource *r_res1; int r_type2; int r_rid2; struct resource *r_res2; struct resource *r_irq; void *handle; struct ata_chip_id *chip; int channels; int (*chipinit)(device_t); int (*allocate)(device_t); int (*locking)(device_t, int); void (*reset)(device_t); void (*dmainit)(device_t); void (*setmode)(device_t, int); struct { void (*function)(void *); void *argument; } interrupt[8]; /* XXX SOS max ch# for now */ }; /* structure for SATA connection update hotplug/hotswap support */ struct ata_connect_task { struct task task; device_t dev; int action; #define ATA_C_ATTACH 1 #define ATA_C_DETACH 2 }; /* defines for known chipset PCI id's */ #define ATA_ACARD_ID 0x1191 #define ATA_ATP850 0x00021191 #define ATA_ATP850A 0x00041191 #define ATA_ATP850R 0x00051191 #define ATA_ATP860A 0x00061191 #define ATA_ATP860R 0x00071191 #define ATA_ATP865A 0x00081191 #define ATA_ATP865R 0x00091191 #define ATA_AMD_ID 0x1022 #define ATA_AMD755 0x74011022 #define ATA_AMD756 0x74091022 #define ATA_AMD766 0x74111022 #define ATA_AMD768 0x74411022 #define ATA_AMD8111 0x74691022 #define ATA_ACER_LABS_ID 0x10b9 -#define ATA_ALI_1533 0x153310b9 +#define ATA_ALI_1533 0x153310b9 #define ATA_ALI_5229 0x522910b9 #define ATA_ALI_5281 0x528110b9 #define ATA_ALI_5287 0x528710b9 #define ATA_ALI_5288 0x528810b9 #define ATA_ALI_5289 0x528910b9 -#define ATA_ATI_ID 0x1002 -#define ATA_ATI_IXP200 0x43491002 -#define ATA_ATI_IXP300 0x43691002 -#define ATA_ATI_IXP400 0x43761002 -#define ATA_ATI_IXP300_S1 0x436e1002 -#define ATA_ATI_IXP400_S1 0x43791002 -#define ATA_ATI_IXP400_S2 0x437a1002 +#define ATA_ATI_ID 0x1002 +#define ATA_ATI_IXP200 0x43491002 +#define ATA_ATI_IXP300 0x43691002 +#define ATA_ATI_IXP400 0x43761002 
+#define ATA_ATI_IXP300_S1 0x436e1002 +#define ATA_ATI_IXP400_S1 0x43791002 +#define ATA_ATI_IXP400_S2 0x437a1002 #define ATA_CENATEK_ID 0x16ca #define ATA_CENATEK_ROCKET 0x000116ca #define ATA_CYRIX_ID 0x1078 #define ATA_CYRIX_5530 0x01021078 #define ATA_CYPRESS_ID 0x1080 #define ATA_CYPRESS_82C693 0xc6931080 #define ATA_DEC_21150 0x00221011 #define ATA_DEC_21150_1 0x00231011 #define ATA_HIGHPOINT_ID 0x1103 #define ATA_HPT366 0x00041103 #define ATA_HPT372 0x00051103 #define ATA_HPT302 0x00061103 #define ATA_HPT371 0x00071103 #define ATA_HPT374 0x00081103 #define ATA_INTEL_ID 0x8086 #define ATA_I960RM 0x09628086 #define ATA_I82371FB 0x12308086 #define ATA_I82371SB 0x70108086 #define ATA_I82371AB 0x71118086 #define ATA_I82443MX 0x71998086 #define ATA_I82451NX 0x84ca8086 #define ATA_I82372FB 0x76018086 #define ATA_I82801AB 0x24218086 #define ATA_I82801AA 0x24118086 #define ATA_I82801BA 0x244a8086 #define ATA_I82801BA_1 0x244b8086 #define ATA_I82801CA 0x248a8086 #define ATA_I82801CA_1 0x248b8086 #define ATA_I82801DB 0x24cb8086 #define ATA_I82801DB_1 0x24ca8086 #define ATA_I82801EB 0x24db8086 #define ATA_I82801EB_S1 0x24d18086 #define ATA_I82801EB_R1 0x24df8086 #define ATA_I6300ESB 0x25a28086 #define ATA_I6300ESB_S1 0x25a38086 #define ATA_I6300ESB_R1 0x25b08086 #define ATA_I82801FB 0x266f8086 #define ATA_I82801FB_S1 0x26518086 #define ATA_I82801FB_R1 0x26528086 #define ATA_I82801FB_M 0x26538086 #define ATA_I82801GB 0x27df8086 #define ATA_I82801GB_S1 0x27c08086 #define ATA_I82801GB_R1 0x27c38086 #define ATA_I82801GB_AH 0x27c18086 #define ATA_I82801GB_M 0x27c58086 -#define ATA_I31244 0x32008086 +#define ATA_I31244 0x32008086 #define ATA_ITE_ID 0x1283 #define ATA_IT8211F 0x82111283 #define ATA_IT8212F 0x82121283 #define ATA_MARVELL_ID 0x11ab #define ATA_M88SX5040 0x504011ab #define ATA_M88SX5041 0x504111ab #define ATA_M88SX5080 0x508011ab #define ATA_M88SX5081 0x508111ab #define ATA_M88SX6041 0x604111ab #define ATA_M88SX6081 0x608111ab #define ATA_MICRON_ID 0x1042 #define 
ATA_MICRON_RZ1000 0x10001042 #define ATA_MICRON_RZ1001 0x10011042 #define ATA_NATIONAL_ID 0x100b #define ATA_SC1100 0x0502100b #define ATA_NVIDIA_ID 0x10de #define ATA_NFORCE1 0x01bc10de #define ATA_NFORCE2 0x006510de #define ATA_NFORCE2_PRO 0x008510de #define ATA_NFORCE2_PRO_S1 0x008e10de #define ATA_NFORCE3 0x00d510de #define ATA_NFORCE3_PRO 0x00e510de #define ATA_NFORCE3_PRO_S1 0x00e310de #define ATA_NFORCE3_PRO_S2 0x00ee10de #define ATA_NFORCE_MCP04 0x003510de #define ATA_NFORCE_MCP04_S1 0x003610de #define ATA_NFORCE_MCP04_S2 0x003e10de #define ATA_NFORCE_CK804 0x005310de #define ATA_NFORCE_CK804_S1 0x005410de #define ATA_NFORCE_CK804_S2 0x005510de #define ATA_NFORCE_MCP51 0x026510de #define ATA_NFORCE_MCP51_S1 0x026610de #define ATA_NFORCE_MCP51_S2 0x026710de #define ATA_NFORCE_MCP55 0x036e10de #define ATA_NFORCE_MCP55_S1 0x037e10de #define ATA_NFORCE_MCP55_S2 0x037f10de #define ATA_PROMISE_ID 0x105a #define ATA_PDC20246 0x4d33105a #define ATA_PDC20262 0x4d38105a #define ATA_PDC20263 0x0d38105a #define ATA_PDC20265 0x0d30105a #define ATA_PDC20267 0x4d30105a #define ATA_PDC20268 0x4d68105a #define ATA_PDC20269 0x4d69105a #define ATA_PDC20270 0x6268105a #define ATA_PDC20271 0x6269105a #define ATA_PDC20275 0x1275105a #define ATA_PDC20276 0x5275105a #define ATA_PDC20277 0x7275105a #define ATA_PDC20318 0x3318105a #define ATA_PDC20319 0x3319105a #define ATA_PDC20371 0x3371105a #define ATA_PDC20375 0x3375105a #define ATA_PDC20376 0x3376105a #define ATA_PDC20377 0x3377105a #define ATA_PDC20378 0x3373105a #define ATA_PDC20379 0x3372105a #define ATA_PDC20571 0x3571105a #define ATA_PDC20575 0x3d75105a #define ATA_PDC20579 0x3574105a -#define ATA_PDC20580 0x3570105a +#define ATA_PDC20771 0x3570105a #define ATA_PDC40518 0x3d18105a #define ATA_PDC40519 0x3519105a #define ATA_PDC40718 0x3d17105a #define ATA_PDC40719 0x3515105a +#define ATA_PDC40775 0x3d73105a +#define ATA_PDC40779 0x3577105a #define ATA_PDC20617 0x6617105a #define ATA_PDC20618 0x6626105a #define ATA_PDC20619 
0x6629105a #define ATA_PDC20620 0x6620105a #define ATA_PDC20621 0x6621105a #define ATA_PDC20622 0x6622105a +#define ATA_PDC20624 0x6624105a +#define ATA_PDC81518 0x8002105a #define ATA_SERVERWORKS_ID 0x1166 #define ATA_ROSB4_ISA 0x02001166 #define ATA_ROSB4 0x02111166 #define ATA_CSB5 0x02121166 #define ATA_CSB6 0x02131166 #define ATA_CSB6_1 0x02171166 #define ATA_SILICON_IMAGE_ID 0x1095 #define ATA_SII3114 0x31141095 #define ATA_SII3512 0x35121095 #define ATA_SII3112 0x31121095 #define ATA_SII3112_1 0x02401095 #define ATA_SII0680 0x06801095 #define ATA_CMD646 0x06461095 #define ATA_CMD648 0x06481095 #define ATA_CMD649 0x06491095 #define ATA_SIS_ID 0x1039 #define ATA_SISSOUTH 0x00081039 #define ATA_SIS5511 0x55111039 #define ATA_SIS5513 0x55131039 #define ATA_SIS5517 0x55171039 #define ATA_SIS5518 0x55181039 #define ATA_SIS5571 0x55711039 #define ATA_SIS5591 0x55911039 #define ATA_SIS5596 0x55961039 #define ATA_SIS5597 0x55971039 #define ATA_SIS5598 0x55981039 #define ATA_SIS5600 0x56001039 #define ATA_SIS530 0x05301039 #define ATA_SIS540 0x05401039 #define ATA_SIS550 0x05501039 #define ATA_SIS620 0x06201039 #define ATA_SIS630 0x06301039 #define ATA_SIS635 0x06351039 #define ATA_SIS633 0x06331039 #define ATA_SIS640 0x06401039 #define ATA_SIS645 0x06451039 #define ATA_SIS646 0x06461039 #define ATA_SIS648 0x06481039 #define ATA_SIS650 0x06501039 #define ATA_SIS651 0x06511039 #define ATA_SIS652 0x06521039 #define ATA_SIS655 0x06551039 #define ATA_SIS658 0x06581039 #define ATA_SIS661 0x06611039 #define ATA_SIS730 0x07301039 #define ATA_SIS733 0x07331039 #define ATA_SIS735 0x07351039 #define ATA_SIS740 0x07401039 #define ATA_SIS745 0x07451039 #define ATA_SIS746 0x07461039 #define ATA_SIS748 0x07481039 #define ATA_SIS750 0x07501039 #define ATA_SIS751 0x07511039 #define ATA_SIS752 0x07521039 #define ATA_SIS755 0x07551039 #define ATA_SIS961 0x09611039 #define ATA_SIS962 0x09621039 #define ATA_SIS963 0x09631039 #define ATA_SIS964 0x09641039 #define ATA_SIS965 0x09651039 
#define ATA_SIS180 0x01801039 #define ATA_SIS181 0x01811039 #define ATA_SIS182 0x01821039 #define ATA_VIA_ID 0x1106 #define ATA_VIA82C571 0x05711106 #define ATA_VIA82C586 0x05861106 #define ATA_VIA82C596 0x05961106 #define ATA_VIA82C686 0x06861106 #define ATA_VIA8231 0x82311106 #define ATA_VIA8233 0x30741106 #define ATA_VIA8233A 0x31471106 #define ATA_VIA8233C 0x31091106 #define ATA_VIA8235 0x31771106 #define ATA_VIA8237 0x32271106 #define ATA_VIA8251 0x33491106 #define ATA_VIA8361 0x31121106 #define ATA_VIA8363 0x03051106 #define ATA_VIA8371 0x03911106 #define ATA_VIA8662 0x31021106 #define ATA_VIA6410 0x31641106 #define ATA_VIA6420 0x31491106 #define ATA_VIA6421 0x32491106 /* chipset setup related defines */ -#define AHCI 1 +#define AHCI 1 #define ATPOLD 1 #define ALIOLD 0x01 #define ALINEW 0x02 #define ALISATA 0x04 #define HPT366 0 #define HPT370 1 #define HPT372 2 #define HPT374 3 #define HPTOLD 0x01 #define MV5XXX 5 #define MV6XXX 6 #define PROLD 0 #define PRNEW 1 #define PRTX 2 #define PRMIO 3 #define PRTX4 0x01 #define PRSX4X 0x02 #define PRSX6K 0x04 #define PRPATA 0x08 #define PRCMBO 0x10 #define PRCMBO2 0x20 #define PRSATA 0x40 #define PRSATA2 0x80 #define SWKS33 0 #define SWKS66 1 #define SWKS100 2 #define SIIMEMIO 1 #define SIIINTR 0x01 #define SIISETCLK 0x02 #define SIIBUG 0x04 #define SII4CH 0x08 #define SIS_SOUTH 1 #define SISSATA 2 #define SIS133NEW 3 #define SIS133OLD 4 #define SIS100NEW 5 #define SIS100OLD 6 #define SIS66 7 #define SIS33 8 #define VIA33 0 #define VIA66 1 #define VIA100 2 #define VIA133 3 #define AMDNVIDIA 4 #define AMDCABLE 0x01 #define AMDBUG 0x02 #define NVIDIA 0x04 #define NV4OFF 0x08 #define VIACLK 0x10 #define VIABUG 0x20 #define VIABAR 0x40 #define VIAAHCI 0x80 /* global prototypes ata-pci.c */ int ata_pci_probe(device_t dev); int ata_pci_attach(device_t dev); int ata_pci_detach(device_t dev); struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, 
u_int flags); int ata_pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_intr_t *function, void *argument, void **cookiep); int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int ata_pci_allocate(device_t dev); void ata_pci_hw(device_t dev); int ata_pci_status(device_t dev); /* global prototypes ata-chipset.c */ int ata_generic_ident(device_t); int ata_acard_ident(device_t); int ata_ali_ident(device_t); int ata_amd_ident(device_t); int ata_ati_ident(device_t); int ata_cyrix_ident(device_t); int ata_cypress_ident(device_t); int ata_highpoint_ident(device_t); int ata_intel_ident(device_t); int ata_ite_ident(device_t); int ata_marvell_ident(device_t); int ata_national_ident(device_t); int ata_nvidia_ident(device_t); int ata_promise_ident(device_t); int ata_serverworks_ident(device_t); int ata_sii_ident(device_t); int ata_sis_ident(device_t); int ata_via_ident(device_t); int ata_legacy(device_t); /* global prototypes ata-dma.c */ void ata_dmainit(device_t); diff --git a/sys/dev/ata/ata-queue.c b/sys/dev/ata/ata-queue.c index 37a2f67a22a1..da590f49868b 100644 --- a/sys/dev/ata/ata-queue.c +++ b/sys/dev/ata/ata-queue.c @@ -1,730 +1,730 @@ /*- * Copyright (c) 1998 - 2006 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static void ata_completed(void *, int); static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request); static char *ata_skey2str(u_int8_t); void ata_queue_request(struct ata_request *request) { struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); /* mark request as virgin (this might be a ATA_R_REQUEUE) */ request->result = request->status = request->error = 0; callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); if (!request->callback && !(request->flags & ATA_R_REQUEUE)) sema_init(&request->done, 0, "ATA request done"); /* in ATA_STALL_QUEUE state we call HW directly (used only during reinit) */ if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) { mtx_lock(&ch->state_mtx); ch->running = request; if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; if (!request->callback) sema_destroy(&request->done); mtx_unlock(&ch->state_mtx); return; } mtx_unlock(&ch->state_mtx); } /* otherwise put request on the locked queue at the specified location */ else { 
mtx_lock(&ch->queue_mtx); if (request->flags & ATA_R_AT_HEAD) TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain); else if (request->flags & ATA_R_ORDERED) ata_sort_queue(ch, request); else TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain); mtx_unlock(&ch->queue_mtx); ATA_DEBUG_RQ(request, "queued"); ata_start(ch->dev); } /* if this is a requeued request callback/sleep we're done */ if (request->flags & ATA_R_REQUEUE) return; /* if this is not a callback wait until request is completed */ if (!request->callback) { ATA_DEBUG_RQ(request, "wait for completition"); while (!dumping && sema_timedwait(&request->done, request->timeout * hz * 4)) { device_printf(request->dev, "req=%p %s semaphore timeout !! DANGER Will Robinson !!\n", request, ata_cmd2str(request)); ata_start(ch->dev); } sema_destroy(&request->done); } } int ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature, u_int64_t lba, u_int16_t count) { struct ata_request *request = ata_alloc_request(); int error = ENOMEM; if (request) { request->dev = dev; request->u.ata.command = command; request->u.ata.lba = lba; request->u.ata.count = count; request->u.ata.feature = feature; request->flags = ATA_R_CONTROL; request->timeout = 1; request->retries = 0; ata_queue_request(request); error = request->result; ata_free_request(request); } return error; } int ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data, int count, int flags, int timeout) { struct ata_request *request = ata_alloc_request(); struct ata_device *atadev = device_get_softc(dev); int error = ENOMEM; if (request) { request->dev = dev; if ((atadev->param.config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12) bcopy(ccb, request->u.atapi.ccb, 12); else bcopy(ccb, request->u.atapi.ccb, 16); request->data = data; request->bytecount = count; request->transfersize = min(request->bytecount, 65534); request->flags = flags | ATA_R_ATAPI; request->timeout = timeout; request->retries = 0; ata_queue_request(request); error = request->result; 
ata_free_request(request); } return error; } void ata_start(device_t dev) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; struct ata_composite *cptr; int dependencies = 0; /* if we have a request on the queue try to get it running */ mtx_lock(&ch->queue_mtx); if ((request = TAILQ_FIRST(&ch->ata_queue))) { /* we need the locking function to get the lock for this channel */ if (ATA_LOCKING(dev, ATA_LF_LOCK) == ch->unit) { /* check for composite dependencies */ if ((cptr = request->composite)) { mtx_lock(&cptr->lock); if ((request->flags & ATA_R_WRITE) && (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) { dependencies = 1; } mtx_unlock(&cptr->lock); } /* check we are in the right state and has no dependencies */ mtx_lock(&ch->state_mtx); if (ch->state == ATA_IDLE && !dependencies) { ATA_DEBUG_RQ(request, "starting"); TAILQ_REMOVE(&ch->ata_queue, request, chain); ch->running = request; ch->state = ATA_ACTIVE; /* if we are the freezing point release it */ if (ch->freezepoint == request) ch->freezepoint = NULL; if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; ch->state = ATA_IDLE; mtx_unlock(&ch->state_mtx); mtx_unlock(&ch->queue_mtx); ATA_LOCKING(dev, ATA_LF_UNLOCK); ata_finish(request); return; } if (dumping) { mtx_unlock(&ch->state_mtx); mtx_unlock(&ch->queue_mtx); while (!ata_interrupt(ch)) - DELAY(10); + DELAY(10); return; - } + } } mtx_unlock(&ch->state_mtx); } } mtx_unlock(&ch->queue_mtx); } void ata_finish(struct ata_request *request) { struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); /* * if in ATA_STALL_QUEUE state or request has ATA_R_DIRECT flags set * we need to call ata_complete() directly here (no taskqueue involvement) */ if (dumping || (ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT)) { ATA_DEBUG_RQ(request, "finish directly"); ata_completed(request, 0); } else { /* put request on the proper taskqueue for completition */ if (request->bio && 
!(request->flags & (ATA_R_THREAD | ATA_R_TIMEOUT))){ ATA_DEBUG_RQ(request, "finish bio_taskqueue"); bio_taskqueue(request->bio, (bio_task_t *)ata_completed, request); } else { TASK_INIT(&request->task, 0, ata_completed, request); ATA_DEBUG_RQ(request, "finish taskqueue_swi"); taskqueue_enqueue(taskqueue_swi, &request->task); } } } static void ata_completed(void *context, int dummy) { struct ata_request *request = (struct ata_request *)context; struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); struct ata_device *atadev = device_get_softc(request->dev); struct ata_composite *composite; ATA_DEBUG_RQ(request, "completed entered"); /* if we had a timeout, reinit channel and deal with the falldown */ if (request->flags & ATA_R_TIMEOUT) { /* * if reinit succeeds and the device doesn't get detached and * there are retries left we reinject this request */ if (!ata_reinit(ch->dev) && !request->result && (request->retries-- > 0)) { if (!(request->flags & ATA_R_QUIET)) { device_printf(request->dev, "TIMEOUT - %s retrying (%d retr%s left)", ata_cmd2str(request), request->retries, request->retries == 1 ? 
"y" : "ies"); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } request->flags &= ~(ATA_R_TIMEOUT | ATA_R_DEBUG); request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE); ATA_DEBUG_RQ(request, "completed reinject"); ata_queue_request(request); return; } /* ran out of good intentions so finish with error */ if (!request->result) { if (!(request->flags & ATA_R_QUIET)) { if (request->dev) { device_printf(request->dev, "FAILURE - %s timed out", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } } request->result = EIO; } } else { /* if this is a soft ECC error warn about it */ /* XXX SOS we could do WARF here */ if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) { device_printf(request->dev, "WARNING - %s soft error (ECC corrected)", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } /* if this is a UDMA CRC error we reinject if there are retries left */ if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) { if (request->retries-- > 0) { device_printf(request->dev, "WARNING - %s UDMA ICRC error (retrying request)", ata_cmd2str(request)); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE); ata_queue_request(request); return; } } } switch (request->flags & ATA_R_ATAPI) { /* ATA errors */ default: if (!request->result && request->status & ATA_S_ERROR) { if (!(request->flags & ATA_R_QUIET)) { device_printf(request->dev, "FAILURE - %s status=%b error=%b", ata_cmd2str(request), request->status, "\20\10BUSY\7READY\6DMA_READY" "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR", request->error, "\20\10ICRC\7UNCORRECTABLE" "\6MEDIA_CHANGED\5NID_NOT_FOUND" 
"\4MEDIA_CHANGE_REQEST" "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH"); if ((request->flags & ATA_R_DMA) && (request->dmastat & ATA_BMSTAT_ERROR)) printf(" dma=0x%02x", request->dmastat); if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) printf(" LBA=%llu", (unsigned long long)request->u.ata.lba); printf("\n"); } request->result = EIO; } break; /* ATAPI errors */ case ATA_R_ATAPI: /* skip if result already set */ if (request->result) break; /* if we have a sensekey -> request sense from device */ if (request->error & ATA_SK_MASK && request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE) { static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0, sizeof(struct atapi_sense), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; request->u.atapi.sense_key = request->error; request->u.atapi.sense_cmd = request->u.atapi.ccb[0]; bcopy(ccb, request->u.atapi.ccb, 16); request->data = (caddr_t)&request->u.atapi.sense_data; request->bytecount = sizeof(struct atapi_sense); request->donecount = 0; request->transfersize = sizeof(struct atapi_sense); request->timeout = 5; request->flags &= (ATA_R_ATAPI | ATA_R_QUIET); request->flags |= (ATA_R_READ | ATA_R_AT_HEAD | ATA_R_REQUEUE); ATA_DEBUG_RQ(request, "autoissue request sense"); ata_queue_request(request); return; } switch (request->u.atapi.sense_key & ATA_SK_MASK) { case ATA_SK_RECOVERED_ERROR: device_printf(request->dev, "WARNING - %s recovered error\n", ata_cmd2str(request)); /* FALLTHROUGH */ case ATA_SK_NO_SENSE: request->result = 0; break; case ATA_SK_NOT_READY: request->result = EBUSY; break; case ATA_SK_UNIT_ATTENTION: atadev->flags |= ATA_D_MEDIA_CHANGED; request->result = EIO; break; default: request->result = EIO; if (request->flags & ATA_R_QUIET) break; device_printf(request->dev, "FAILURE - %s %s asc=0x%02x ascq=0x%02x ", ata_cmd2str(request), ata_skey2str( (request->u.atapi.sense_key & ATA_SK_MASK) >> 4), request->u.atapi.sense_data.asc, request->u.atapi.sense_data.ascq); if (request->u.atapi.sense_data.sksv) printf("sks=0x%02x 0x%02x 0x%02x 
", request->u.atapi.sense_data.sk_specific, request->u.atapi.sense_data.sk_specific1, request->u.atapi.sense_data.sk_specific2); printf("error=%b\n", (request->u.atapi.sense_key & ATA_E_MASK), "\20\4MEDIA_CHANGE_REQUEST\3ABORTED" "\2NO_MEDIA\1ILLEGAL_LENGTH"); } if ((request->u.atapi.sense_key ? request->u.atapi.sense_key : request->error) & ATA_E_MASK) request->result = EIO; } ATA_DEBUG_RQ(request, "completed callback/wakeup"); /* if we are part of a composite operation we need to maintain progress */ if ((composite = request->composite)) { int index = 0; mtx_lock(&composite->lock); /* update whats done */ if (request->flags & ATA_R_READ) composite->rd_done |= (1 << request->this); if (request->flags & ATA_R_WRITE) composite->wr_done |= (1 << request->this); /* find ready to go dependencies */ if (composite->wr_depend && (composite->rd_done & composite->wr_depend)==composite->wr_depend && (composite->wr_needed & (~composite->wr_done))) { index = composite->wr_needed & ~composite->wr_done; } mtx_unlock(&composite->lock); /* if we have any ready candidates kick them off */ if (index) { int bit; for (bit = 0; bit < MAX_COMPOSITES; bit++) { if (index & (1 << bit)) - ata_start(device_get_parent(composite->request[bit]->dev)); + ata_start(device_get_parent(composite->request[bit]->dev)); } } } /* get results back to the initiator for this request */ if (request->callback) (request->callback)(request); else sema_post(&request->done); ata_start(ch->dev); } void ata_timeout(struct ata_request *request) { struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); //request->flags |= ATA_R_DEBUG; ATA_DEBUG_RQ(request, "timeout"); /* * if we have an ATA_ACTIVE request running, we flag the request * ATA_R_TIMEOUT so ata_finish will handle it correctly * also NULL out the running request so we wont loose * the race with an eventual interrupt arriving late */ if (ch->state == ATA_ACTIVE) { request->flags |= ATA_R_TIMEOUT; ch->running = NULL; 
mtx_unlock(&ch->state_mtx); ATA_LOCKING(ch->dev, ATA_LF_UNLOCK); ata_finish(request); } else { mtx_unlock(&ch->state_mtx); } } void ata_fail_requests(device_t dev) { struct ata_channel *ch = device_get_softc(device_get_parent(dev)); struct ata_request *request; /* do we have any outstanding request to care about ?*/ mtx_lock(&ch->state_mtx); if ((request = ch->running) && (!dev || request->dev == dev)) { callout_stop(&request->callout); ch->running = NULL; } else request = NULL; mtx_unlock(&ch->state_mtx); if (request) { request->result = ENXIO; ata_finish(request); } /* fail all requests queued on this channel for device dev if !NULL */ mtx_lock(&ch->queue_mtx); while ((request = TAILQ_FIRST(&ch->ata_queue))) { if (!dev || request->dev == dev) { TAILQ_REMOVE(&ch->ata_queue, request, chain); mtx_unlock(&ch->queue_mtx); request->result = ENXIO; ata_finish(request); mtx_lock(&ch->queue_mtx); } } mtx_unlock(&ch->queue_mtx); } static u_int64_t ata_get_lba(struct ata_request *request) { if (request->flags & ATA_R_ATAPI) { switch (request->u.atapi.ccb[0]) { case ATAPI_READ_BIG: case ATAPI_WRITE_BIG: case ATAPI_READ_CD: return (request->u.atapi.ccb[5]) | (request->u.atapi.ccb[4]<<8) | (request->u.atapi.ccb[3]<<16)|(request->u.atapi.ccb[2]<<24); case ATAPI_READ: case ATAPI_WRITE: return (request->u.atapi.ccb[4]) | (request->u.atapi.ccb[3]<<8) | (request->u.atapi.ccb[2]<<16); default: return 0; } } else return request->u.ata.lba; } static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request) { struct ata_request *this, *next; this = TAILQ_FIRST(&ch->ata_queue); /* if the queue is empty just insert */ if (!this) { if (request->composite) ch->freezepoint = request; TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain); return; } /* dont sort frozen parts of the queue */ if (ch->freezepoint) this = ch->freezepoint; /* if position is less than head we add after tipping point */ if (ata_get_lba(request) < ata_get_lba(this)) { while ((next = TAILQ_NEXT(this, 
/*
 * Return a printable name for the command a request carries, for use in
 * the driver's error/diagnostic printf()s.  For ATAPI requests that had a
 * REQUEST SENSE auto-issued (sense_key nonzero), the saved original
 * command byte (sense_cmd) is decoded instead of the ccb, which by then
 * holds the REQUEST SENSE cdb (see the sense handling in ata_completed).
 */
char *
ata_cmd2str(struct ata_request *request)
{
    /*
     * NOTE(review): shared static buffer - the returned string is only
     * valid until the next ata_cmd2str() call; adequate for the
     * immediate printf usage in this driver, but not reentrant.
     */
    static char buffer[20];

    if (request->flags & ATA_R_ATAPI) {
	switch (request->u.atapi.sense_key ?
		request->u.atapi.sense_cmd : request->u.atapi.ccb[0]) {
	case 0x00: return ("TEST_UNIT_READY");
	case 0x01: return ("REZERO");
	case 0x03: return ("REQUEST_SENSE");
	case 0x04: return ("FORMAT");
	case 0x08: return ("READ");
	case 0x0a: return ("WRITE");
	case 0x10: return ("WEOF");
	case 0x11: return ("SPACE");
	case 0x12: return ("INQUIRY");
	case 0x15: return ("MODE_SELECT");
	case 0x19: return ("ERASE");
	case 0x1a: return ("MODE_SENSE");
	case 0x1b: return ("START_STOP");
	case 0x1e: return ("PREVENT_ALLOW");
	case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
	case 0x25: return ("READ_CAPACITY");
	case 0x28: return ("READ_BIG");
	case 0x2a: return ("WRITE_BIG");
	case 0x2b: return ("LOCATE");
	case 0x34: return ("READ_POSITION");
	case 0x35: return ("SYNCHRONIZE_CACHE");
	case 0x3b: return ("WRITE_BUFFER");
	case 0x3c: return ("READ_BUFFER");
	case 0x42: return ("READ_SUBCHANNEL");
	case 0x43: return ("READ_TOC");
	case 0x45: return ("PLAY_10");
	case 0x47: return ("PLAY_MSF");
	case 0x48: return ("PLAY_TRACK");
	case 0x4b: return ("PAUSE");
	case 0x51: return ("READ_DISK_INFO");
	case 0x52: return ("READ_TRACK_INFO");
	case 0x53: return ("RESERVE_TRACK");
	case 0x54: return ("SEND_OPC_INFO");
	case 0x55: return ("MODE_SELECT_BIG");
	case 0x58: return ("REPAIR_TRACK");
	case 0x59: return ("READ_MASTER_CUE");
	case 0x5a: return ("MODE_SENSE_BIG");
	case 0x5b: return ("CLOSE_TRACK/SESSION");
	case 0x5c: return ("READ_BUFFER_CAPACITY");
	case 0x5d: return ("SEND_CUE_SHEET");
	case 0xa1: return ("BLANK_CMD");
	case 0xa3: return ("SEND_KEY");
	case 0xa4: return ("REPORT_KEY");
	case 0xa5: return ("PLAY_12");
	case 0xa6: return ("LOAD_UNLOAD");
	case 0xad: return ("READ_DVD_STRUCTURE");
	case 0xb4: return ("PLAY_CD");
	case 0xbb: return ("SET_SPEED");
	case 0xbd: return ("MECH_STATUS");
	case 0xbe: return ("READ_CD");
	case 0xff: return ("POLL_DSC");
	}
    }
    else {
	switch (request->u.ata.command) {
	case 0x00: return ("NOP");
	case 0x08: return ("DEVICE_RESET");
	case 0x20: return ("READ");
	case 0x24: return ("READ48");
	case 0x25: return ("READ_DMA48");
	case 0x26: return ("READ_DMA_QUEUED48");
	case 0x29: return ("READ_MUL48");
	case 0x30: return ("WRITE");
	case 0x34: return ("WRITE48");
	case 0x35: return ("WRITE_DMA48");
	case 0x36: return ("WRITE_DMA_QUEUED48");
	case 0x39: return ("WRITE_MUL48");
	case 0x70: return ("SEEK");
	case 0xa0: return ("PACKET_CMD");
	case 0xa1: return ("ATAPI_IDENTIFY");
	case 0xa2: return ("SERVICE");
	case 0xc0: return ("CFA ERASE");
	case 0xc4: return ("READ_MUL");
	case 0xc5: return ("WRITE_MUL");
	case 0xc6: return ("SET_MULTI");
	case 0xc7: return ("READ_DMA_QUEUED");
	case 0xc8: return ("READ_DMA");
	case 0xca: return ("WRITE_DMA");
	case 0xcc: return ("WRITE_DMA_QUEUED");
	case 0xe6: return ("SLEEP");
	case 0xe7: return ("FLUSHCACHE");
	case 0xea: return ("FLUSHCACHE48");
	case 0xec: return ("ATA_IDENTIFY");
	case 0xef:
	    /* SETFEATURES is qualified by the feature register */
	    switch (request->u.ata.feature) {
	    case 0x03: return ("SETFEATURES SET TRANSFER MODE");
	    case 0x02: return ("SETFEATURES ENABLE WCACHE");
	    case 0x82: return ("SETFEATURES DISABLE WCACHE");
	    case 0xaa: return ("SETFEATURES ENABLE RCACHE");
	    case 0x55: return ("SETFEATURES DISABLE RCACHE");
	    }
	    sprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature);
	    return buffer;
	}
    }
    /*
     * NOTE(review): an unrecognized ATAPI opcode also falls through to
     * here and is formatted from u.ata.command - a different member of
     * the request union than the ccb that was inspected above; probably
     * a harmless overlap, but verify the intent.
     */
    sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
    return buffer;
}
/*
 * Translate a SCSI/ATAPI sense key (0x0 - 0xf) into its standard name.
 * Values outside the 4 bit sense key range yield "UNKNOWN".
 */
static char *
ata_skey2str(u_int8_t skey)
{
    static char *sense_key_names[] = {
	"NO SENSE",		/* 0x00 */
	"RECOVERED ERROR",	/* 0x01 */
	"NOT READY",		/* 0x02 */
	"MEDIUM ERROR",		/* 0x03 */
	"HARDWARE ERROR",	/* 0x04 */
	"ILLEGAL REQUEST",	/* 0x05 */
	"UNIT ATTENTION",	/* 0x06 */
	"DATA PROTECT",		/* 0x07 */
	"BLANK CHECK",		/* 0x08 */
	"VENDOR SPECIFIC",	/* 0x09 */
	"COPY ABORTED",		/* 0x0a */
	"ABORTED COMMAND",	/* 0x0b */
	"EQUAL",		/* 0x0c */
	"VOLUME OVERFLOW",	/* 0x0d */
	"MISCOMPARE",		/* 0x0e */
	"RESERVED",		/* 0x0f */
    };

    if (skey < sizeof(sense_key_names) / sizeof(sense_key_names[0]))
	return (sense_key_names[skey]);
    return ("UNKNOWN");
}
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ata.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static void ata_raid_done(struct ata_request *request); static void ata_raid_config_changed(struct ar_softc *rdp, int writeback); static int ata_raid_status(struct ata_ioc_raid_config *config); static int ata_raid_create(struct ata_ioc_raid_config *config); static int ata_raid_delete(int array); static int ata_raid_addspare(struct ata_ioc_raid_config *config); static int ata_raid_rebuild(int array); static int ata_raid_read_metadata(device_t subdisk); static int ata_raid_write_metadata(struct ar_softc *rdp); static int ata_raid_wipe_metadata(struct ar_softc *rdp); static int ata_raid_adaptec_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_hptv2_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_hptv2_write_meta(struct ar_softc *rdp); static int ata_raid_hptv3_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_intel_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_intel_write_meta(struct ar_softc *rdp); static int ata_raid_ite_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_lsiv2_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_lsiv3_read_meta(device_t dev, struct 
ar_softc **raidp); static int ata_raid_nvidia_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_promise_read_meta(device_t dev, struct ar_softc **raidp, int native); static int ata_raid_promise_write_meta(struct ar_softc *rdp); static int ata_raid_sii_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_sis_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_sis_write_meta(struct ar_softc *rdp); static int ata_raid_via_read_meta(device_t dev, struct ar_softc **raidp); static int ata_raid_via_write_meta(struct ar_softc *rdp); static struct ata_request *ata_raid_init_request(struct ar_softc *rdp, struct bio *bio); static int ata_raid_send_request(struct ata_request *request); static int ata_raid_rw(device_t dev, u_int64_t lba, void *data, u_int bcount, int flags); static char * ata_raid_format(struct ar_softc *rdp); static char * ata_raid_type(struct ar_softc *rdp); static char * ata_raid_flags(struct ar_softc *rdp); /* debugging only */ static void ata_raid_print_meta(struct ar_softc *meta); static void ata_raid_adaptec_print_meta(struct adaptec_raid_conf *meta); static void ata_raid_hptv2_print_meta(struct hptv2_raid_conf *meta); static void ata_raid_hptv3_print_meta(struct hptv3_raid_conf *meta); static void ata_raid_intel_print_meta(struct intel_raid_conf *meta); static void ata_raid_ite_print_meta(struct ite_raid_conf *meta); static void ata_raid_lsiv2_print_meta(struct lsiv2_raid_conf *meta); static void ata_raid_lsiv3_print_meta(struct lsiv3_raid_conf *meta); static void ata_raid_nvidia_print_meta(struct nvidia_raid_conf *meta); static void ata_raid_promise_print_meta(struct promise_raid_conf *meta); static void ata_raid_sii_print_meta(struct sii_raid_conf *meta); static void ata_raid_sis_print_meta(struct sis_raid_conf *meta); static void ata_raid_via_print_meta(struct via_raid_conf *meta); /* internal vars */ static struct ar_softc *ata_raid_arrays[MAX_ARRAYS]; static MALLOC_DEFINE(M_AR, "ar_driver", 
"ATA PseudoRAID driver"); static devclass_t ata_raid_sub_devclass; static int testing = 0; /* device structures */ static disk_strategy_t ata_raid_strategy; static dumper_t ata_raid_dump; static void ata_raid_attach(struct ar_softc *rdp, int writeback) { char buffer[32]; int disk; mtx_init(&rdp->lock, "ATA PseudoRAID metadata lock", NULL, MTX_DEF); ata_raid_config_changed(rdp, writeback); /* sanitize arrays total_size % (width * interleave) == 0 */ if (rdp->type == AR_T_RAID0 || rdp->type == AR_T_RAID01 || rdp->type == AR_T_RAID5) { rdp->total_sectors = (rdp->total_sectors/(rdp->interleave*rdp->width))* (rdp->interleave * rdp->width); sprintf(buffer, " (stripe %d KB)", (rdp->interleave * DEV_BSIZE) / 1024); } else buffer[0] = '\0'; rdp->disk = disk_alloc(); rdp->disk->d_strategy = ata_raid_strategy; rdp->disk->d_dump = ata_raid_dump; rdp->disk->d_name = "ar"; rdp->disk->d_sectorsize = DEV_BSIZE; rdp->disk->d_mediasize = (off_t)rdp->total_sectors * DEV_BSIZE; rdp->disk->d_fwsectors = rdp->sectors; rdp->disk->d_fwheads = rdp->heads; rdp->disk->d_maxsize = 128 * DEV_BSIZE; rdp->disk->d_drv1 = rdp; rdp->disk->d_unit = rdp->lun; disk_create(rdp->disk, DISK_VERSION); printf("ar%d: %lluMB <%s %s%s> status: %s\n", rdp->lun, (unsigned long long)(rdp->total_sectors / ((1024L*1024L)/DEV_BSIZE)), ata_raid_format(rdp), ata_raid_type(rdp), buffer, ata_raid_flags(rdp)); if (testing || bootverbose) printf("ar%d: %llu sectors [%dC/%dH/%dS] <%s> subdisks defined as:\n", rdp->lun, (unsigned long long)rdp->total_sectors, rdp->cylinders, rdp->heads, rdp->sectors, rdp->name); for (disk = 0; disk < rdp->total_disks; disk++) { printf("ar%d: disk%d ", rdp->lun, disk); if (rdp->disks[disk].dev) { if (rdp->disks[disk].flags & AR_DF_PRESENT) { /* status of this disk in the array */ if (rdp->disks[disk].flags & AR_DF_ONLINE) printf("READY "); else if (rdp->disks[disk].flags & AR_DF_SPARE) printf("SPARE "); else printf("FREE "); /* what type of disk is this in the array */ switch (rdp->type) { 
case AR_T_RAID1: case AR_T_RAID01: if (disk < rdp->width) printf("(master) "); else printf("(mirror) "); } /* which physical disk is used */ printf("using %s at ata%d-%s\n", device_get_nameunit(rdp->disks[disk].dev), device_get_unit(device_get_parent(rdp->disks[disk].dev)), (((struct ata_device *) device_get_softc(rdp->disks[disk].dev))->unit == ATA_MASTER) ? "master" : "slave"); } else if (rdp->disks[disk].flags & AR_DF_ASSIGNED) printf("DOWN\n"); else printf("INVALID no RAID config on this subdisk\n"); } else printf("DOWN no device found for this subdisk\n"); } } static int ata_raid_ioctl(u_long cmd, caddr_t data) { struct ata_ioc_raid_config *config = (struct ata_ioc_raid_config *)data; int *lun = (int *)data; int error = EOPNOTSUPP; switch (cmd) { case IOCATARAIDSTATUS: error = ata_raid_status(config); break; case IOCATARAIDCREATE: error = ata_raid_create(config); break; case IOCATARAIDDELETE: error = ata_raid_delete(*lun); break; case IOCATARAIDADDSPARE: error = ata_raid_addspare(config); break; case IOCATARAIDREBUILD: error = ata_raid_rebuild(*lun); break; } return error; } static void ata_raid_strategy(struct bio *bp) { struct ar_softc *rdp = bp->bio_disk->d_drv1; struct ata_request *request; caddr_t data; u_int64_t blkno, lba, blk = 0; int count, chunk, drv, par = 0, change = 0; if (!(rdp->status & AR_S_READY) || (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE)) { biofinish(bp, NULL, EIO); return; } bp->bio_resid = bp->bio_bcount; for (count = howmany(bp->bio_bcount, DEV_BSIZE), blkno = bp->bio_pblkno, data = bp->bio_data; count > 0; count -= chunk, blkno += chunk, data += (chunk * DEV_BSIZE)) { switch (rdp->type) { case AR_T_RAID1: drv = 0; lba = blkno; chunk = count; break; case AR_T_JBOD: case AR_T_SPAN: drv = 0; lba = blkno; while (lba >= rdp->disks[drv].sectors) lba -= rdp->disks[drv++].sectors; chunk = min(rdp->disks[drv].sectors - lba, count); break; case AR_T_RAID0: case AR_T_RAID01: chunk = blkno % rdp->interleave; drv = (blkno / 
rdp->interleave) % rdp->width; lba = (((blkno/rdp->interleave)/rdp->width)*rdp->interleave)+chunk; chunk = min(count, rdp->interleave - chunk); break; case AR_T_RAID5: drv = (blkno / rdp->interleave) % (rdp->width - 1); par = rdp->width - 1 - (blkno / (rdp->interleave * (rdp->width - 1))) % rdp->width; if (drv >= par) drv++; lba = ((blkno/rdp->interleave)/(rdp->width-1))*(rdp->interleave) + ((blkno%(rdp->interleave*(rdp->width-1)))%rdp->interleave); chunk = min(count, rdp->interleave - (lba % rdp->interleave)); break; default: printf("ar%d: unknown array type in ata_raid_strategy\n", rdp->lun); biofinish(bp, NULL, EIO); return; } /* offset on all but "first on HPTv2" */ if (!(drv == 0 && rdp->format == AR_F_HPTV2_RAID)) lba += rdp->offset_sectors; if (!(request = ata_raid_init_request(rdp, bp))) { biofinish(bp, NULL, EIO); return; } request->data = data; request->bytecount = chunk * DEV_BSIZE; request->u.ata.lba = lba; request->u.ata.count = request->bytecount / DEV_BSIZE; switch (rdp->type) { case AR_T_JBOD: case AR_T_SPAN: case AR_T_RAID0: if (((rdp->disks[drv].flags & (AR_DF_PRESENT|AR_DF_ONLINE)) == (AR_DF_PRESENT|AR_DF_ONLINE) && !rdp->disks[drv].dev)) { rdp->disks[drv].flags &= ~AR_DF_ONLINE; ata_raid_config_changed(rdp, 1); ata_free_request(request); biofinish(bp, NULL, EIO); return; } request->this = drv; request->dev = rdp->disks[request->this].dev; ata_raid_send_request(request); break; case AR_T_RAID1: case AR_T_RAID01: if ((rdp->disks[drv].flags & (AR_DF_PRESENT|AR_DF_ONLINE))==(AR_DF_PRESENT|AR_DF_ONLINE) && !rdp->disks[drv].dev) { rdp->disks[drv].flags &= ~AR_DF_ONLINE; change = 1; } if ((rdp->disks[drv + rdp->width].flags & (AR_DF_PRESENT|AR_DF_ONLINE))==(AR_DF_PRESENT|AR_DF_ONLINE) && !rdp->disks[drv + rdp->width].dev) { rdp->disks[drv + rdp->width].flags &= ~AR_DF_ONLINE; change = 1; } if (change) ata_raid_config_changed(rdp, 1); if (!(rdp->status & AR_S_READY)) { ata_free_request(request); biofinish(bp, NULL, EIO); return; } if (rdp->status & 
AR_S_REBUILDING) blk = ((lba / rdp->interleave) * rdp->width) * rdp->interleave + (rdp->interleave * (drv % rdp->width)) + lba % rdp->interleave;; if (bp->bio_cmd == BIO_READ) { int src_online = (rdp->disks[drv].flags & AR_DF_ONLINE); int mir_online = (rdp->disks[drv+rdp->width].flags & AR_DF_ONLINE); /* if mirror gone or close to last access on source */ if (!mir_online || ((src_online) && bp->bio_pblkno >= (rdp->disks[drv].last_lba - AR_PROXIMITY) && bp->bio_pblkno <= (rdp->disks[drv].last_lba + AR_PROXIMITY))) { rdp->toggle = 0; } /* if source gone or close to last access on mirror */ else if (!src_online || ((mir_online) && bp->bio_pblkno >= (rdp->disks[drv+rdp->width].last_lba-AR_PROXIMITY) && bp->bio_pblkno <= (rdp->disks[drv+rdp->width].last_lba+AR_PROXIMITY))) { drv += rdp->width; rdp->toggle = 1; } /* not close to any previous access, toggle */ else { if (rdp->toggle) rdp->toggle = 0; else { drv += rdp->width; rdp->toggle = 1; } } if ((rdp->status & AR_S_REBUILDING) && (blk <= rdp->rebuild_lba) && ((blk + chunk) > rdp->rebuild_lba)) { struct ata_composite *composite; struct ata_request *rebuild; int this; /* figure out what part to rebuild */ if (drv < rdp->width) this = drv + rdp->width; else this = drv - rdp->width; /* do we have a spare to rebuild on ? 
*/ if (rdp->disks[this].flags & AR_DF_SPARE) { if ((composite = ata_alloc_composite())) { if ((rebuild = ata_alloc_request())) { rdp->rebuild_lba = blk + chunk; bcopy(request, rebuild, sizeof(struct ata_request)); rebuild->this = this; rebuild->dev = rdp->disks[this].dev; rebuild->flags &= ~ATA_R_READ; rebuild->flags |= ATA_R_WRITE; mtx_init(&composite->lock, "ATA PseudoRAID rebuild lock", NULL, MTX_DEF); composite->residual = request->bytecount; composite->rd_needed |= (1 << drv); composite->wr_depend |= (1 << drv); composite->wr_needed |= (1 << this); composite->request[drv] = request; composite->request[this] = rebuild; request->composite = composite; rebuild->composite = composite; ata_raid_send_request(rebuild); } else { ata_free_composite(composite); printf("DOH! ata_alloc_request failed!\n"); } } else { printf("DOH! ata_alloc_composite failed!\n"); } } else if (rdp->disks[this].flags & AR_DF_ONLINE) { /* * if we got here we are a chunk of a RAID01 that * does not need a rebuild, but we need to increment * the rebuild_lba address to get the rebuild to * move to the next chunk correctly */ rdp->rebuild_lba = blk + chunk; } else printf("DOH! 
we didn't find the rebuild part\n"); } } if (bp->bio_cmd == BIO_WRITE) { if ((rdp->disks[drv+rdp->width].flags & AR_DF_ONLINE) || ((rdp->status & AR_S_REBUILDING) && (rdp->disks[drv+rdp->width].flags & AR_DF_SPARE) && ((blk < rdp->rebuild_lba) || ((blk <= rdp->rebuild_lba) && ((blk + chunk) > rdp->rebuild_lba))))) { if ((rdp->disks[drv].flags & AR_DF_ONLINE) || ((rdp->status & AR_S_REBUILDING) && (rdp->disks[drv].flags & AR_DF_SPARE) && ((blk < rdp->rebuild_lba) || ((blk <= rdp->rebuild_lba) && ((blk + chunk) > rdp->rebuild_lba))))) { struct ata_request *mirror; struct ata_composite *composite; int this = drv + rdp->width; if ((composite = ata_alloc_composite())) { if ((mirror = ata_alloc_request())) { if ((blk <= rdp->rebuild_lba) && ((blk + chunk) > rdp->rebuild_lba)) rdp->rebuild_lba = blk + chunk; bcopy(request, mirror, sizeof(struct ata_request)); mirror->this = this; mirror->dev = rdp->disks[this].dev; mtx_init(&composite->lock, "ATA PseudoRAID mirror lock", NULL, MTX_DEF); composite->residual = request->bytecount; composite->wr_needed |= (1 << drv); composite->wr_needed |= (1 << this); composite->request[drv] = request; composite->request[this] = mirror; request->composite = composite; mirror->composite = composite; ata_raid_send_request(mirror); rdp->disks[this].last_lba = bp->bio_pblkno + chunk; } else { ata_free_composite(composite); printf("DOH! ata_alloc_request failed!\n"); } } else { printf("DOH! 
ata_alloc_composite failed!\n"); } } else drv += rdp->width; } } request->this = drv; request->dev = rdp->disks[request->this].dev; ata_raid_send_request(request); rdp->disks[request->this].last_lba = bp->bio_pblkno + chunk; break; case AR_T_RAID5: if (((rdp->disks[drv].flags & (AR_DF_PRESENT|AR_DF_ONLINE)) == (AR_DF_PRESENT|AR_DF_ONLINE) && !rdp->disks[drv].dev)) { rdp->disks[drv].flags &= ~AR_DF_ONLINE; change = 1; } if (((rdp->disks[par].flags & (AR_DF_PRESENT|AR_DF_ONLINE)) == (AR_DF_PRESENT|AR_DF_ONLINE) && !rdp->disks[par].dev)) { rdp->disks[par].flags &= ~AR_DF_ONLINE; change = 1; } if (change) ata_raid_config_changed(rdp, 1); if (!(rdp->status & AR_S_READY)) { ata_free_request(request); biofinish(bp, NULL, EIO); return; } if (rdp->status & AR_S_DEGRADED) { /* do the XOR game if possible */ } else { request->this = drv; request->dev = rdp->disks[request->this].dev; if (bp->bio_cmd == BIO_READ) { ata_raid_send_request(request); } if (bp->bio_cmd == BIO_WRITE) { ata_raid_send_request(request); // sikre at læs-modify-skriv til hver disk er atomarisk. 
// par kopi af request // læse orgdata fra drv // skriv nydata til drv // læse parorgdata fra par // skriv orgdata xor parorgdata xor nydata til par } } break; default: printf("ar%d: unknown array type in ata_raid_strategy\n", rdp->lun); } } } static void ata_raid_done(struct ata_request *request) { struct ar_softc *rdp = request->driver; struct ata_composite *composite = NULL; struct bio *bp = request->bio; int i, mirror, finished = 0; switch (rdp->type) { case AR_T_JBOD: case AR_T_SPAN: case AR_T_RAID0: if (request->result) { rdp->disks[request->this].flags &= ~AR_DF_ONLINE; ata_raid_config_changed(rdp, 1); bp->bio_error = request->result; finished = 1; } else { bp->bio_resid -= request->donecount; if (!bp->bio_resid) finished = 1; } break; case AR_T_RAID1: case AR_T_RAID01: if (request->this < rdp->width) mirror = request->this + rdp->width; else mirror = request->this - rdp->width; if (request->result) { rdp->disks[request->this].flags &= ~AR_DF_ONLINE; ata_raid_config_changed(rdp, 1); } if (rdp->status & AR_S_READY) { u_int64_t blk = 0; if (rdp->status & AR_S_REBUILDING) blk = ((request->u.ata.lba / rdp->interleave) * rdp->width) * rdp->interleave + (rdp->interleave * (request->this % rdp->width)) + request->u.ata.lba % rdp->interleave; if (bp->bio_cmd == BIO_READ) { /* is this a rebuild composite */ if ((composite = request->composite)) { mtx_lock(&composite->lock); /* handle the read part of a rebuild composite */ if (request->flags & ATA_R_READ) { /* if read failed array is now broken */ if (request->result) { rdp->disks[request->this].flags &= ~AR_DF_ONLINE; ata_raid_config_changed(rdp, 1); bp->bio_error = request->result; rdp->rebuild_lba = blk; finished = 1; } /* good data, update how far we've gotten */ else { bp->bio_resid -= request->donecount; composite->residual -= request->donecount; if (!composite->residual) { if (composite->wr_done & (1 << mirror)) finished = 1; } } } /* handle the write part of a rebuild composite */ else if (request->flags & 
ATA_R_WRITE) { if (composite->rd_done & (1 << mirror)) { if (request->result) { printf("DOH! rebuild failed\n"); /* XXX SOS */ rdp->rebuild_lba = blk; } if (!composite->residual) finished = 1; } } mtx_unlock(&composite->lock); } /* if read failed retry on the mirror */ else if (request->result) { request->dev = rdp->disks[mirror].dev; request->flags &= ~ATA_R_TIMEOUT; ata_raid_send_request(request); return; } /* we have good data */ else { bp->bio_resid -= request->donecount; if (!bp->bio_resid) finished = 1; } } else if (bp->bio_cmd == BIO_WRITE) { /* do we have a mirror or rebuild to deal with ? */ if ((composite = request->composite)) { mtx_lock(&composite->lock); if (composite->wr_done & (1 << mirror)) { if (request->result) { if (composite->request[mirror]->result) { printf("DOH! all disks failed and got here\n"); bp->bio_error = EIO; } if (rdp->status & AR_S_REBUILDING) { rdp->rebuild_lba = blk; printf("DOH! rebuild failed\n"); /* XXX SOS */ } bp->bio_resid -= composite->request[mirror]->donecount; composite->residual -= composite->request[mirror]->donecount; } else { bp->bio_resid -= request->donecount; composite->residual -= request->donecount; } if (!composite->residual) finished = 1; } mtx_unlock(&composite->lock); } /* no mirror we are done */ else { bp->bio_resid -= request->donecount; if (!bp->bio_resid) finished = 1; } } } else biofinish(bp, NULL, request->result); break; case AR_T_RAID5: if (request->result) { rdp->disks[request->this].flags &= ~AR_DF_ONLINE; ata_raid_config_changed(rdp, 1); if (rdp->status & AR_S_READY) { if (bp->bio_cmd == BIO_READ) { /* do the XOR game to recover data */ } if (bp->bio_cmd == BIO_WRITE) { /* if the parity failed we're OK sortof */ /* otherwise wee need to do the XOR long dance */ } finished = 1; } else biofinish(bp, NULL, request->result); } else { // did we have an XOR game going ?? 
bp->bio_resid -= request->donecount; if (!bp->bio_resid) finished = 1; } break; default: printf("ar%d: unknown array type in ata_raid_done\n", rdp->lun); } if (finished) { if ((rdp->status & AR_S_REBUILDING) && rdp->rebuild_lba >= rdp->total_sectors) { int disk; for (disk = 0; disk < rdp->total_disks; disk++) { if ((rdp->disks[disk].flags & (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_SPARE)) == (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_SPARE)) { rdp->disks[disk].flags &= ~AR_DF_SPARE; rdp->disks[disk].flags |= AR_DF_ONLINE; } } rdp->status &= ~AR_S_REBUILDING; ata_raid_config_changed(rdp, 1); } if (!bp->bio_resid) biodone(bp); } if (composite) { if (finished) { /* we are done with this composite, free all resources */ for (i = 0; i < 32; i++) { if (composite->rd_needed & (1 << i) || composite->wr_needed & (1 << i)) { ata_free_request(composite->request[i]); } } mtx_destroy(&composite->lock); ata_free_composite(composite); } } else ata_free_request(request); } static int ata_raid_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct disk *dp = arg; struct ar_softc *rdp = dp->d_drv1; struct bio bp; /* length zero is special and really means flush buffers to media */ if (!length) { int disk, error; for (disk = 0, error = 0; disk < rdp->total_disks; disk++) if (rdp->disks[disk].dev) error |= ata_controlcmd(rdp->disks[disk].dev, ATA_FLUSHCACHE, 0, 0, 0); return (error ? 
EIO : 0); } bzero(&bp, sizeof(struct bio)); bp.bio_disk = dp; bp.bio_pblkno = offset / DEV_BSIZE; bp.bio_bcount = length; bp.bio_data = virtual; bp.bio_cmd = BIO_WRITE; ata_raid_strategy(&bp); return bp.bio_error; } static void ata_raid_config_changed(struct ar_softc *rdp, int writeback) { int disk, count, status; mtx_lock(&rdp->lock); /* set default all working mode */ status = rdp->status; rdp->status &= ~AR_S_DEGRADED; rdp->status |= AR_S_READY; /* make sure all lost drives are accounted for */ for (disk = 0; disk < rdp->total_disks; disk++) { if (!(rdp->disks[disk].flags & AR_DF_PRESENT)) rdp->disks[disk].flags &= ~AR_DF_ONLINE; } /* depending on RAID type figure out our health status */ switch (rdp->type) { case AR_T_JBOD: case AR_T_SPAN: case AR_T_RAID0: for (disk = 0; disk < rdp->total_disks; disk++) if (!(rdp->disks[disk].flags & AR_DF_ONLINE)) rdp->status &= ~AR_S_READY; break; case AR_T_RAID1: case AR_T_RAID01: for (disk = 0; disk < rdp->width; disk++) { if (!(rdp->disks[disk].flags & AR_DF_ONLINE) && !(rdp->disks[disk + rdp->width].flags & AR_DF_ONLINE)) { rdp->status &= ~AR_S_READY; } else if (((rdp->disks[disk].flags & AR_DF_ONLINE) && !(rdp->disks[disk + rdp->width].flags & AR_DF_ONLINE)) || (!(rdp->disks[disk].flags & AR_DF_ONLINE) && (rdp->disks [disk + rdp->width].flags & AR_DF_ONLINE))) { rdp->status |= AR_S_DEGRADED; } } break; case AR_T_RAID5: for (count = 0, disk = 0; disk < rdp->total_disks; disk++) { if (!(rdp->disks[disk].flags & AR_DF_ONLINE)) count++; } if (count) { if (count > 1) rdp->status &= ~AR_S_READY; else rdp->status |= AR_S_DEGRADED; } break; default: rdp->status &= ~AR_S_READY; } if (rdp->status != status) { if (!(rdp->status & AR_S_READY)) { printf("ar%d: FAILURE - %s array broken\n", rdp->lun, ata_raid_type(rdp)); } else if (rdp->status & AR_S_DEGRADED) { if (rdp->type & (AR_T_RAID1 | AR_T_RAID01)) printf("ar%d: WARNING - mirror", rdp->lun); else printf("ar%d: WARNING - parity", rdp->lun); printf(" protection lost. 
%s array in DEGRADED mode\n", ata_raid_type(rdp)); } } mtx_unlock(&rdp->lock); if (writeback) ata_raid_write_metadata(rdp); } static int ata_raid_status(struct ata_ioc_raid_config *config) { struct ar_softc *rdp; int i; if (!(rdp = ata_raid_arrays[config->lun])) return ENXIO; config->type = rdp->type; config->total_disks = rdp->total_disks; for (i = 0; i < rdp->total_disks; i++ ) { if ((rdp->disks[i].flags & AR_DF_PRESENT) && rdp->disks[i].dev) config->disks[i] = device_get_unit(rdp->disks[i].dev); else config->disks[i] = -1; } config->interleave = rdp->interleave; config->status = rdp->status; config->progress = 100 * rdp->rebuild_lba / rdp->total_sectors; return 0; } static int ata_raid_create(struct ata_ioc_raid_config *config) { struct ar_softc *rdp; device_t subdisk; int array, disk; int ctlr = 0, disk_size = 0, total_disks = 0; for (array = 0; array < MAX_ARRAYS; array++) { if (!ata_raid_arrays[array]) break; } if (array >= MAX_ARRAYS) return ENOSPC; if (!(rdp = (struct ar_softc*)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO))) { printf("ar%d: no memory for metadata storage\n", array); return ENOMEM; } for (disk = 0; disk < config->total_disks; disk++) { if ((subdisk = devclass_get_device(ata_raid_sub_devclass, config->disks[disk]))) { struct ata_raid_subdisk *ars = device_get_softc(subdisk); /* is device already assigned to another array ? */ if (ars->raid[rdp->volume]) { config->disks[disk] = -1; free(rdp, M_AR); return EBUSY; } rdp->disks[disk].dev = device_get_parent(subdisk); switch (pci_get_vendor(GRANDPARENT(rdp->disks[disk].dev))) { case ATA_HIGHPOINT_ID: /* * we need some way to decide if it should be v2 or v3 * for now just use v2 since the v3 BIOS knows how to * handle that as well. 
*/ ctlr = AR_F_HPTV2_RAID; rdp->disks[disk].sectors = HPTV3_LBA(rdp->disks[disk].dev); break; case ATA_INTEL_ID: ctlr = AR_F_INTEL_RAID; rdp->disks[disk].sectors = INTEL_LBA(rdp->disks[disk].dev); break; case ATA_ITE_ID: ctlr = AR_F_ITE_RAID; rdp->disks[disk].sectors = ITE_LBA(rdp->disks[disk].dev); break; case 0: /* XXX SOS cover up for bug in our PCI code */ case ATA_PROMISE_ID: ctlr = AR_F_PROMISE_RAID; rdp->disks[disk].sectors = PROMISE_LBA(rdp->disks[disk].dev); break; case ATA_SIS_ID: ctlr = AR_F_SIS_RAID; rdp->disks[disk].sectors = SIS_LBA(rdp->disks[disk].dev); break; case ATA_ATI_ID: case ATA_VIA_ID: ctlr = AR_F_VIA_RAID; rdp->disks[disk].sectors = VIA_LBA(rdp->disks[disk].dev); break; default: /* XXX SOS * right, so here we are, we have an ATA chip and we want * to create a RAID and store the metadata. * we need to find a way to tell what kind of metadata this * hardware's BIOS might be using (good ideas are welcomed) * for now we just use our own native FreeBSD format. * the only way to get support for the BIOS format is to * setup the RAID from there, in that case we pickup the * metadata format from the disks (if we support it). */ printf("WARNING!! - not able to determine metadata format\n" "WARNING!! - Using FreeBSD PsuedoRAID metadata\n" "If that is not what you want, use the BIOS to " "create the array\n"); ctlr = AR_F_FREEBSD_RAID; rdp->disks[disk].sectors = PROMISE_LBA(rdp->disks[disk].dev); break; } /* we need all disks to be of the same format */ if ((rdp->format & AR_F_FORMAT_MASK) && (rdp->format & AR_F_FORMAT_MASK) != (ctlr & AR_F_FORMAT_MASK)) { free(rdp, M_AR); return EXDEV; } else rdp->format = ctlr; /* use the smallest disk of the lots size */ /* gigabyte boundry ??? 
XXX SOS */ if (disk_size) disk_size = min(rdp->disks[disk].sectors, disk_size); else disk_size = rdp->disks[disk].sectors; rdp->disks[disk].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE); total_disks++; } else { config->disks[disk] = -1; free(rdp, M_AR); return ENXIO; } } if (total_disks != config->total_disks) { free(rdp, M_AR); return ENODEV; } switch (config->type) { case AR_T_JBOD: case AR_T_SPAN: case AR_T_RAID0: break; case AR_T_RAID1: if (total_disks != 2) { free(rdp, M_AR); return EPERM; } break; case AR_T_RAID01: if (total_disks % 2 != 0) { free(rdp, M_AR); return EPERM; } break; case AR_T_RAID5: if (total_disks < 3) { free(rdp, M_AR); return EPERM; } break; default: free(rdp, M_AR); return EOPNOTSUPP; } rdp->type = config->type; rdp->lun = array; if (rdp->type == AR_T_RAID0 || rdp->type == AR_T_RAID01 || rdp->type == AR_T_RAID5) { int bit = 0; while (config->interleave >>= 1) bit++; rdp->interleave = 1 << bit; } rdp->offset_sectors = 0; /* values that depend on metadata format */ switch (rdp->format) { case AR_F_ADAPTEC_RAID: rdp->interleave = min(max(32, rdp->interleave), 128); /*+*/ break; case AR_F_HPTV2_RAID: rdp->interleave = min(max(8, rdp->interleave), 128); /*+*/ rdp->offset_sectors = HPTV2_LBA(x) + 1; break; case AR_F_HPTV3_RAID: rdp->interleave = min(max(32, rdp->interleave), 4096); /*+*/ break; case AR_F_INTEL_RAID: rdp->interleave = min(max(8, rdp->interleave), 256); /*+*/ break; case AR_F_ITE_RAID: rdp->interleave = min(max(2, rdp->interleave), 128); /*+*/ break; case AR_F_LSIV2_RAID: rdp->interleave = min(max(2, rdp->interleave), 4096); break; case AR_F_LSIV3_RAID: rdp->interleave = min(max(2, rdp->interleave), 256); break; case AR_F_PROMISE_RAID: rdp->interleave = min(max(2, rdp->interleave), 2048); /*+*/ break; case AR_F_SII_RAID: rdp->interleave = min(max(8, rdp->interleave), 256); /*+*/ break; case AR_F_SIS_RAID: rdp->interleave = min(max(32, rdp->interleave), 512); /*+*/ break; case AR_F_VIA_RAID: rdp->interleave = min(max(8, 
rdp->interleave), 128); /*+*/ break; } rdp->total_disks = total_disks; rdp->width = total_disks / (rdp->type & (AR_RAID1 | AR_T_RAID01) ? 2 : 1); rdp->total_sectors = disk_size * (rdp->width - (rdp->type == AR_RAID5)); rdp->heads = 255; rdp->sectors = 63; rdp->cylinders = rdp->total_sectors / (255 * 63); rdp->rebuild_lba = 0; rdp->status |= AR_S_READY; /* we are committed to this array, grap the subdisks */ for (disk = 0; disk < config->total_disks; disk++) { if ((subdisk = devclass_get_device(ata_raid_sub_devclass, config->disks[disk]))) { struct ata_raid_subdisk *ars = device_get_softc(subdisk); ars->raid[rdp->volume] = rdp; ars->disk_number[rdp->volume] = disk; } } ata_raid_attach(rdp, 1); ata_raid_arrays[array] = rdp; config->lun = array; return 0; } static int ata_raid_delete(int array) { struct ar_softc *rdp; device_t subdisk; int disk; if (!(rdp = ata_raid_arrays[array])) return ENXIO; rdp->status &= ~AR_S_READY; if (rdp->disk) disk_destroy(rdp->disk); for (disk = 0; disk < rdp->total_disks; disk++) { if ((rdp->disks[disk].flags & AR_DF_PRESENT) && rdp->disks[disk].dev) { if ((subdisk = devclass_get_device(ata_raid_sub_devclass, device_get_unit(rdp->disks[disk].dev)))) { struct ata_raid_subdisk *ars = device_get_softc(subdisk); if (ars->raid[rdp->volume] != rdp) /* XXX SOS */ device_printf(subdisk, "DOH! this disk doesn't belong\n"); if (ars->disk_number[rdp->volume] != disk) /* XXX SOS */ device_printf(subdisk, "DOH! 
this disk number is wrong\n"); ars->raid[rdp->volume] = NULL; ars->disk_number[rdp->volume] = -1; } rdp->disks[disk].flags = 0; } } ata_raid_wipe_metadata(rdp); ata_raid_arrays[array] = NULL; free(rdp, M_AR); return 0; } static int ata_raid_addspare(struct ata_ioc_raid_config *config) { struct ar_softc *rdp; device_t subdisk; int disk; if (!(rdp = ata_raid_arrays[config->lun])) return ENXIO; if (!(rdp->status & AR_S_DEGRADED) || !(rdp->status & AR_S_READY)) return ENXIO; if (rdp->status & AR_S_REBUILDING) return EBUSY; switch (rdp->type) { case AR_T_RAID1: case AR_T_RAID01: case AR_T_RAID5: for (disk = 0; disk < rdp->total_disks; disk++ ) { if (((rdp->disks[disk].flags & (AR_DF_PRESENT | AR_DF_ONLINE)) == (AR_DF_PRESENT | AR_DF_ONLINE)) && rdp->disks[disk].dev) continue; if ((subdisk = devclass_get_device(ata_raid_sub_devclass, config->disks[0] ))) { struct ata_raid_subdisk *ars = device_get_softc(subdisk); if (ars->raid[rdp->volume]) return EBUSY; /* XXX SOS validate size etc etc */ ars->raid[rdp->volume] = rdp; ars->disk_number[rdp->volume] = disk; rdp->disks[disk].dev = device_get_parent(subdisk); rdp->disks[disk].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_SPARE); device_printf(rdp->disks[disk].dev, "inserted into ar%d disk%d as spare\n", rdp->lun, disk); ata_raid_config_changed(rdp, 1); return 0; } } return ENXIO; default: return EPERM; } } static int ata_raid_rebuild(int array) { struct ar_softc *rdp; int disk, count; if (!(rdp = ata_raid_arrays[array])) return ENXIO; /* XXX SOS we should lock the rdp softc here */ if (!(rdp->status & AR_S_DEGRADED) || !(rdp->status & AR_S_READY)) return ENXIO; if (rdp->status & AR_S_REBUILDING) return EBUSY; switch (rdp->type) { case AR_T_RAID1: case AR_T_RAID01: case AR_T_RAID5: for (count = 0, disk = 0; disk < rdp->total_disks; disk++ ) { if (((rdp->disks[disk].flags & (AR_DF_PRESENT|AR_DF_ASSIGNED|AR_DF_ONLINE|AR_DF_SPARE)) == (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_SPARE)) && rdp->disks[disk].dev) { count++; } } if 
(count) { rdp->rebuild_lba = 0; rdp->status |= AR_S_REBUILDING; return 0; } return EIO; default: return EPERM; } } static int ata_raid_read_metadata(device_t subdisk) { devclass_t pci_devclass = devclass_find("pci"); devclass_t devclass=device_get_devclass(GRANDPARENT(GRANDPARENT(subdisk))); /* prioritize vendor native metadata layout if possible */ if (devclass == pci_devclass) { switch (pci_get_vendor(GRANDPARENT(device_get_parent(subdisk)))) { case ATA_HIGHPOINT_ID: if (ata_raid_hptv3_read_meta(subdisk, ata_raid_arrays)) return 0; if (ata_raid_hptv2_read_meta(subdisk, ata_raid_arrays)) return 0; break; case ATA_INTEL_ID: if (ata_raid_intel_read_meta(subdisk, ata_raid_arrays)) return 0; break; case ATA_ITE_ID: if (ata_raid_ite_read_meta(subdisk, ata_raid_arrays)) return 0; break; case ATA_NVIDIA_ID: if (ata_raid_nvidia_read_meta(subdisk, ata_raid_arrays)) return 0; break; case 0: /* XXX SOS cover up for bug in our PCI code */ case ATA_PROMISE_ID: if (ata_raid_promise_read_meta(subdisk, ata_raid_arrays, 0)) return 0; break; case ATA_ATI_ID: case ATA_SILICON_IMAGE_ID: if (ata_raid_sii_read_meta(subdisk, ata_raid_arrays)) return 0; break; case ATA_SIS_ID: if (ata_raid_sis_read_meta(subdisk, ata_raid_arrays)) return 0; break; case ATA_VIA_ID: if (ata_raid_via_read_meta(subdisk, ata_raid_arrays)) return 0; break; } } /* handle controllers that have multiple layout possibilities */ /* NOTE: the order of these are not insignificant */ /* Adaptec HostRAID */ if (ata_raid_adaptec_read_meta(subdisk, ata_raid_arrays)) return 0; /* LSILogic v3 and v2 */ if (ata_raid_lsiv3_read_meta(subdisk, ata_raid_arrays)) return 0; if (ata_raid_lsiv2_read_meta(subdisk, ata_raid_arrays)) return 0; /* if none of the above matched, try FreeBSD native format */ return ata_raid_promise_read_meta(subdisk, ata_raid_arrays, 1); } static int ata_raid_write_metadata(struct ar_softc *rdp) { switch (rdp->format) { case AR_F_FREEBSD_RAID: case AR_F_PROMISE_RAID: return 
ata_raid_promise_write_meta(rdp);

    case AR_F_HPTV3_RAID:
    case AR_F_HPTV2_RAID:
	/*
	 * always write HPT v2 metadata, the v3 BIOS knows it as well.
	 * this is handy since we cannot know what version BIOS is on there
	 */
	return ata_raid_hptv2_write_meta(rdp);

    case AR_F_INTEL_RAID:
	return ata_raid_intel_write_meta(rdp);

    case AR_F_SIS_RAID:
	return ata_raid_sis_write_meta(rdp);

    case AR_F_VIA_RAID:
	return ata_raid_via_write_meta(rdp);
#if 0
    case AR_F_HPTV3_RAID:
	return ata_raid_hptv3_write_meta(rdp);

    case AR_F_ADAPTEC_RAID:
	return ata_raid_adaptec_write_meta(rdp);

    case AR_F_ITE_RAID:
	return ata_raid_ite_write_meta(rdp);

    case AR_F_LSIV2_RAID:
	return ata_raid_lsiv2_write_meta(rdp);

    case AR_F_LSIV3_RAID:
	return ata_raid_lsiv3_write_meta(rdp);

    case AR_F_NVIDIA_RAID:
	return ata_raid_nvidia_write_meta(rdp);

    case AR_F_SII_RAID:
	return ata_raid_sii_write_meta(rdp);
#endif
    default:
	printf("ar%d: writing of %s metadata is NOT supported yet\n",
	       rdp->lun, ata_raid_format(rdp));
    }
    return -1;
}

/*
 * Overwrite the on-disk metadata area of every member disk with zeros
 * so the array is no longer recognized on the next probe.  The location
 * and size of the area depend on the metadata format.  Returns 0, or
 * EIO if any write failed, ENOMEM/ENXIO on setup errors.
 */
static int
ata_raid_wipe_metadata(struct ar_softc *rdp)
{
    int disk, error = 0;
    u_int64_t lba;
    u_int32_t size;
    u_int8_t *meta;

    for (disk = 0; disk < rdp->total_disks; disk++) {
	if (rdp->disks[disk].dev) {
	    switch (rdp->format) {
	    case AR_F_ADAPTEC_RAID:
		lba = ADP_LBA(rdp->disks[disk].dev);
		size = sizeof(struct adaptec_raid_conf);
		break;

	    case AR_F_HPTV2_RAID:
		lba = HPTV2_LBA(rdp->disks[disk].dev);
		size = sizeof(struct hptv2_raid_conf);
		break;

	    case AR_F_HPTV3_RAID:
		lba = HPTV3_LBA(rdp->disks[disk].dev);
		size = sizeof(struct hptv3_raid_conf);
		break;

	    case AR_F_INTEL_RAID:
		lba = INTEL_LBA(rdp->disks[disk].dev);
		size = 3 * 512;         /* XXX SOS */
		break;

	    case AR_F_ITE_RAID:
		lba = ITE_LBA(rdp->disks[disk].dev);
		size = sizeof(struct ite_raid_conf);
		break;

	    case AR_F_LSIV2_RAID:
		lba = LSIV2_LBA(rdp->disks[disk].dev);
		size = sizeof(struct lsiv2_raid_conf);
		break;

	    case AR_F_LSIV3_RAID:
		lba = LSIV3_LBA(rdp->disks[disk].dev);
		size = sizeof(struct lsiv3_raid_conf);
		break;

	    case AR_F_NVIDIA_RAID:
		lba = NVIDIA_LBA(rdp->disks[disk].dev);
		size = sizeof(struct nvidia_raid_conf);
		break;

	    case AR_F_FREEBSD_RAID:
	    case AR_F_PROMISE_RAID:
		lba = PROMISE_LBA(rdp->disks[disk].dev);
		size = sizeof(struct promise_raid_conf);
		break;

	    case AR_F_SII_RAID:
		lba = SII_LBA(rdp->disks[disk].dev);
		size = sizeof(struct sii_raid_conf);
		break;

	    case AR_F_SIS_RAID:
		lba = SIS_LBA(rdp->disks[disk].dev);
		size = sizeof(struct sis_raid_conf);
		break;

	    case AR_F_VIA_RAID:
		lba = VIA_LBA(rdp->disks[disk].dev);
		size = sizeof(struct via_raid_conf);
		break;

	    default:
		printf("ar%d: wiping of %s metadata is NOT supported yet\n",
		       rdp->lun, ata_raid_format(rdp));
		return ENXIO;
	    }
	    /* write an all-zero buffer over the metadata area */
	    if (!(meta = malloc(size, M_AR, M_NOWAIT | M_ZERO)))
		return ENOMEM;
	    if (ata_raid_rw(rdp->disks[disk].dev, lba, meta, size,
			    ATA_R_WRITE | ATA_R_DIRECT)) {
		device_printf(rdp->disks[disk].dev, "wipe metadata failed\n");
		error = EIO;
	    }
	    free(meta, M_AR);
	}
    }
    return error;
}

/* Adaptec HostRAID Metadata */
/*
 * Read and parse Adaptec HostRAID metadata from a subdisk's parent
 * device and fold it into the generic ar_softc array table.
 * Returns 1 if this disk was claimed, 0 otherwise (ENOMEM on alloc fail).
 */
static int
ata_raid_adaptec_read_meta(device_t dev, struct ar_softc **raidp)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    device_t parent = device_get_parent(dev);
    struct adaptec_raid_conf *meta;
    struct ar_softc *raid;
    int array, disk, retval = 0;

    if (!(meta = (struct adaptec_raid_conf *)
	  malloc(sizeof(struct adaptec_raid_conf), M_AR, M_NOWAIT | M_ZERO)))
	return ENOMEM;

    if (ata_raid_rw(parent, ADP_LBA(parent),
		    meta, sizeof(struct adaptec_raid_conf), ATA_R_READ)) {
	if (testing || bootverbose)
	    device_printf(parent, "Adaptec read metadata failed\n");
	goto adaptec_out;
    }

    /* check if this is a Adaptec RAID struct */
    if (meta->magic_0 != ADP_MAGIC_0 || meta->magic_3 != ADP_MAGIC_3) {
	if (testing || bootverbose)
	    device_printf(parent, "Adaptec check1 failed\n");
	goto adaptec_out;
    }

    if (testing || bootverbose)
	ata_raid_adaptec_print_meta(meta);

    /* now convert Adaptec metadata into our generic form */
    for (array = 0; array < MAX_ARRAYS; array++) {
	if (!raidp[array]) {
	    raidp[array] = (struct ar_softc
*)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO);
	    if (!raidp[array]) {
		device_printf(parent, "failed to allocate metadata storage\n");
		goto adaptec_out;
	    }
	}
	raid = raidp[array];
	if (raid->format && (raid->format != AR_F_ADAPTEC_RAID))
	    continue;

	if (raid->magic_0 && raid->magic_0 != meta->configs[0].magic_0)
	    continue;

	/* only take the config if it is newer than what we already have */
	if (!meta->generation || be32toh(meta->generation) > raid->generation) {
	    switch (meta->configs[0].type) {
	    case ADP_T_RAID0:
		raid->magic_0 = meta->configs[0].magic_0;
		raid->type = AR_T_RAID0;
		raid->interleave = 1 << (meta->configs[0].stripe_shift >> 1);
		raid->width = be16toh(meta->configs[0].total_disks);
		break;

	    case ADP_T_RAID1:
		raid->magic_0 = meta->configs[0].magic_0;
		raid->type = AR_T_RAID1;
		raid->width = be16toh(meta->configs[0].total_disks) / 2;
		break;

	    default:
		device_printf(parent, "Adaptec unknown RAID type 0x%02x\n",
			      meta->configs[0].type);
		free(raidp[array], M_AR);
		raidp[array] = NULL;
		goto adaptec_out;
	    }

	    /* Adaptec metadata is big-endian on disk */
	    raid->format = AR_F_ADAPTEC_RAID;
	    raid->generation = be32toh(meta->generation);
	    raid->total_disks = be16toh(meta->configs[0].total_disks);
	    raid->total_sectors = be32toh(meta->configs[0].sectors);
	    raid->heads = 255;
	    raid->sectors = 63;
	    raid->cylinders = raid->total_sectors / (63 * 255);
	    raid->offset_sectors = 0;
	    raid->rebuild_lba = 0;
	    raid->lun = array;
	    strncpy(raid->name, meta->configs[0].name,
		    min(sizeof(raid->name), sizeof(meta->configs[0].name)));

	    /* clear out any old info */
	    if (raid->generation) {
		for (disk = 0; disk < raid->total_disks; disk++) {
		    raid->disks[disk].dev = NULL;
		    raid->disks[disk].flags = 0;
		}
	    }
	}
	if (be32toh(meta->generation) >= raid->generation) {
	    struct ata_device *atadev = device_get_softc(parent);
	    struct ata_channel *ch = device_get_softc(GRANDPARENT(dev));
	    /* member index derived from channel unit and master/slave */
	    int disk_number = (ch->unit << !(ch->flags & ATA_NO_SLAVE)) +
			      ATA_DEV(atadev->unit);

	    raid->disks[disk_number].dev = parent;
	    raid->disks[disk_number].sectors =
		be32toh(meta->configs[disk_number + 1].sectors);
	    raid->disks[disk_number].flags =
		(AR_DF_ONLINE | AR_DF_PRESENT | AR_DF_ASSIGNED);
	    ars->raid[raid->volume] = raid;
	    ars->disk_number[raid->volume] = disk_number;
	    retval = 1;
	}
	break;
    }

adaptec_out:
    free(meta, M_AR);
    return retval;
}

/* Highpoint V2 RocketRAID Metadata */
/*
 * Read and parse HighPoint v2 metadata from a subdisk's parent device
 * and fold it into the generic ar_softc array table.  Returns 1 if
 * the disk was claimed, 0 otherwise (ENOMEM on alloc failure).
 */
static int
ata_raid_hptv2_read_meta(device_t dev, struct ar_softc **raidp)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    device_t parent = device_get_parent(dev);
    struct hptv2_raid_conf *meta;
    struct ar_softc *raid = NULL;
    int array, disk_number = 0, retval = 0;

    if (!(meta = (struct hptv2_raid_conf *)
	  malloc(sizeof(struct hptv2_raid_conf), M_AR, M_NOWAIT | M_ZERO)))
	return ENOMEM;

    if (ata_raid_rw(parent, HPTV2_LBA(parent),
		    meta, sizeof(struct hptv2_raid_conf), ATA_R_READ)) {
	if (testing || bootverbose)
	    device_printf(parent, "HighPoint (v2) read metadata failed\n");
	goto hptv2_out;
    }

    /* check if this is a HighPoint v2 RAID struct */
    if (meta->magic != HPTV2_MAGIC_OK && meta->magic != HPTV2_MAGIC_BAD) {
	if (testing || bootverbose)
	    device_printf(parent, "HighPoint (v2) check1 failed\n");
	goto hptv2_out;
    }

    /* is this disk defined, or an old leftover/spare ? */
    if (!meta->magic_0) {
	if (testing || bootverbose)
	    device_printf(parent, "HighPoint (v2) check2 failed\n");
	goto hptv2_out;
    }

    if (testing || bootverbose)
	ata_raid_hptv2_print_meta(meta);

    /* now convert HighPoint (v2) metadata into our generic form */
    for (array = 0; array < MAX_ARRAYS; array++) {
	if (!raidp[array]) {
	    raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc),
						     M_AR, M_NOWAIT | M_ZERO);
	    if (!raidp[array]) {
		device_printf(parent, "failed to allocate metadata storage\n");
		goto hptv2_out;
	    }
	}
	raid = raidp[array];
	if (raid->format && (raid->format != AR_F_HPTV2_RAID))
	    continue;

	switch (meta->type) {
	case HPTV2_T_RAID0:
	    /* RAID0 entries can actually encode RAID1/RAID0+1 via order */
	    if ((meta->order & (HPTV2_O_RAID0|HPTV2_O_OK)) ==
		(HPTV2_O_RAID0|HPTV2_O_OK))
		goto highpoint_raid1;
	    if (meta->order & (HPTV2_O_RAID0 | HPTV2_O_RAID1))
		goto highpoint_raid01;
	    if (raid->magic_0 && raid->magic_0 != meta->magic_0)
		continue;
	    raid->magic_0 = meta->magic_0;
	    raid->type = AR_T_RAID0;
	    raid->interleave = 1 << meta->stripe_shift;
	    disk_number = meta->disk_number;
	    if (!(meta->order & HPTV2_O_OK))
		meta->magic = 0;        /* mark bad */
	    break;

	case HPTV2_T_RAID1:
highpoint_raid1:
	    if (raid->magic_0 && raid->magic_0 != meta->magic_0)
		continue;
	    raid->magic_0 = meta->magic_0;
	    raid->type = AR_T_RAID1;
	    disk_number = (meta->disk_number > 0);
	    break;

	case HPTV2_T_RAID01_RAID0:
highpoint_raid01:
	    if (meta->order & HPTV2_O_RAID0) {
		if ((raid->magic_0 && raid->magic_0 != meta->magic_0) ||
		    (raid->magic_1 && raid->magic_1 != meta->magic_1))
		    continue;
		raid->magic_0 = meta->magic_0;
		raid->magic_1 = meta->magic_1;
		raid->type = AR_T_RAID01;
		raid->interleave = 1 << meta->stripe_shift;
		disk_number = meta->disk_number;
	    }
	    else {
		if (raid->magic_1 && raid->magic_1 != meta->magic_1)
		    continue;
		raid->magic_1 = meta->magic_1;
		raid->type = AR_T_RAID01;
		raid->interleave = 1 << meta->stripe_shift;
		/* mirror half lives after the stripe half */
		disk_number = meta->disk_number + meta->array_width;
		if (!(meta->order & HPTV2_O_RAID1))
		    meta->magic = 0;    /* mark bad */
	    }
	    break;

	case HPTV2_T_SPAN:
	    if (raid->magic_0 &&
raid->magic_0 != meta->magic_0)
		continue;
	    raid->magic_0 = meta->magic_0;
	    raid->type = AR_T_SPAN;
	    disk_number = meta->disk_number;
	    break;

	default:
	    device_printf(parent, "Highpoint (v2) unknown RAID type 0x%02x\n",
			  meta->type);
	    free(raidp[array], M_AR);
	    raidp[array] = NULL;
	    goto hptv2_out;
	}

	raid->format |= AR_F_HPTV2_RAID;
	raid->disks[disk_number].dev = parent;
	raid->disks[disk_number].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED);
	raid->lun = array;
	strncpy(raid->name, meta->name_1,
		min(sizeof(raid->name), sizeof(meta->name_1)));
	/* only a HPTV2_MAGIC_OK disk carries trusted geometry info */
	if (meta->magic == HPTV2_MAGIC_OK) {
	    raid->disks[disk_number].flags |= AR_DF_ONLINE;
	    raid->width = meta->array_width;
	    raid->total_sectors = meta->total_sectors;
	    raid->heads = 255;
	    raid->sectors = 63;
	    raid->cylinders = raid->total_sectors / (63 * 255);
	    raid->offset_sectors = HPTV2_LBA(parent) + 1;
	    raid->rebuild_lba = meta->rebuild_lba;
	    raid->disks[disk_number].sectors =
		raid->total_sectors / raid->width;
	}
	else
	    raid->disks[disk_number].flags &= ~AR_DF_ONLINE;

	if ((raid->type & AR_T_RAID0) && (raid->total_disks < raid->width))
	    raid->total_disks = raid->width;
	if (disk_number >= raid->total_disks)
	    raid->total_disks = disk_number + 1;
	ars->raid[raid->volume] = raid;
	ars->disk_number[raid->volume] = disk_number;
	retval = 1;
	break;
    }

hptv2_out:
    free(meta, M_AR);
    return retval;
}

/*
 * Write HighPoint v2 metadata to every member disk of the array.
 * Returns 0, EIO if any disk write failed, or ENOMEM/ENODEV.
 */
static int
ata_raid_hptv2_write_meta(struct ar_softc *rdp)
{
    struct hptv2_raid_conf *meta;
    struct timeval timestamp;
    int disk, error = 0;

    if (!(meta = (struct hptv2_raid_conf *)
	  malloc(sizeof(struct hptv2_raid_conf), M_AR, M_NOWAIT | M_ZERO))) {
	printf("ar%d: failed to allocate metadata storage\n", rdp->lun);
	return ENOMEM;
    }

    /* derive the two array magics from the current time */
    microtime(&timestamp);
    rdp->magic_0 = timestamp.tv_sec + 2;
    rdp->magic_1 = timestamp.tv_sec;

    for (disk = 0; disk < rdp->total_disks; disk++) {
	if ((rdp->disks[disk].flags & (AR_DF_PRESENT | AR_DF_ONLINE)) ==
	    (AR_DF_PRESENT | AR_DF_ONLINE))
	    meta->magic = HPTV2_MAGIC_OK;
	if (rdp->disks[disk].flags & AR_DF_ASSIGNED) {
	    meta->magic_0 = rdp->magic_0;
	    if (strlen(rdp->name))
		strncpy(meta->name_1, rdp->name, sizeof(meta->name_1));
	    else
		strcpy(meta->name_1, "FreeBSD");
	}
	meta->disk_number = disk;

	switch (rdp->type) {
	case AR_T_RAID0:
	    meta->type = HPTV2_T_RAID0;
	    strcpy(meta->name_2, "RAID 0");
	    if (rdp->disks[disk].flags & AR_DF_ONLINE)
		meta->order = HPTV2_O_OK;
	    break;

	case AR_T_RAID1:
	    /* HPT v2 encodes RAID1 as a RAID0 record with order flags */
	    meta->type = HPTV2_T_RAID0;
	    strcpy(meta->name_2, "RAID 1");
	    meta->disk_number = (disk < rdp->width) ? disk : disk + 5;
	    meta->order = HPTV2_O_RAID0 | HPTV2_O_OK;
	    break;

	case AR_T_RAID01:
	    meta->type = HPTV2_T_RAID01_RAID0;
	    strcpy(meta->name_2, "RAID 0+1");
	    if (rdp->disks[disk].flags & AR_DF_ONLINE) {
		if (disk < rdp->width) {
		    meta->order = (HPTV2_O_RAID0 | HPTV2_O_RAID1);
		    meta->magic_0 = rdp->magic_0 - 1;
		}
		else {
		    meta->order = HPTV2_O_RAID1;
		    meta->disk_number -= rdp->width;
		}
	    }
	    else
		meta->magic_0 = rdp->magic_0 - 1;
	    meta->magic_1 = rdp->magic_1;
	    break;

	case AR_T_SPAN:
	    meta->type = HPTV2_T_SPAN;
	    strcpy(meta->name_2, "SPAN");
	    break;

	default:
	    free(meta, M_AR);
	    return ENODEV;
	}

	meta->array_width = rdp->width;
	meta->stripe_shift = (rdp->width > 1) ? (ffs(rdp->interleave)-1) : 0;
	meta->total_sectors = rdp->total_sectors;
	meta->rebuild_lba = rdp->rebuild_lba;
	if (testing || bootverbose)
	    ata_raid_hptv2_print_meta(meta);

	if (rdp->disks[disk].dev) {
	    /*
	     * NOTE(review): this writes sizeof(struct promise_raid_conf)
	     * bytes from a hptv2_raid_conf-sized buffer — looks like a
	     * copy/paste slip; verify it should not be
	     * sizeof(struct hptv2_raid_conf).
	     */
	    if (ata_raid_rw(rdp->disks[disk].dev,
			    HPTV2_LBA(rdp->disks[disk].dev), meta,
			    sizeof(struct promise_raid_conf),
			    ATA_R_WRITE | ATA_R_DIRECT)) {
		device_printf(rdp->disks[disk].dev, "write metadata failed\n");
		error = EIO;
	    }
	}
    }
    free(meta, M_AR);
    return error;
}

/* Highpoint V3 RocketRAID Metadata */
/*
 * Read and parse HighPoint v3 metadata from a subdisk's parent device
 * and fold it into the generic ar_softc array table.  Returns 1 if
 * the disk was claimed, 0 otherwise (ENOMEM on alloc failure).
 */
static int
ata_raid_hptv3_read_meta(device_t dev, struct ar_softc **raidp)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    device_t parent = device_get_parent(dev);
    struct hptv3_raid_conf *meta;
    struct ar_softc *raid = NULL;
    int array, disk_number, retval = 0;

    if (!(meta = (struct hptv3_raid_conf *)
	  malloc(sizeof(struct hptv3_raid_conf), M_AR, M_NOWAIT | M_ZERO)))
	return ENOMEM;

    if (ata_raid_rw(parent, HPTV3_LBA(parent),
		    meta, sizeof(struct hptv3_raid_conf), ATA_R_READ)) {
	if (testing || bootverbose)
	    device_printf(parent, "HighPoint (v3) read metadata failed\n");
	goto hptv3_out;
    }

    /* check if this is a HighPoint v3 RAID struct */
    if (meta->magic != HPTV3_MAGIC) {
	if (testing || bootverbose)
	    device_printf(parent, "HighPoint (v3) check1 failed\n");
	goto hptv3_out;
    }

    /* check if there are any config_entries */
    if (meta->config_entries < 1) {
	if (testing || bootverbose)
	    device_printf(parent, "HighPoint (v3) check2 failed\n");
	goto hptv3_out;
    }

    if (testing || bootverbose)
	ata_raid_hptv3_print_meta(meta);

    /* now convert HighPoint (v3) metadata into our generic form */
    for (array = 0; array < MAX_ARRAYS; array++) {
	if (!raidp[array]) {
	    raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc),
						     M_AR, M_NOWAIT | M_ZERO);
	    if (!raidp[array]) {
		device_printf(parent, "failed to allocate metadata storage\n");
		goto hptv3_out;
	    }
	}
	raid = raidp[array];
	if (raid->format && (raid->format != AR_F_HPTV3_RAID))
	    continue;

	if ((raid->format & AR_F_HPTV3_RAID) && raid->magic_0 !=
meta->magic_0)
	    continue;

	switch (meta->configs[0].type) {
	case HPTV3_T_RAID0:
	    raid->type = AR_T_RAID0;
	    raid->width = meta->configs[0].total_disks;
	    disk_number = meta->configs[0].disk_number;
	    break;

	case HPTV3_T_RAID1:
	    raid->type = AR_T_RAID1;
	    raid->width = meta->configs[0].total_disks / 2;
	    disk_number = meta->configs[0].disk_number;
	    break;

	case HPTV3_T_RAID5:
	    raid->type = AR_T_RAID5;
	    raid->width = meta->configs[0].total_disks;
	    disk_number = meta->configs[0].disk_number;
	    break;

	case HPTV3_T_SPAN:
	    raid->type = AR_T_SPAN;
	    raid->width = meta->configs[0].total_disks;
	    disk_number = meta->configs[0].disk_number;
	    break;

	default:
	    device_printf(parent, "Highpoint (v3) unknown RAID type 0x%02x\n",
			  meta->configs[0].type);
	    free(raidp[array], M_AR);
	    raidp[array] = NULL;
	    goto hptv3_out;
	}
	/* a second config entry stacks RAID1 on top (RAID0+1) */
	if (meta->config_entries == 2) {
	    switch (meta->configs[1].type) {
	    case HPTV3_T_RAID1:
		if (raid->type == AR_T_RAID0) {
		    raid->type = AR_T_RAID01;
		    disk_number = meta->configs[1].disk_number +
				  (meta->configs[0].disk_number << 1);
		    break;
		}
	    default:
		device_printf(parent, "Highpoint (v3) unknown level 2 0x%02x\n",
			      meta->configs[1].type);
		free(raidp[array], M_AR);
		raidp[array] = NULL;
		goto hptv3_out;
	    }
	}

	raid->magic_0 = meta->magic_0;
	raid->format = AR_F_HPTV3_RAID;
	raid->generation = meta->timestamp;
	raid->interleave = 1 << meta->configs[0].stripe_shift;
	raid->total_disks = meta->configs[0].total_disks +
	    meta->configs[1].total_disks;
	/* 48-bit quantities are split across configs[] and configs_high[] */
	raid->total_sectors = meta->configs[0].total_sectors +
	    ((u_int64_t)meta->configs_high[0].total_sectors << 32);
	raid->heads = 255;
	raid->sectors = 63;
	raid->cylinders = raid->total_sectors / (63 * 255);
	raid->offset_sectors = 0;
	raid->rebuild_lba = meta->configs[0].rebuild_lba +
	    ((u_int64_t)meta->configs_high[0].rebuild_lba << 32);
	raid->lun = array;
	strncpy(raid->name, meta->name,
		min(sizeof(raid->name), sizeof(meta->name)));
	raid->disks[disk_number].sectors = raid->total_sectors /
	    (raid->type == AR_T_RAID5 ? raid->width - 1 : raid->width);
	raid->disks[disk_number].dev = parent;
	raid->disks[disk_number].flags =
	    (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE);
	ars->raid[raid->volume] = raid;
	ars->disk_number[raid->volume] = disk_number;
	retval = 1;
	break;
    }

hptv3_out:
    free(meta, M_AR);
    return retval;
}

/* Intel MatrixRAID Metadata */
/*
 * Read and parse Intel MatrixRAID metadata.  A disk set may carry
 * multiple volumes; each volume mapping is folded into its own
 * ar_softc.  Disks are matched by serial number.  Returns 1 if this
 * disk was claimed, 0 otherwise (ENOMEM on alloc failure).
 */
static int
ata_raid_intel_read_meta(device_t dev, struct ar_softc **raidp)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    device_t parent = device_get_parent(dev);
    struct intel_raid_conf *meta;
    struct intel_raid_mapping *map;
    struct ar_softc *raid = NULL;
    u_int32_t checksum, *ptr;
    int array, count, disk, volume = 1, retval = 0;
    char *tmp;

    if (!(meta = (struct intel_raid_conf *)
	  malloc(1536, M_AR, M_NOWAIT | M_ZERO)))
	return ENOMEM;

    if (ata_raid_rw(parent, INTEL_LBA(parent), meta, 1024, ATA_R_READ)) {
	if (testing || bootverbose)
	    device_printf(parent, "Intel read metadata failed\n");
	goto intel_out;
    }
    /*
     * NOTE(review): the two sectors read from disk are swapped into
     * ascending order here — presumably the on-disk layout stores them
     * reversed relative to INTEL_LBA; verify against the format spec.
     */
    tmp = (char *)meta;
    bcopy(tmp, tmp+1024, 512);
    bcopy(tmp+512, tmp, 1024);
    bzero(tmp+1024, 512);

    /* check if this is a Intel RAID struct */
    if (strncmp(meta->intel_id, INTEL_MAGIC, strlen(INTEL_MAGIC))) {
	if (testing || bootverbose)
	    device_printf(parent, "Intel check1 failed\n");
	goto intel_out;
    }

    /*
     * checksum covers the whole config including the stored checksum
     * field itself, so subtract that back out before comparing
     */
    for (checksum = 0, ptr = (u_int32_t *)meta, count = 0;
	 count < (meta->config_size / sizeof(u_int32_t)); count++) {
	checksum += *ptr++;
    }
    checksum -= meta->checksum;
    if (checksum != meta->checksum) {
	if (testing || bootverbose)
	    device_printf(parent, "Intel check2 failed\n");
	goto intel_out;
    }

    if (testing || bootverbose)
	ata_raid_intel_print_meta(meta);

    /* the first volume mapping follows the per-disk table */
    map = (struct intel_raid_mapping *)&meta->disk[meta->total_disks];

    /* now convert Intel metadata into our generic form */
    for (array = 0; array < MAX_ARRAYS; array++) {
	if (!raidp[array]) {
	    raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc),
						     M_AR, M_NOWAIT | M_ZERO);
	    if (!raidp[array]) {
		device_printf(parent, "failed to allocate metadata storage\n");
		goto intel_out;
	    }
	}
	raid = raidp[array];
	if (raid->format && (raid->format != AR_F_INTEL_RAID))
	    continue;
	if ((raid->format & AR_F_INTEL_RAID) &&
	    (raid->magic_0 != meta->config_id))
	    continue;

	/*
	 * update our knowledge about the array config based on generation
	 * NOTE: there can be multiple volumes on a disk set
	 */
	if (!meta->generation || meta->generation > raid->generation) {
	    switch (map->type) {
	    case INTEL_T_RAID0:
		raid->type = AR_T_RAID0;
		raid->width = map->total_disks;
		break;

	    case INTEL_T_RAID1:
		/* four disks of mirror metadata means RAID0+1 */
		if (map->total_disks == 4)
		    raid->type = AR_T_RAID01;
		else
		    raid->type = AR_T_RAID1;
		raid->width = map->total_disks / 2;
		break;

	    case INTEL_T_RAID5:
		raid->type = AR_T_RAID5;
		raid->width = map->total_disks;
		break;

	    default:
		device_printf(parent, "Intel unknown RAID type 0x%02x\n",
			      map->type);
		free(raidp[array], M_AR);
		raidp[array] = NULL;
		goto intel_out;
	    }

	    switch (map->status) {
	    case INTEL_S_READY:
		raid->status = AR_S_READY;
		break;
	    case INTEL_S_DEGRADED:
		raid->status |= AR_S_DEGRADED;
		break;
	    case INTEL_S_DISABLED:
	    case INTEL_S_FAILURE:
		raid->status = 0;
	    }

	    raid->magic_0 = meta->config_id;
	    raid->format = AR_F_INTEL_RAID;
	    raid->generation = meta->generation;
	    raid->interleave = map->stripe_sectors;
	    raid->total_disks = map->total_disks;
	    raid->total_sectors = map->total_sectors;
	    raid->heads = 255;
	    raid->sectors = 63;
	    raid->cylinders = raid->total_sectors / (63 * 255);
	    raid->offset_sectors = map->offset;
	    raid->rebuild_lba = 0;
	    raid->lun = array;
	    raid->volume = volume - 1;
	    strncpy(raid->name, map->name,
		    min(sizeof(raid->name), sizeof(map->name)));

	    /* clear out any old info */
	    for (disk = 0; disk < raid->total_disks; disk++) {
		raid->disks[disk].dev = NULL;
		bcopy(meta->disk[map->disk_idx[disk]].serial,
		      raid->disks[disk].serial,
		      sizeof(raid->disks[disk].serial));
		raid->disks[disk].sectors =
		    meta->disk[map->disk_idx[disk]].sectors;
		raid->disks[disk].flags = 0;
		if (meta->disk[map->disk_idx[disk]].flags & INTEL_F_ONLINE)
		    raid->disks[disk].flags |= AR_DF_ONLINE;
		if (meta->disk[map->disk_idx[disk]].flags &
INTEL_F_ASSIGNED)
		    raid->disks[disk].flags |= AR_DF_ASSIGNED;
		if (meta->disk[map->disk_idx[disk]].flags & INTEL_F_SPARE) {
		    raid->disks[disk].flags &= ~(AR_DF_ONLINE | AR_DF_ASSIGNED);
		    raid->disks[disk].flags |= AR_DF_SPARE;
		}
		if (meta->disk[map->disk_idx[disk]].flags & INTEL_F_DOWN)
		    raid->disks[disk].flags &= ~AR_DF_ONLINE;
	    }
	}
	if (meta->generation >= raid->generation) {
	    /* match this physical disk to its member slot by serial number */
	    for (disk = 0; disk < raid->total_disks; disk++) {
		struct ata_device *atadev = device_get_softc(parent);

		if (!strncmp(raid->disks[disk].serial, atadev->param.serial,
		    sizeof(raid->disks[disk].serial))) {
		    raid->disks[disk].dev = parent;
		    raid->disks[disk].flags |= (AR_DF_PRESENT | AR_DF_ONLINE);
		    ars->raid[raid->volume] = raid;
		    ars->disk_number[raid->volume] = disk;
		    retval = 1;
		}
	    }
	}
	else
	    goto intel_out;

	if (retval) {
	    /* more volumes on this disk set: advance to the next mapping */
	    if (volume < meta->total_volumes) {
		map = (struct intel_raid_mapping *)
		      &map->disk_idx[map->total_disks];
		volume++;
		retval = 0;
		continue;
	    }
	    break;
	}
	else {
	    free(raidp[array], M_AR);
	    raidp[array] = NULL;
	    if (volume == 2)
		retval = 1;
	}
    }

intel_out:
    free(meta, M_AR);
    return retval;
}

/*
 * Write Intel MatrixRAID metadata (single volume) to all member disks.
 * Returns 0, EIO if any disk write failed, or ENOMEM/ENODEV.
 */
static int
ata_raid_intel_write_meta(struct ar_softc *rdp)
{
    struct intel_raid_conf *meta;
    struct intel_raid_mapping *map;
    struct timeval timestamp;
    u_int32_t checksum, *ptr;
    int count, disk, error = 0;
    char *tmp;

    if (!(meta = (struct intel_raid_conf *)
	  malloc(1536, M_AR, M_NOWAIT | M_ZERO))) {
	printf("ar%d: failed to allocate metadata storage\n", rdp->lun);
	return ENOMEM;
    }

    rdp->generation++;
    microtime(&timestamp);

    bcopy(INTEL_MAGIC, meta->intel_id, sizeof(meta->intel_id));
    bcopy(INTEL_VERSION_1100, meta->version, sizeof(meta->version));
    meta->config_id = timestamp.tv_sec;
    meta->generation = rdp->generation;
    meta->total_disks = rdp->total_disks;
    meta->total_volumes = 1;                                    /* XXX SOS */
    for (disk = 0; disk < rdp->total_disks; disk++) {
	if (rdp->disks[disk].dev) {
	    struct ata_channel *ch =
		device_get_softc(device_get_parent(rdp->disks[disk].dev));
	    struct ata_device *atadev =
		device_get_softc(rdp->disks[disk].dev);

	    bcopy(atadev->param.serial, meta->disk[disk].serial,
		  sizeof(rdp->disks[disk].serial));
	    meta->disk[disk].sectors = rdp->disks[disk].sectors;
	    meta->disk[disk].id = (ch->unit << 16) | ATA_DEV(atadev->unit);
	}
	else
	    meta->disk[disk].sectors = rdp->total_sectors / rdp->width;
	meta->disk[disk].flags = 0;
	if (rdp->disks[disk].flags & AR_DF_SPARE)
	    meta->disk[disk].flags |= INTEL_F_SPARE;
	else {
	    if (rdp->disks[disk].flags & AR_DF_ONLINE)
		meta->disk[disk].flags |= INTEL_F_ONLINE;
	    else
		meta->disk[disk].flags |= INTEL_F_DOWN;
	    if (rdp->disks[disk].flags & AR_DF_ASSIGNED)
		meta->disk[disk].flags |= INTEL_F_ASSIGNED;
	}
    }
    /* the volume mapping follows the per-disk table */
    map = (struct intel_raid_mapping *)&meta->disk[meta->total_disks];

    bcopy(rdp->name, map->name, sizeof(rdp->name));
    map->total_sectors = rdp->total_sectors;
    map->state = 12;                                            /* XXX SOS */
    map->offset = rdp->offset_sectors;
    map->stripe_count = rdp->total_sectors / (rdp->interleave*rdp->total_disks);
    map->stripe_sectors =  rdp->interleave;
    map->disk_sectors = rdp->total_sectors / rdp->width;
    map->status = INTEL_S_READY;                                /* XXX SOS */
    switch (rdp->type) {
    case AR_T_RAID0:
	map->type = INTEL_T_RAID0;
	break;
    case AR_T_RAID1:
	map->type = INTEL_T_RAID1;
	break;
    case AR_T_RAID01:
	/* RAID0+1 is expressed as a 4-disk RAID1 in Intel metadata */
	map->type = INTEL_T_RAID1;
	break;
    case AR_T_RAID5:
	map->type = INTEL_T_RAID5;
	break;
    default:
	free(meta, M_AR);
	return ENODEV;
    }
    map->total_disks = rdp->total_disks;
    map->magic[0] = 0x02;
    map->magic[1] = 0xff;
    map->magic[2] = 0x01;
    for (disk = 0; disk < rdp->total_disks; disk++)
	map->disk_idx[disk] = disk;

    /* checksum everything up to and including the disk index table */
    meta->config_size = (char *)&map->disk_idx[disk] - (char *)meta;
    for (checksum = 0, ptr = (u_int32_t *)meta, count = 0;
	 count < (meta->config_size / sizeof(u_int32_t)); count++) {
	checksum += *ptr++;
    }
    meta->checksum = checksum;

    if (testing || bootverbose)
	ata_raid_intel_print_meta(meta);

    /* swap the two sectors back into on-disk order (see read side) */
    tmp = (char *)meta;
    bcopy(tmp, tmp+1024, 512);
    bcopy(tmp+512, tmp, 1024);
    bzero(tmp+1024, 512);

    for (disk = 0; disk < rdp->total_disks; disk++) {
	if (rdp->disks[disk].dev) {
	    if (ata_raid_rw(rdp->disks[disk].dev,
			    INTEL_LBA(rdp->disks[disk].dev),
			    meta, 1024, ATA_R_WRITE | ATA_R_DIRECT)) {
		device_printf(rdp->disks[disk].dev, "write metadata failed\n");
		error = EIO;
	    }
	}
    }
    free(meta, M_AR);
    return error;
}

/* Integrated Technology Express Metadata */
/*
 * Read and parse ITE metadata from a subdisk's parent device and fold
 * it into the generic ar_softc array table.  Returns 1 if the disk
 * was claimed, 0 otherwise (ENOMEM on alloc failure).
 */
static int
ata_raid_ite_read_meta(device_t dev, struct ar_softc **raidp)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    device_t parent = device_get_parent(dev);
    struct ite_raid_conf *meta;
    struct ar_softc *raid = NULL;
    int array, disk_number, count, retval = 0;
    u_int16_t *ptr;

    if (!(meta = (struct ite_raid_conf *)
	  malloc(sizeof(struct ite_raid_conf), M_AR, M_NOWAIT | M_ZERO)))
	return ENOMEM;

    if (ata_raid_rw(parent, ITE_LBA(parent),
		    meta, sizeof(struct ite_raid_conf), ATA_R_READ)) {
	if (testing || bootverbose)
	    device_printf(parent, "ITE read metadata failed\n");
	goto ite_out;
    }

    /* check if this is a ITE RAID struct */
    /* the id is stored as big-endian 16-bit words; swap in place first */
    for (ptr = (u_int16_t *)meta->ite_id, count = 0;
	 count < sizeof(meta->ite_id)/sizeof(uint16_t); count++)
	ptr[count] = be16toh(ptr[count]);

    if (strncmp(meta->ite_id, ITE_MAGIC, strlen(ITE_MAGIC))) {
	if (testing || bootverbose)
	    device_printf(parent, "ITE check1 failed\n");
	goto ite_out;
    }

    if (testing || bootverbose)
	ata_raid_ite_print_meta(meta);

    /* now convert ITE metadata into our generic form */
    for (array = 0; array < MAX_ARRAYS; array++) {
	if ((raid = raidp[array])) {
	    if (raid->format != AR_F_ITE_RAID)
		continue;
	    if (raid->magic_0 != *((u_int64_t *)meta->timestamp_0))
		continue;
	}

	/* if we dont have a disks timestamp the RAID is invalidated */
	if (*((u_int64_t *)meta->timestamp_1) == 0)
	    goto ite_out;

	if (!raid) {
	    raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc),
						     M_AR, M_NOWAIT | M_ZERO);
	    if (!(raid = raidp[array])) {
		device_printf(parent, "failed to allocate metadata storage\n");
		goto ite_out;
	    }
	}

	switch
(meta->type) { case ITE_T_RAID0: raid->type = AR_T_RAID0; raid->width = meta->array_width; raid->total_disks = meta->array_width; disk_number = meta->disk_number; break; case ITE_T_RAID1: raid->type = AR_T_RAID1; raid->width = 1; raid->total_disks = 2; disk_number = meta->disk_number; break; case ITE_T_RAID01: raid->type = AR_T_RAID01; raid->width = meta->array_width; raid->total_disks = 4; disk_number = ((meta->disk_number & 0x02) >> 1) | ((meta->disk_number & 0x01) << 1); break; case ITE_T_SPAN: raid->type = AR_T_SPAN; raid->width = 1; raid->total_disks = meta->array_width; disk_number = meta->disk_number; break; default: device_printf(parent, "ITE unknown RAID type 0x%02x\n", meta->type); free(raidp[array], M_AR); raidp[array] = NULL; goto ite_out; } raid->magic_0 = *((u_int64_t *)meta->timestamp_0); raid->format = AR_F_ITE_RAID; raid->generation = 0; raid->interleave = meta->stripe_sectors; raid->total_sectors = meta->total_sectors; raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = 0; raid->rebuild_lba = 0; raid->lun = array; raid->disks[disk_number].dev = parent; raid->disks[disk_number].sectors = raid->total_sectors / raid->width; raid->disks[disk_number].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = disk_number; retval = 1; break; } ite_out: free(meta, M_AR); return retval; } /* LSILogic V2 MegaRAID Metadata */ static int ata_raid_lsiv2_read_meta(device_t dev, struct ar_softc **raidp) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct lsiv2_raid_conf *meta; struct ar_softc *raid = NULL; int array, retval = 0; if (!(meta = (struct lsiv2_raid_conf *) malloc(sizeof(struct lsiv2_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, LSIV2_LBA(parent), meta, sizeof(struct lsiv2_raid_conf), ATA_R_READ)) { if (testing || bootverbose) 
device_printf(parent, "LSI (v2) read metadata failed\n"); goto lsiv2_out; } /* check if this is a LSI RAID struct */ if (strncmp(meta->lsi_id, LSIV2_MAGIC, strlen(LSIV2_MAGIC))) { if (testing || bootverbose) device_printf(parent, "LSI (v2) check1 failed\n"); goto lsiv2_out; } if (testing || bootverbose) ata_raid_lsiv2_print_meta(meta); /* now convert LSI (v2) config meta into our generic form */ for (array = 0; array < MAX_ARRAYS; array++) { int raid_entry, conf_entry; if (!raidp[array + meta->raid_number]) { raidp[array + meta->raid_number] = (struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO); if (!raidp[array + meta->raid_number]) { device_printf(parent, "failed to allocate metadata storage\n"); goto lsiv2_out; } } raid = raidp[array + meta->raid_number]; if (raid->format && (raid->format != AR_F_LSIV2_RAID)) continue; if (raid->magic_0 && ((raid->magic_0 != meta->timestamp) || (raid->magic_1 != meta->raid_number))) continue; array += meta->raid_number; raid_entry = meta->raid_number; conf_entry = (meta->configs[raid_entry].raid.config_offset >> 4) + meta->disk_number - 1; switch (meta->configs[raid_entry].raid.type) { case LSIV2_T_RAID0: raid->magic_0 = meta->timestamp; raid->magic_1 = meta->raid_number; raid->type = AR_T_RAID0; raid->interleave = meta->configs[raid_entry].raid.stripe_sectors; raid->width = meta->configs[raid_entry].raid.array_width; break; case LSIV2_T_RAID1: raid->magic_0 = meta->timestamp; raid->magic_1 = meta->raid_number; raid->type = AR_T_RAID1; raid->width = meta->configs[raid_entry].raid.array_width; break; case LSIV2_T_RAID0 | LSIV2_T_RAID1: raid->magic_0 = meta->timestamp; raid->magic_1 = meta->raid_number; raid->type = AR_T_RAID01; raid->interleave = meta->configs[raid_entry].raid.stripe_sectors; raid->width = meta->configs[raid_entry].raid.array_width; break; default: device_printf(parent, "LSI v2 unknown RAID type 0x%02x\n", meta->configs[raid_entry].raid.type); free(raidp[array], M_AR); raidp[array] = NULL; 
goto lsiv2_out; } raid->format = AR_F_LSIV2_RAID; raid->generation = 0; raid->total_disks = meta->configs[raid_entry].raid.disk_count; raid->total_sectors = meta->configs[raid_entry].raid.total_sectors; raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = 0; raid->rebuild_lba = 0; raid->lun = array; if (meta->configs[conf_entry].disk.device != LSIV2_D_NONE) { raid->disks[meta->disk_number].dev = parent; raid->disks[meta->disk_number].sectors = meta->configs[conf_entry].disk.disk_sectors; raid->disks[meta->disk_number].flags = (AR_DF_ONLINE | AR_DF_PRESENT | AR_DF_ASSIGNED); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = meta->disk_number; retval = 1; } else raid->disks[meta->disk_number].flags &= ~AR_DF_ONLINE; break; } lsiv2_out: free(meta, M_AR); return retval; } /* LSILogic V3 MegaRAID Metadata */ static int ata_raid_lsiv3_read_meta(device_t dev, struct ar_softc **raidp) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct lsiv3_raid_conf *meta; struct ar_softc *raid = NULL; u_int8_t checksum, *ptr; int array, entry, count, disk_number, retval = 0; if (!(meta = (struct lsiv3_raid_conf *) malloc(sizeof(struct lsiv3_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, LSIV3_LBA(parent), meta, sizeof(struct lsiv3_raid_conf), ATA_R_READ)) { if (testing || bootverbose) device_printf(parent, "LSI (v3) read metadata failed\n"); goto lsiv3_out; } /* check if this is a LSI RAID struct */ if (strncmp(meta->lsi_id, LSIV3_MAGIC, strlen(LSIV3_MAGIC))) { if (testing || bootverbose) device_printf(parent, "LSI (v3) check1 failed\n"); goto lsiv3_out; } /* check if the checksum is OK */ for (checksum = 0, ptr = meta->lsi_id, count = 0; count < 512; count++) checksum += *ptr++; if (checksum) { if (testing || bootverbose) device_printf(parent, "LSI (v3) check2 failed\n"); goto lsiv3_out; } if (testing || bootverbose) 
ata_raid_lsiv3_print_meta(meta); /* now convert LSI (v3) config meta into our generic form */ for (array = 0, entry = 0; array < MAX_ARRAYS && entry < 8;) { if (!raidp[array]) { raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO); if (!raidp[array]) { device_printf(parent, "failed to allocate metadata storage\n"); goto lsiv3_out; } } raid = raidp[array]; if (raid->format && (raid->format != AR_F_LSIV3_RAID)) { array++; continue; } if ((raid->format == AR_F_LSIV3_RAID) && (raid->magic_0 != meta->timestamp)) { array++; continue; } switch (meta->raid[entry].total_disks) { case 0: entry++; continue; case 1: if (meta->raid[entry].device == meta->device) { disk_number = 0; break; } if (raid->format) array++; entry++; continue; case 2: disk_number = (meta->device & (LSIV3_D_DEVICE|LSIV3_D_CHANNEL))?1:0; break; default: device_printf(parent, "lsiv3 > 2 disk support untested!!\n"); disk_number = (meta->device & LSIV3_D_DEVICE ? 1 : 0) + (meta->device & LSIV3_D_CHANNEL ? 
2 : 0); break; } switch (meta->raid[entry].type) { case LSIV3_T_RAID0: raid->type = AR_T_RAID0; raid->width = meta->raid[entry].total_disks; break; case LSIV3_T_RAID1: raid->type = AR_T_RAID1; raid->width = meta->raid[entry].array_width; break; default: device_printf(parent, "LSI v3 unknown RAID type 0x%02x\n", meta->raid[entry].type); free(raidp[array], M_AR); raidp[array] = NULL; entry++; continue; } raid->magic_0 = meta->timestamp; raid->format = AR_F_LSIV3_RAID; raid->generation = 0; raid->interleave = meta->raid[entry].stripe_pages * 8; raid->total_disks = meta->raid[entry].total_disks; raid->total_sectors = raid->width * meta->raid[entry].sectors; raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = meta->raid[entry].offset; raid->rebuild_lba = 0; raid->lun = array; raid->disks[disk_number].dev = parent; raid->disks[disk_number].sectors = raid->total_sectors / raid->width; raid->disks[disk_number].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = disk_number; retval = 1; entry++; array++; } lsiv3_out: free(meta, M_AR); return retval; } /* nVidia MediaShield Metadata */ static int ata_raid_nvidia_read_meta(device_t dev, struct ar_softc **raidp) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct nvidia_raid_conf *meta; struct ar_softc *raid = NULL; u_int32_t checksum, *ptr; int array, count, retval = 0; if (!(meta = (struct nvidia_raid_conf *) malloc(sizeof(struct nvidia_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, NVIDIA_LBA(parent), meta, sizeof(struct nvidia_raid_conf), ATA_R_READ)) { if (testing || bootverbose) device_printf(parent, "nVidia read metadata failed\n"); goto nvidia_out; } /* check if this is a nVidia RAID struct */ if (strncmp(meta->nvidia_id, NV_MAGIC, strlen(NV_MAGIC))) { if (testing || bootverbose) device_printf(parent, 
"nVidia check1 failed\n"); goto nvidia_out; } /* check if the checksum is OK */ for (checksum = 0, ptr = (u_int32_t*)meta, count = 0; count < meta->config_size; count++) checksum += *ptr++; if (checksum) { if (testing || bootverbose) device_printf(parent, "nVidia check2 failed\n"); goto nvidia_out; } if (testing || bootverbose) ata_raid_nvidia_print_meta(meta); /* now convert nVidia meta into our generic form */ for (array = 0; array < MAX_ARRAYS; array++) { if (!raidp[array]) { raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO); if (!raidp[array]) { device_printf(parent, "failed to allocate metadata storage\n"); goto nvidia_out; } } raid = raidp[array]; if (raid->format && (raid->format != AR_F_NVIDIA_RAID)) continue; if (raid->format == AR_F_NVIDIA_RAID && ((raid->magic_0 != meta->magic_1) || (raid->magic_1 != meta->magic_2))) { continue; } switch (meta->type) { case NV_T_SPAN: raid->type = AR_T_SPAN; break; case NV_T_RAID0: raid->type = AR_T_RAID0; break; case NV_T_RAID1: raid->type = AR_T_RAID1; break; case NV_T_RAID5: raid->type = AR_T_RAID5; break; case NV_T_RAID01: raid->type = AR_T_RAID01; break; default: device_printf(parent, "nVidia unknown RAID type 0x%02x\n", meta->type); free(raidp[array], M_AR); raidp[array] = NULL; goto nvidia_out; } raid->magic_0 = meta->magic_1; raid->magic_1 = meta->magic_2; raid->format = AR_F_NVIDIA_RAID; raid->generation = 0; raid->interleave = meta->stripe_sectors; raid->width = meta->array_width; raid->total_disks = meta->total_disks; raid->total_sectors = meta->total_sectors; raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = 0; raid->rebuild_lba = meta->rebuild_lba; raid->lun = array; raid->status = AR_S_READY; if (meta->status & NV_S_DEGRADED) raid->status |= AR_S_DEGRADED; raid->disks[meta->disk_number].dev = parent; raid->disks[meta->disk_number].sectors = raid->total_sectors / raid->width; 
raid->disks[meta->disk_number].flags = (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = meta->disk_number; retval = 1; break; } nvidia_out: free(meta, M_AR); return retval; } /* Promise FastTrak Metadata */ static int ata_raid_promise_read_meta(device_t dev, struct ar_softc **raidp, int native) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct promise_raid_conf *meta; struct ar_softc *raid; u_int32_t checksum, *ptr; int array, count, disk, disksum = 0, retval = 0; if (!(meta = (struct promise_raid_conf *) malloc(sizeof(struct promise_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, PROMISE_LBA(parent), meta, sizeof(struct promise_raid_conf), ATA_R_READ)) { if (testing || bootverbose) device_printf(parent, "%s read metadata failed\n", native ? "FreeBSD" : "Promise"); goto promise_out; } /* check the signature */ if (native) { if (strncmp(meta->promise_id, ATA_MAGIC, strlen(ATA_MAGIC))) { if (testing || bootverbose) device_printf(parent, "FreeBSD check1 failed\n"); goto promise_out; } } else { if (strncmp(meta->promise_id, PR_MAGIC, strlen(PR_MAGIC))) { if (testing || bootverbose) device_printf(parent, "Promise check1 failed\n"); goto promise_out; } } /* check if the checksum is OK */ for (checksum = 0, ptr = (u_int32_t *)meta, count = 0; count < 511; count++) checksum += *ptr++; if (checksum != *ptr) { if (testing || bootverbose) device_printf(parent, "%s check2 failed\n", native ? "FreeBSD" : "Promise"); goto promise_out; } /* check on disk integrity status */ if (meta->raid.integrity != PR_I_VALID) { if (testing || bootverbose) device_printf(parent, "%s check3 failed\n", native ? 
"FreeBSD" : "Promise"); goto promise_out; } if (testing || bootverbose) ata_raid_promise_print_meta(meta); /* now convert Promise metadata into our generic form */ for (array = 0; array < MAX_ARRAYS; array++) { if (!raidp[array]) { raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO); if (!raidp[array]) { device_printf(parent, "failed to allocate metadata storage\n"); goto promise_out; } } raid = raidp[array]; if (raid->format && (raid->format != (native ? AR_F_FREEBSD_RAID : AR_F_PROMISE_RAID))) continue; if ((raid->format == (native ? AR_F_FREEBSD_RAID : AR_F_PROMISE_RAID))&& !(meta->raid.magic_1 == (raid->magic_1))) continue; /* update our knowledge about the array config based on generation */ if (!meta->raid.generation || meta->raid.generation > raid->generation){ switch (meta->raid.type) { case PR_T_SPAN: raid->type = AR_T_SPAN; break; case PR_T_JBOD: raid->type = AR_T_JBOD; break; case PR_T_RAID0: raid->type = AR_T_RAID0; break; case PR_T_RAID1: raid->type = AR_T_RAID1; if (meta->raid.array_width > 1) raid->type = AR_T_RAID01; break; case PR_T_RAID5: raid->type = AR_T_RAID5; break; default: device_printf(parent, "%s unknown RAID type 0x%02x\n", native ? "FreeBSD" : "Promise", meta->raid.type); free(raidp[array], M_AR); raidp[array] = NULL; goto promise_out; } raid->magic_1 = meta->raid.magic_1; raid->format = (native ? 
AR_F_FREEBSD_RAID : AR_F_PROMISE_RAID); raid->generation = meta->raid.generation; raid->interleave = 1 << meta->raid.stripe_shift; raid->width = meta->raid.array_width; raid->total_disks = meta->raid.total_disks; raid->heads = meta->raid.heads + 1; raid->sectors = meta->raid.sectors; raid->cylinders = meta->raid.cylinders + 1; raid->total_sectors = meta->raid.total_sectors; raid->offset_sectors = 0; raid->rebuild_lba = meta->raid.rebuild_lba; raid->lun = array; if ((meta->raid.status & (PR_S_VALID | PR_S_ONLINE | PR_S_INITED | PR_S_READY)) == (PR_S_VALID | PR_S_ONLINE | PR_S_INITED | PR_S_READY)) { raid->status |= AR_S_READY; if (meta->raid.status & PR_S_DEGRADED) raid->status |= AR_S_DEGRADED; } else raid->status &= ~AR_S_READY; /* convert disk flags to our internal types */ for (disk = 0; disk < meta->raid.total_disks; disk++) { raid->disks[disk].dev = NULL; raid->disks[disk].flags = 0; *((u_int64_t *)(raid->disks[disk].serial)) = meta->raid.disk[disk].magic_0; disksum += meta->raid.disk[disk].flags; if (meta->raid.disk[disk].flags & PR_F_ONLINE) raid->disks[disk].flags |= AR_DF_ONLINE; if (meta->raid.disk[disk].flags & PR_F_ASSIGNED) raid->disks[disk].flags |= AR_DF_ASSIGNED; if (meta->raid.disk[disk].flags & PR_F_SPARE) { raid->disks[disk].flags &= ~(AR_DF_ONLINE | AR_DF_ASSIGNED); raid->disks[disk].flags |= AR_DF_SPARE; } if (meta->raid.disk[disk].flags & (PR_F_REDIR | PR_F_DOWN)) raid->disks[disk].flags &= ~AR_DF_ONLINE; } if (!disksum) { device_printf(parent, "%s subdisks has no flags\n", native ? 
"FreeBSD" : "Promise"); free(raidp[array], M_AR); raidp[array] = NULL; goto promise_out; } } if (meta->raid.generation >= raid->generation) { int disk_number = meta->raid.disk_number; if (raid->disks[disk_number].flags && (meta->magic_0 == *((u_int64_t *)(raid->disks[disk_number].serial)))) { raid->disks[disk_number].dev = parent; raid->disks[disk_number].flags |= AR_DF_PRESENT; raid->disks[disk_number].sectors = meta->raid.disk_sectors; if ((raid->disks[disk_number].flags & (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE)) == (AR_DF_PRESENT | AR_DF_ASSIGNED | AR_DF_ONLINE)) { ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = disk_number; retval = 1; } } } break; } promise_out: free(meta, M_AR); return retval; } static int ata_raid_promise_write_meta(struct ar_softc *rdp) { struct promise_raid_conf *meta; struct timeval timestamp; u_int32_t *ckptr; int count, disk, drive, error = 0; if (!(meta = (struct promise_raid_conf *) malloc(sizeof(struct promise_raid_conf), M_AR, M_NOWAIT))) { printf("ar%d: failed to allocate metadata storage\n", rdp->lun); return ENOMEM; } rdp->generation++; microtime(×tamp); for (disk = 0; disk < rdp->total_disks; disk++) { for (count = 0; count < sizeof(struct promise_raid_conf); count++) *(((u_int8_t *)meta) + count) = 255 - (count % 256); meta->dummy_0 = 0x00020000; meta->raid.disk_number = disk; if (rdp->disks[disk].dev) { struct ata_device *atadev = device_get_softc(rdp->disks[disk].dev); struct ata_channel *ch = device_get_softc(device_get_parent(rdp->disks[disk].dev)); meta->raid.channel = ch->unit; meta->raid.device = ATA_DEV(atadev->unit); meta->raid.disk_sectors = rdp->disks[disk].sectors; meta->raid.disk_offset = rdp->offset_sectors; } else { meta->raid.channel = 0; meta->raid.device = 0; meta->raid.disk_sectors = 0; meta->raid.disk_offset = 0; } meta->magic_0 = PR_MAGIC0(meta->raid) | timestamp.tv_sec; meta->magic_1 = timestamp.tv_sec >> 16; meta->magic_2 = timestamp.tv_sec; meta->raid.integrity = PR_I_VALID; 
meta->raid.magic_0 = meta->magic_0; meta->raid.rebuild_lba = rdp->rebuild_lba; meta->raid.generation = rdp->generation; if (rdp->status & AR_S_READY) { meta->raid.flags = (PR_F_VALID | PR_F_ASSIGNED | PR_F_ONLINE); meta->raid.status = (PR_S_VALID | PR_S_ONLINE | PR_S_INITED | PR_S_READY); if (rdp->status & AR_S_DEGRADED) meta->raid.status |= PR_S_DEGRADED; else meta->raid.status |= PR_S_FUNCTIONAL; } else { meta->raid.flags = PR_F_DOWN; meta->raid.status = 0; } switch (rdp->type) { case AR_T_RAID0: meta->raid.type = PR_T_RAID0; break; case AR_T_RAID1: meta->raid.type = PR_T_RAID1; break; case AR_T_RAID01: meta->raid.type = PR_T_RAID1; break; case AR_T_RAID5: meta->raid.type = PR_T_RAID5; break; case AR_T_SPAN: meta->raid.type = PR_T_SPAN; break; case AR_T_JBOD: meta->raid.type = PR_T_JBOD; break; default: free(meta, M_AR); return ENODEV; } meta->raid.total_disks = rdp->total_disks; meta->raid.stripe_shift = ffs(rdp->interleave) - 1; meta->raid.array_width = rdp->width; meta->raid.array_number = rdp->lun; meta->raid.total_sectors = rdp->total_sectors; meta->raid.cylinders = rdp->cylinders - 1; meta->raid.heads = rdp->heads - 1; meta->raid.sectors = rdp->sectors; meta->raid.magic_1 = (u_int64_t)meta->magic_2<<16 | meta->magic_1; bzero(&meta->raid.disk, 8 * 12); for (drive = 0; drive < rdp->total_disks; drive++) { meta->raid.disk[drive].flags = 0; if (rdp->disks[drive].flags & AR_DF_PRESENT) meta->raid.disk[drive].flags |= PR_F_VALID; if (rdp->disks[drive].flags & AR_DF_ASSIGNED) meta->raid.disk[drive].flags |= PR_F_ASSIGNED; if (rdp->disks[drive].flags & AR_DF_ONLINE) meta->raid.disk[drive].flags |= PR_F_ONLINE; else if (rdp->disks[drive].flags & AR_DF_PRESENT) meta->raid.disk[drive].flags = (PR_F_REDIR | PR_F_DOWN); if (rdp->disks[drive].flags & AR_DF_SPARE) meta->raid.disk[drive].flags |= PR_F_SPARE; meta->raid.disk[drive].dummy_0 = 0x0; if (rdp->disks[drive].dev) { struct ata_channel *ch = device_get_softc(device_get_parent(rdp->disks[drive].dev)); struct 
ata_device *atadev = device_get_softc(rdp->disks[drive].dev); meta->raid.disk[drive].channel = ch->unit; meta->raid.disk[drive].device = ATA_DEV(atadev->unit); } meta->raid.disk[drive].magic_0 = PR_MAGIC0(meta->raid.disk[drive]) | timestamp.tv_sec; } if (rdp->disks[disk].dev) { if ((rdp->disks[disk].flags & (AR_DF_PRESENT | AR_DF_ONLINE)) == (AR_DF_PRESENT | AR_DF_ONLINE)) { if (rdp->format == AR_F_FREEBSD_RAID) bcopy(ATA_MAGIC, meta->promise_id, sizeof(ATA_MAGIC)); else bcopy(PR_MAGIC, meta->promise_id, sizeof(PR_MAGIC)); } else bzero(meta->promise_id, sizeof(meta->promise_id)); meta->checksum = 0; for (ckptr = (int32_t *)meta, count = 0; count < 511; count++) meta->checksum += *ckptr++; if (testing || bootverbose) ata_raid_promise_print_meta(meta); if (ata_raid_rw(rdp->disks[disk].dev, PROMISE_LBA(rdp->disks[disk].dev), meta, sizeof(struct promise_raid_conf), ATA_R_WRITE | ATA_R_DIRECT)) { device_printf(rdp->disks[disk].dev, "write metadata failed\n"); error = EIO; } } } free(meta, M_AR); return error; } /* Silicon Image Medley Metadata */ static int ata_raid_sii_read_meta(device_t dev, struct ar_softc **raidp) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct sii_raid_conf *meta; struct ar_softc *raid = NULL; u_int16_t checksum, *ptr; int array, count, disk, retval = 0; if (!(meta = (struct sii_raid_conf *) malloc(sizeof(struct sii_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, SII_LBA(parent), meta, sizeof(struct sii_raid_conf), ATA_R_READ)) { if (testing || bootverbose) device_printf(parent, "Silicon Image read metadata failed\n"); goto sii_out; } /* check if this is a Silicon Image (Medley) RAID struct */ for (checksum = 0, ptr = (u_int16_t *)meta, count = 0; count < 160; count++) checksum += *ptr++; if (checksum) { if (testing || bootverbose) device_printf(parent, "Silicon Image check1 failed\n"); goto sii_out; } for (checksum = 0, ptr = (u_int16_t *)meta, count = 0; count 
	 < 256; count++)
	checksum += *ptr++;
    if (checksum != meta->checksum_1) {
	if (testing || bootverbose)
	    device_printf(parent, "Silicon Image check2 failed\n");
	goto sii_out;
    }

    /* check version (only major 0x0002, minor 0x0000/0x0001 supported) */
    if (meta->version_major != 0x0002 ||
	(meta->version_minor != 0x0000 && meta->version_minor != 0x0001)) {
	if (testing || bootverbose)
	    device_printf(parent, "Silicon Image check3 failed\n");
	goto sii_out;
    }

    if (testing || bootverbose)
	ata_raid_sii_print_meta(meta);

    /* now convert Silicon Image meta into our generic form */
    for (array = 0; array < MAX_ARRAYS; array++) {
	if (!raidp[array]) {
	    raidp[array] =
		(struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR,
					  M_NOWAIT | M_ZERO);
	    if (!raidp[array]) {
		device_printf(parent, "failed to allocate metadata storage\n");
		goto sii_out;
	    }
	}
	raid = raidp[array];
	/* skip slots claimed by another metadata format or timestamp */
	if (raid->format && (raid->format != AR_F_SII_RAID))
	    continue;
	if (raid->format == AR_F_SII_RAID &&
	    (raid->magic_0 != *((u_int64_t *)meta->timestamp))) {
	    continue;
	}

	/* update our knowledge about the array config based on generation */
	if (!meta->generation || meta->generation > raid->generation) {
	    switch (meta->type) {
	    case SII_T_RAID0:
		raid->type = AR_T_RAID0;
		break;

	    case SII_T_RAID1:
		raid->type = AR_T_RAID1;
		break;

	    case SII_T_RAID01:
		raid->type = AR_T_RAID01;
		break;

	    case SII_T_SPARE:
		device_printf(parent, "Silicon Image SPARE disk\n");
		free(raidp[array], M_AR);
		raidp[array] = NULL;
		goto sii_out;

	    default:
		device_printf(parent,"Silicon Image unknown RAID type 0x%02x\n",
			      meta->type);
		free(raidp[array], M_AR);
		raidp[array] = NULL;
		goto sii_out;
	    }
	    raid->magic_0 = *((u_int64_t *)meta->timestamp);
	    raid->format = AR_F_SII_RAID;
	    raid->generation = meta->generation;
	    raid->interleave = meta->stripe_sectors;
	    /* 0xff marks "no disks of this kind" in the SiI counts */
	    raid->width = (meta->raid0_disks != 0xff) ? meta->raid0_disks : 1;
	    raid->total_disks =
		((meta->raid0_disks != 0xff) ? meta->raid0_disks : 0) +
		((meta->raid1_disks != 0xff) ?
meta->raid1_disks : 0); raid->total_sectors = meta->total_sectors; raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = 0; raid->rebuild_lba = meta->rebuild_lba; raid->lun = array; strncpy(raid->name, meta->name, min(sizeof(raid->name), sizeof(meta->name))); /* clear out any old info */ if (raid->generation) { for (disk = 0; disk < raid->total_disks; disk++) { raid->disks[disk].dev = NULL; raid->disks[disk].flags = 0; } } } if (meta->generation >= raid->generation) { /* XXX SOS add check for the right physical disk by serial# */ if (meta->status & SII_S_READY) { int disk_number = (raid->type == AR_T_RAID01) ? meta->raid1_ident + (meta->raid0_ident << 1) : meta->disk_number; raid->disks[disk_number].dev = parent; raid->disks[disk_number].sectors = raid->total_sectors / raid->width; raid->disks[disk_number].flags = (AR_DF_ONLINE | AR_DF_PRESENT | AR_DF_ASSIGNED); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = disk_number; retval = 1; } } break; } sii_out: free(meta, M_AR); return retval; } /* Silicon Integrated Systems Metadata */ static int ata_raid_sis_read_meta(device_t dev, struct ar_softc **raidp) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct sis_raid_conf *meta; struct ar_softc *raid = NULL; int array, disk_number, drive, retval = 0; if (!(meta = (struct sis_raid_conf *) malloc(sizeof(struct sis_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, SIS_LBA(parent), meta, sizeof(struct sis_raid_conf), ATA_R_READ)) { if (testing || bootverbose) device_printf(parent, "Silicon Integrated Systems read metadata failed\n"); } /* check for SiS magic */ if (meta->magic != SIS_MAGIC) { if (testing || bootverbose) device_printf(parent, "Silicon Integrated Systems check1 failed\n"); goto sis_out; } if (testing || bootverbose) ata_raid_sis_print_meta(meta); /* now convert SiS meta into our generic form */ for 
(array = 0; array < MAX_ARRAYS; array++) { if (!raidp[array]) { raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO); if (!raidp[array]) { device_printf(parent, "failed to allocate metadata storage\n"); goto sis_out; } } raid = raidp[array]; if (raid->format && (raid->format != AR_F_SIS_RAID)) continue; if ((raid->format == AR_F_SIS_RAID) && ((raid->magic_0 != meta->controller_pci_id) || (raid->magic_1 != meta->timestamp))) { continue; } switch (meta->type_total_disks & SIS_T_MASK) { case SIS_T_JBOD: raid->type = AR_T_JBOD; raid->width = (meta->type_total_disks & SIS_D_MASK); raid->total_sectors += SIS_LBA(parent); break; case SIS_T_RAID0: raid->type = AR_T_RAID0; raid->width = (meta->type_total_disks & SIS_D_MASK); if (!raid->total_sectors || (raid->total_sectors > (raid->width * SIS_LBA(parent)))) raid->total_sectors = raid->width * SIS_LBA(parent); break; case SIS_T_RAID1: raid->type = AR_T_RAID1; raid->width = 1; if (!raid->total_sectors || (raid->total_sectors > SIS_LBA(parent))) raid->total_sectors = SIS_LBA(parent); break; default: device_printf(parent, "Silicon Integrated Systems " "unknown RAID type 0x%08x\n", meta->magic); free(raidp[array], M_AR); raidp[array] = NULL; goto sis_out; } raid->magic_0 = meta->controller_pci_id; raid->magic_1 = meta->timestamp; raid->format = AR_F_SIS_RAID; raid->generation = 0; raid->interleave = meta->stripe_sectors; raid->total_disks = (meta->type_total_disks & SIS_D_MASK); raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = 0; raid->rebuild_lba = 0; raid->lun = array; /* XXX SOS if total_disks > 2 this doesn't float */ if (((meta->disks & SIS_D_MASTER) >> 4) == meta->disk_number) disk_number = 0; else disk_number = 1; for (drive = 0; drive < raid->total_disks; drive++) { raid->disks[drive].sectors = raid->total_sectors/raid->width; if (drive == disk_number) { raid->disks[disk_number].dev = parent; 
raid->disks[disk_number].flags = (AR_DF_ONLINE | AR_DF_PRESENT | AR_DF_ASSIGNED); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = disk_number; } } retval = 1; break; } sis_out: free(meta, M_AR); return retval; } static int ata_raid_sis_write_meta(struct ar_softc *rdp) { struct sis_raid_conf *meta; struct timeval timestamp; int disk, error = 0; if (!(meta = (struct sis_raid_conf *) malloc(sizeof(struct sis_raid_conf), M_AR, M_NOWAIT | M_ZERO))) { printf("ar%d: failed to allocate metadata storage\n", rdp->lun); return ENOMEM; } rdp->generation++; microtime(×tamp); meta->magic = SIS_MAGIC; /* XXX SOS if total_disks > 2 this doesn't float */ for (disk = 0; disk < rdp->total_disks; disk++) { if (rdp->disks[disk].dev) { struct ata_channel *ch = device_get_softc(device_get_parent(rdp->disks[disk].dev)); struct ata_device *atadev = device_get_softc(rdp->disks[disk].dev); int disk_number = 1 + ATA_DEV(atadev->unit) + (ch->unit << 1); meta->disks |= disk_number << ((1 - disk) << 2); } } switch (rdp->type) { case AR_T_JBOD: meta->type_total_disks = SIS_T_JBOD; break; case AR_T_RAID0: meta->type_total_disks = SIS_T_RAID0; break; case AR_T_RAID1: meta->type_total_disks = SIS_T_RAID1; break; default: free(meta, M_AR); return ENODEV; } meta->type_total_disks |= (rdp->total_disks & SIS_D_MASK); meta->stripe_sectors = rdp->interleave; meta->timestamp = timestamp.tv_sec; for (disk = 0; disk < rdp->total_disks; disk++) { if (rdp->disks[disk].dev) { struct ata_channel *ch = device_get_softc(device_get_parent(rdp->disks[disk].dev)); struct ata_device *atadev = device_get_softc(rdp->disks[disk].dev); meta->controller_pci_id = (pci_get_vendor(GRANDPARENT(rdp->disks[disk].dev)) << 16) | pci_get_device(GRANDPARENT(rdp->disks[disk].dev)); bcopy(atadev->param.model, meta->model, sizeof(meta->model)); - /* XXX SOS if total_disks > 2 this may not float */ + /* XXX SOS if total_disks > 2 this may not float */ meta->disk_number = 1 + ATA_DEV(atadev->unit) + (ch->unit << 1); if 
(testing || bootverbose) ata_raid_sis_print_meta(meta); if (ata_raid_rw(rdp->disks[disk].dev, SIS_LBA(rdp->disks[disk].dev), meta, sizeof(struct sis_raid_conf), ATA_R_WRITE | ATA_R_DIRECT)) { device_printf(rdp->disks[disk].dev, "write metadata failed\n"); error = EIO; } } } free(meta, M_AR); return error; } /* VIA Tech V-RAID Metadata */ static int ata_raid_via_read_meta(device_t dev, struct ar_softc **raidp) { struct ata_raid_subdisk *ars = device_get_softc(dev); device_t parent = device_get_parent(dev); struct via_raid_conf *meta; struct ar_softc *raid = NULL; u_int8_t checksum, *ptr; int array, count, disk, retval = 0; if (!(meta = (struct via_raid_conf *) malloc(sizeof(struct via_raid_conf), M_AR, M_NOWAIT | M_ZERO))) return ENOMEM; if (ata_raid_rw(parent, VIA_LBA(parent), meta, sizeof(struct via_raid_conf), ATA_R_READ)) { if (testing || bootverbose) device_printf(parent, "VIA read metadata failed\n"); goto via_out; } /* check if this is a VIA RAID struct */ if (meta->magic != VIA_MAGIC) { if (testing || bootverbose) device_printf(parent, "VIA check1 failed\n"); goto via_out; } /* calculate checksum and compare for valid */ for (checksum = 0, ptr = (u_int8_t *)meta, count = 0; count < 50; count++) checksum += *ptr++; if (checksum != meta->checksum) { if (testing || bootverbose) device_printf(parent, "VIA check2 failed\n"); goto via_out; } if (testing || bootverbose) ata_raid_via_print_meta(meta); /* now convert VIA meta into our generic form */ for (array = 0; array < MAX_ARRAYS; array++) { if (!raidp[array]) { raidp[array] = (struct ar_softc *)malloc(sizeof(struct ar_softc), M_AR, M_NOWAIT | M_ZERO); if (!raidp[array]) { device_printf(parent, "failed to allocate metadata storage\n"); goto via_out; } } raid = raidp[array]; if (raid->format && (raid->format != AR_F_VIA_RAID)) continue; if (raid->format == AR_F_VIA_RAID && (raid->magic_0 != meta->disks[0])) continue; switch (meta->type & VIA_T_MASK) { case VIA_T_RAID0: raid->type = AR_T_RAID0; raid->width = 
meta->stripe_layout & VIA_L_DISKS; if (!raid->total_sectors || (raid->total_sectors > (raid->width * meta->disk_sectors))) raid->total_sectors = raid->width * meta->disk_sectors; break; case VIA_T_RAID1: raid->type = AR_T_RAID1; raid->width = 1; raid->total_sectors = meta->disk_sectors; break; case VIA_T_RAID01: raid->type = AR_T_RAID01; raid->width = meta->stripe_layout & VIA_L_DISKS; if (!raid->total_sectors || (raid->total_sectors > (raid->width * meta->disk_sectors))) raid->total_sectors = raid->width * meta->disk_sectors; break; case VIA_T_RAID5: raid->type = AR_T_RAID5; raid->width = meta->stripe_layout & VIA_L_DISKS; if (!raid->total_sectors || (raid->total_sectors > ((raid->width - 1)*meta->disk_sectors))) raid->total_sectors = (raid->width - 1) * meta->disk_sectors; break; case VIA_T_SPAN: raid->type = AR_T_SPAN; raid->width = 1; raid->total_sectors += meta->disk_sectors; break; default: device_printf(parent,"VIA unknown RAID type 0x%02x\n", meta->type); free(raidp[array], M_AR); raidp[array] = NULL; goto via_out; } raid->magic_0 = meta->disks[0]; raid->format = AR_F_VIA_RAID; raid->generation = 0; raid->interleave = 0x08 << ((meta->stripe_layout & VIA_L_MASK) >> VIA_L_SHIFT); for (count = 0, disk = 0; disk < 8; disk++) if (meta->disks[disk]) count++; raid->total_disks = count; raid->heads = 255; raid->sectors = 63; raid->cylinders = raid->total_sectors / (63 * 255); raid->offset_sectors = 0; raid->rebuild_lba = 0; raid->lun = array; for (disk = 0; disk < raid->total_disks; disk++) { if (meta->disks[disk] == meta->disk_id) { raid->disks[disk].dev = parent; bcopy(&meta->disk_id, raid->disks[disk].serial, sizeof(u_int32_t)); raid->disks[disk].sectors = meta->disk_sectors; raid->disks[disk].flags = (AR_DF_ONLINE | AR_DF_PRESENT | AR_DF_ASSIGNED); ars->raid[raid->volume] = raid; ars->disk_number[raid->volume] = disk; retval = 1; break; } } break; } via_out: free(meta, M_AR); return retval; } static int ata_raid_via_write_meta(struct ar_softc *rdp) { struct 
via_raid_conf *meta; int disk, error = 0; if (!(meta = (struct via_raid_conf *) malloc(sizeof(struct via_raid_conf), M_AR, M_NOWAIT | M_ZERO))) { printf("ar%d: failed to allocate metadata storage\n", rdp->lun); return ENOMEM; } rdp->generation++; meta->magic = VIA_MAGIC; meta->dummy_0 = 0x02; switch (rdp->type) { case AR_T_SPAN: meta->type = VIA_T_SPAN; meta->stripe_layout = (rdp->total_disks & VIA_L_DISKS); break; case AR_T_RAID0: meta->type = VIA_T_RAID0; meta->stripe_layout = ((rdp->interleave >> 1) & VIA_L_MASK); meta->stripe_layout |= (rdp->total_disks & VIA_L_DISKS); break; case AR_T_RAID1: meta->type = VIA_T_RAID1; meta->stripe_layout = (rdp->total_disks & VIA_L_DISKS); break; case AR_T_RAID5: meta->type = VIA_T_RAID5; meta->stripe_layout = ((rdp->interleave >> 1) & VIA_L_MASK); meta->stripe_layout |= (rdp->total_disks & VIA_L_DISKS); break; case AR_T_RAID01: meta->type = VIA_T_RAID01; meta->stripe_layout = ((rdp->interleave >> 1) & VIA_L_MASK); meta->stripe_layout |= (rdp->width & VIA_L_DISKS); break; default: free(meta, M_AR); return ENODEV; } - meta->type |= VIA_T_BOOTABLE; /* XXX SOS */ + meta->type |= VIA_T_BOOTABLE; /* XXX SOS */ meta->disk_sectors = rdp->total_sectors / (rdp->width - (rdp->type == AR_RAID5)); for (disk = 0; disk < rdp->total_disks; disk++) meta->disks[disk] = (u_int32_t)(uintptr_t)rdp->disks[disk].dev; for (disk = 0; disk < rdp->total_disks; disk++) { if (rdp->disks[disk].dev) { u_int8_t *ptr; int count; meta->disk_index = disk * sizeof(u_int32_t); if (rdp->type == AR_T_RAID01) meta->disk_index = ((meta->disk_index & 0x08) << 2) | (meta->disk_index & ~0x08); meta->disk_id = meta->disks[disk]; meta->checksum = 0; for (ptr = (u_int8_t *)meta, count = 0; count < 50; count++) meta->checksum += *ptr++; if (testing || bootverbose) ata_raid_via_print_meta(meta); if (ata_raid_rw(rdp->disks[disk].dev, VIA_LBA(rdp->disks[disk].dev), meta, sizeof(struct via_raid_conf), ATA_R_WRITE | ATA_R_DIRECT)) { device_printf(rdp->disks[disk].dev, "write 
metadata failed\n");
		error = EIO;
	    }
	}
    }
    free(meta, M_AR);
    return error;
}

/*
 * Allocate and initialize an ATA request for the given bio.
 * Returns NULL when the request zone is exhausted; on success the request
 * carries the array softc in ->driver and ata_raid_done as completion
 * callback.
 */
static struct ata_request *
ata_raid_init_request(struct ar_softc *rdp, struct bio *bio)
{
    struct ata_request *request;

    if (!(request = ata_alloc_request())) {
	printf("FAILURE - out of memory in ata_raid_init_request\n");
	return NULL;
    }
    request->timeout = 5;
    request->retries = 2;
    request->callback = ata_raid_done;
    request->driver = rdp;
    request->bio = bio;
    /* Only BIO_READ/BIO_WRITE are mapped to request flags here; other bio
       commands fall through with flags as left by ata_alloc_request(). */
    switch (request->bio->bio_cmd) {
    case BIO_READ:
	request->flags = ATA_R_READ;
	break;
    case BIO_WRITE:
	request->flags = ATA_R_WRITE;
	break;
    }
    return request;
}

/*
 * Choose the concrete ATA command (DMA, multi-sector or plain PIO variant)
 * for a member-disk request and queue it to the channel.
 * Returns EIO (and frees the request) when the request is neither a read
 * nor a write; 0 otherwise.
 */
static int
ata_raid_send_request(struct ata_request *request)
{
    struct ata_device *atadev = device_get_softc(request->dev);

    request->transfersize = min(request->bytecount, atadev->max_iosize);
    if (request->flags & ATA_R_READ) {
	if (atadev->mode >= ATA_DMA) {
	    request->flags |= ATA_R_DMA;
	    request->u.ata.command = ATA_READ_DMA;
	}
	else if (atadev->max_iosize > DEV_BSIZE)
	    request->u.ata.command = ATA_READ_MUL;
	else
	    request->u.ata.command = ATA_READ;
    }
    else if (request->flags & ATA_R_WRITE) {
	if (atadev->mode >= ATA_DMA) {
	    request->flags |= ATA_R_DMA;
	    request->u.ata.command = ATA_WRITE_DMA;
	}
	else if (atadev->max_iosize > DEV_BSIZE)
	    request->u.ata.command = ATA_WRITE_MUL;
	else
	    request->u.ata.command = ATA_WRITE;
    }
    else {
	device_printf(request->dev, "FAILURE - unknown IO operation\n");
	ata_free_request(request);
	return EIO;
    }
    request->flags |= (ATA_R_ORDERED | ATA_R_THREAD);
    ata_queue_request(request);
    return 0;
}

/*
 * Synchronous sector I/O helper used for metadata reads/writes.
 * bcount must be a multiple of DEV_BSIZE; flags selects ATA_R_READ or
 * ATA_R_WRITE (plus e.g. ATA_R_DIRECT).
 */
static int
ata_raid_rw(device_t dev, u_int64_t lba, void *data, u_int bcount, int flags)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_request *request;
    int error;

    if (bcount % DEV_BSIZE) {
	device_printf(dev, "FAILURE - transfers must be modulo sectorsize\n");
	return ENOMEM;
    }
    if (!(request = ata_alloc_request())) {
	device_printf(dev, "FAILURE - out of memory in ata_raid_rw\n");
	return ENOMEM;
    }

    /* setup request */
    request->dev = dev;
    request->timeout = 10;
request->retries = 0;
    request->data = data;
    request->bytecount = bcount;
    request->transfersize = DEV_BSIZE;
    request->u.ata.lba = lba;
    request->u.ata.count = request->bytecount / DEV_BSIZE;
    request->flags = flags;
    if (flags & ATA_R_READ) {
	if (atadev->mode >= ATA_DMA) {
	    request->u.ata.command = ATA_READ_DMA;
	    request->flags |= ATA_R_DMA;
	}
	else
	    request->u.ata.command = ATA_READ;
	ata_queue_request(request);
    }
    else if (flags & ATA_R_WRITE) {
	if (atadev->mode >= ATA_DMA) {
	    request->u.ata.command = ATA_WRITE_DMA;
	    request->flags |= ATA_R_DMA;
	}
	else
	    request->u.ata.command = ATA_WRITE;
	ata_queue_request(request);
    }
    else {
	device_printf(dev, "FAILURE - unknown IO operation\n");
	request->result = EIO;
    }
    /* NOTE(review): result is read right after ata_queue_request(); assumes
       the queue call completes synchronously when no callback is set —
       confirm against ata_queue_request() in ata-queue.c. */
    error = request->result;
    ata_free_request(request);
    return error;
}

/*
 * module handling
 */
/* Quiet probe: subdisk instances always match under their "ad" parent. */
static int
ata_raid_subdisk_probe(device_t dev)
{
    device_quiet(dev);
    return 0;
}

/* Clear per-volume membership state, then scan the parent disk for
   on-disk RAID metadata. */
static int
ata_raid_subdisk_attach(device_t dev)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    int volume;

    for (volume = 0; volume < MAX_VOLUMES; volume++) {
	ars->raid[volume] = NULL;
	ars->disk_number[volume] = -1;
    }
    ata_raid_read_metadata(dev);
    return 0;
}

/* Mark this member absent/offline in every array it belongs to and let the
   array re-evaluate its status. */
static int
ata_raid_subdisk_detach(device_t dev)
{
    struct ata_raid_subdisk *ars = device_get_softc(dev);
    int volume;

    for (volume = 0; volume < MAX_VOLUMES; volume++) {
	if (ars->raid[volume]) {
	    ars->raid[volume]->disks[ars->disk_number[volume]].flags &=
		~(AR_DF_PRESENT | AR_DF_ONLINE);
	    ars->raid[volume]->disks[ars->disk_number[volume]].dev = NULL;
	    ata_raid_config_changed(ars->raid[volume], 1);
	    ars->raid[volume] = NULL;
	    ars->disk_number[volume] = -1;
	}
    }
    return 0;
}

static device_method_t ata_raid_sub_methods[] = {
    /* device interface */
    DEVMETHOD(device_probe, ata_raid_subdisk_probe),
    DEVMETHOD(device_attach, ata_raid_subdisk_attach),
    DEVMETHOD(device_detach, ata_raid_subdisk_detach),
    { 0, 0 }
};

static driver_t ata_raid_sub_driver = {
    "subdisk",
    ata_raid_sub_methods,
    sizeof(struct ata_raid_subdisk)
};

DRIVER_MODULE(subdisk, ad,
ata_raid_sub_driver, ata_raid_sub_devclass, NULL, NULL);

/*
 * Module load/unload handler.
 * MOD_LOAD: attach all arrays discovered during subdisk probing and hook
 * up the ioctl entry point.  MOD_UNLOAD: destroy the disk devices of all
 * active arrays and unhook the ioctl entry point.
 */
static int
ata_raid_module_event_handler(module_t mod, int what, void *arg)
{
    int i;

    switch (what) {
    case MOD_LOAD:
	if (testing || bootverbose)
	    printf("ATA PseudoRAID loaded\n");
#if 0
	/* setup table to hold metadata for all ATA PseudoRAID arrays */
	ata_raid_arrays = malloc(sizeof(struct ar_soft *) * MAX_ARRAYS,
				 M_AR, M_NOWAIT | M_ZERO);
	if (!ata_raid_arrays) {
	    printf("ataraid: no memory for metadata storage\n");
	    return ENOMEM;
	}
#endif
	/* attach found PseudoRAID arrays */
	for (i = 0; i < MAX_ARRAYS; i++) {
	    struct ar_softc *rdp = ata_raid_arrays[i];

	    if (!rdp || !rdp->format)
		continue;
	    if (testing || bootverbose)
		ata_raid_print_meta(rdp);
	    ata_raid_attach(rdp, 0);
	}
	ata_raid_ioctl_func = ata_raid_ioctl;
	return 0;

    case MOD_UNLOAD:
	/* detach found PseudoRAID arrays */
	for (i = 0; i < MAX_ARRAYS; i++) {
	    struct ar_softc *rdp = ata_raid_arrays[i];

	    if (!rdp || !rdp->status)
		continue;
	    if (rdp->disk)
		disk_destroy(rdp->disk);
	}
	if (testing || bootverbose)
	    printf("ATA PseudoRAID unloaded\n");
#if 0
	free(ata_raid_arrays, M_AR);
#endif
	ata_raid_ioctl_func = NULL;
	return 0;

    default:
	return EOPNOTSUPP;
    }
}

static moduledata_t ata_raid_moduledata =
    { "ataraid", ata_raid_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_raid_moduledata, SI_SUB_RAID, SI_ORDER_FIRST);
MODULE_VERSION(ataraid, 1);
MODULE_DEPEND(ataraid, ata, 1, 1, 1);
MODULE_DEPEND(ataraid, ad, 1, 1, 1);

/* Human-readable name of an array's on-disk metadata format. */
static char *
ata_raid_format(struct ar_softc *rdp)
{
    switch (rdp->format) {
    case AR_F_FREEBSD_RAID:
	return "FreeBSD PseudoRAID";
    case AR_F_ADAPTEC_RAID:
	return "Adaptec HostRAID";
    case AR_F_HPTV2_RAID:
	return "HighPoint v2 RocketRAID";
    case AR_F_HPTV3_RAID:
	return "HighPoint v3 RocketRAID";
    case AR_F_INTEL_RAID:
	return "Intel MatrixRAID";
    case AR_F_ITE_RAID:
	return "Integrated Technology Express";
    case AR_F_LSIV2_RAID:
	return "LSILogic v2 MegaRAID";
    case AR_F_LSIV3_RAID:
	return "LSILogic v3 MegaRAID";
    case AR_F_NVIDIA_RAID:
	return "nVidia MediaShield";
    case
AR_F_PROMISE_RAID: return "Promise Fasttrak"; case AR_F_SII_RAID: return "Silicon Image Medley"; case AR_F_SIS_RAID: return "Silicon Integrated Systems"; case AR_F_VIA_RAID: return "VIA Tech V-RAID"; default: return "UNKNOWN"; } } static char * ata_raid_type(struct ar_softc *rdp) { switch (rdp->type) { case AR_T_JBOD: return "JBOD"; case AR_T_SPAN: return "SPAN"; case AR_T_RAID0: return "RAID0"; case AR_T_RAID1: return "RAID1"; case AR_T_RAID3: return "RAID3"; case AR_T_RAID4: return "RAID4"; case AR_T_RAID5: return "RAID5"; case AR_T_RAID01: return "RAID0+1"; default: return "UNKNOWN"; } } static char * ata_raid_flags(struct ar_softc *rdp) { switch (rdp->status & (AR_S_READY | AR_S_DEGRADED | AR_S_REBUILDING)) { case AR_S_READY: return "READY"; case AR_S_READY | AR_S_DEGRADED: return "DEGRADED"; case AR_S_READY | AR_S_REBUILDING: case AR_S_READY | AR_S_DEGRADED | AR_S_REBUILDING: return "REBUILDING"; default: return "BROKEN"; } } /* debugging gunk */ static void ata_raid_print_meta(struct ar_softc *raid) { int i; printf("********** ATA PseudoRAID ar%d Metadata **********\n", raid->lun); printf("=================================================\n"); printf("format %s\n", ata_raid_format(raid)); printf("type %s\n", ata_raid_type(raid)); printf("flags 0x%02x %b\n", raid->status, raid->status, "\20\3REBUILDING\2DEGRADED\1READY\n"); printf("magic_0 0x%016llx\n",(unsigned long long)raid->magic_0); printf("magic_1 0x%016llx\n",(unsigned long long)raid->magic_1); printf("generation %u\n", raid->generation); printf("total_sectors %llu\n", (unsigned long long)raid->total_sectors); printf("offset_sectors %llu\n", (unsigned long long)raid->offset_sectors); printf("heads %u\n", raid->heads); printf("sectors %u\n", raid->sectors); printf("cylinders %u\n", raid->cylinders); printf("width %u\n", raid->width); printf("interleave %u\n", raid->interleave); printf("total_disks %u\n", raid->total_disks); for (i = 0; i < raid->total_disks; i++) { printf(" disk %d: flags = 0x%02x %b\n", 
i, raid->disks[i].flags, raid->disks[i].flags, "\20\4ONLINE\3SPARE\2ASSIGNED\1PRESENT\n"); if (raid->disks[i].dev) { printf(" "); device_printf(raid->disks[i].dev, " sectors %lld\n", (long long)raid->disks[i].sectors); } } printf("=================================================\n"); } static char * ata_raid_adaptec_type(int type) { static char buffer[16]; switch (type) { case ADP_T_RAID0: return "RAID0"; case ADP_T_RAID1: return "RAID1"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_adaptec_print_meta(struct adaptec_raid_conf *meta) { int i; printf("********* ATA Adaptec HostRAID Metadata *********\n"); printf("magic_0 <0x%08x>\n", be32toh(meta->magic_0)); printf("generation 0x%08x\n", be32toh(meta->generation)); printf("dummy_0 0x%04x\n", be16toh(meta->dummy_0)); printf("total_configs %u\n", be16toh(meta->total_configs)); printf("dummy_1 0x%04x\n", be16toh(meta->dummy_1)); printf("checksum 0x%04x\n", be16toh(meta->checksum)); printf("dummy_2 0x%08x\n", be32toh(meta->dummy_2)); printf("dummy_3 0x%08x\n", be32toh(meta->dummy_3)); printf("flags 0x%08x\n", be32toh(meta->flags)); printf("timestamp 0x%08x\n", be32toh(meta->timestamp)); printf("dummy_4 0x%08x 0x%08x 0x%08x 0x%08x\n", be32toh(meta->dummy_4[0]), be32toh(meta->dummy_4[1]), be32toh(meta->dummy_4[2]), be32toh(meta->dummy_4[3])); printf("dummy_5 0x%08x 0x%08x 0x%08x 0x%08x\n", be32toh(meta->dummy_5[0]), be32toh(meta->dummy_5[1]), be32toh(meta->dummy_5[2]), be32toh(meta->dummy_5[3])); for (i = 0; i < be16toh(meta->total_configs); i++) { printf(" %d total_disks %u\n", i, be16toh(meta->configs[i].disk_number)); printf(" %d generation %u\n", i, be16toh(meta->configs[i].generation)); printf(" %d magic_0 0x%08x\n", i, be32toh(meta->configs[i].magic_0)); printf(" %d dummy_0 0x%02x\n", i, meta->configs[i].dummy_0); printf(" %d type %s\n", i, ata_raid_adaptec_type(meta->configs[i].type)); printf(" %d dummy_1 0x%02x\n", i, meta->configs[i].dummy_1); printf(" %d flags %d\n", 
i, be32toh(meta->configs[i].flags)); printf(" %d dummy_2 0x%02x\n", i, meta->configs[i].dummy_2); printf(" %d dummy_3 0x%02x\n", i, meta->configs[i].dummy_3); printf(" %d dummy_4 0x%02x\n", i, meta->configs[i].dummy_4); printf(" %d dummy_5 0x%02x\n", i, meta->configs[i].dummy_5); printf(" %d disk_number %u\n", i, be32toh(meta->configs[i].disk_number)); printf(" %d dummy_6 0x%08x\n", i, be32toh(meta->configs[i].dummy_6)); printf(" %d sectors %u\n", i, be32toh(meta->configs[i].sectors)); printf(" %d stripe_shift %u\n", i, be16toh(meta->configs[i].stripe_shift)); printf(" %d dummy_7 0x%08x\n", i, be32toh(meta->configs[i].dummy_7)); printf(" %d dummy_8 0x%08x 0x%08x 0x%08x 0x%08x\n", i, be32toh(meta->configs[i].dummy_8[0]), be32toh(meta->configs[i].dummy_8[1]), be32toh(meta->configs[i].dummy_8[2]), be32toh(meta->configs[i].dummy_8[3])); printf(" %d name <%s>\n", i, meta->configs[i].name); } printf("magic_1 <0x%08x>\n", be32toh(meta->magic_1)); printf("magic_2 <0x%08x>\n", be32toh(meta->magic_2)); printf("magic_3 <0x%08x>\n", be32toh(meta->magic_3)); printf("magic_4 <0x%08x>\n", be32toh(meta->magic_4)); printf("=================================================\n"); } static char * ata_raid_hptv2_type(int type) { static char buffer[16]; switch (type) { case HPTV2_T_RAID0: return "RAID0"; case HPTV2_T_RAID1: return "RAID1"; case HPTV2_T_RAID01_RAID0: return "RAID01_RAID0"; case HPTV2_T_SPAN: return "SPAN"; case HPTV2_T_RAID_3: return "RAID3"; case HPTV2_T_RAID_5: return "RAID5"; case HPTV2_T_JBOD: return "JBOD"; case HPTV2_T_RAID01_RAID1: return "RAID01_RAID1"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_hptv2_print_meta(struct hptv2_raid_conf *meta) { int i; printf("****** ATA Highpoint V2 RocketRAID Metadata *****\n"); printf("magic 0x%08x\n", meta->magic); printf("magic_0 0x%08x\n", meta->magic_0); printf("magic_1 0x%08x\n", meta->magic_1); printf("order 0x%08x\n", meta->order); printf("array_width %u\n", 
meta->array_width); printf("stripe_shift %u\n", meta->stripe_shift); printf("type %s\n", ata_raid_hptv2_type(meta->type)); printf("disk_number %u\n", meta->disk_number); printf("total_sectors %u\n", meta->total_sectors); printf("disk_mode 0x%08x\n", meta->disk_mode); printf("boot_mode 0x%08x\n", meta->boot_mode); printf("boot_disk 0x%02x\n", meta->boot_disk); printf("boot_protect 0x%02x\n", meta->boot_protect); printf("log_entries 0x%02x\n", meta->error_log_entries); printf("log_index 0x%02x\n", meta->error_log_index); if (meta->error_log_entries) { printf(" timestamp reason disk status sectors lba\n"); for (i = meta->error_log_index; i < meta->error_log_index + meta->error_log_entries; i++) printf(" 0x%08x 0x%02x 0x%02x 0x%02x 0x%02x 0x%08x\n", meta->errorlog[i%32].timestamp, meta->errorlog[i%32].reason, meta->errorlog[i%32].disk, meta->errorlog[i%32].status, meta->errorlog[i%32].sectors, meta->errorlog[i%32].lba); } printf("rebuild_lba 0x%08x\n", meta->rebuild_lba); printf("dummy_1 0x%02x\n", meta->dummy_1); printf("name_1 <%.15s>\n", meta->name_1); printf("dummy_2 0x%02x\n", meta->dummy_2); printf("name_2 <%.15s>\n", meta->name_2); printf("=================================================\n"); } static char * ata_raid_hptv3_type(int type) { static char buffer[16]; switch (type) { case HPTV3_T_SPARE: return "SPARE"; case HPTV3_T_JBOD: return "JBOD"; case HPTV3_T_SPAN: return "SPAN"; case HPTV3_T_RAID0: return "RAID0"; case HPTV3_T_RAID1: return "RAID1"; case HPTV3_T_RAID3: return "RAID3"; case HPTV3_T_RAID5: return "RAID5"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_hptv3_print_meta(struct hptv3_raid_conf *meta) { int i; printf("****** ATA Highpoint V3 RocketRAID Metadata *****\n"); printf("magic 0x%08x\n", meta->magic); printf("magic_0 0x%08x\n", meta->magic_0); printf("checksum_0 0x%02x\n", meta->checksum_0); printf("mode 0x%02x\n", meta->mode); printf("user_mode 0x%02x\n", meta->user_mode); printf("config_entries 
0x%02x\n", meta->config_entries); for (i = 0; i < meta->config_entries; i++) { printf("config %d:\n", i); printf(" total_sectors %llu\n", (unsigned long long)(meta->configs[0].total_sectors + ((u_int64_t)meta->configs_high[0].total_sectors << 32))); printf(" type %s\n", ata_raid_hptv3_type(meta->configs[i].type)); printf(" total_disks %u\n", meta->configs[i].total_disks); printf(" disk_number %u\n", meta->configs[i].disk_number); printf(" stripe_shift %u\n", meta->configs[i].stripe_shift); printf(" status %b\n", meta->configs[i].status, "\20\2RAID5\1NEED_REBUILD\n"); printf(" critical_disks %u\n", meta->configs[i].critical_disks); printf(" rebuild_lba %llu\n", (unsigned long long)(meta->configs_high[0].rebuild_lba + ((u_int64_t)meta->configs_high[0].rebuild_lba << 32))); } printf("name <%.16s>\n", meta->name); printf("timestamp 0x%08x\n", meta->timestamp); printf("description <%.16s>\n", meta->description); printf("creator <%.16s>\n", meta->creator); printf("checksum_1 0x%02x\n", meta->checksum_1); printf("dummy_0 0x%02x\n", meta->dummy_0); printf("dummy_1 0x%02x\n", meta->dummy_1); printf("flags %b\n", meta->flags, "\20\4RCACHE\3WCACHE\2NCQ\1TCQ\n"); printf("=================================================\n"); } static char * ata_raid_intel_type(int type) { static char buffer[16]; switch (type) { case INTEL_T_RAID0: return "RAID0"; case INTEL_T_RAID1: return "RAID1"; case INTEL_T_RAID5: return "RAID5"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_intel_print_meta(struct intel_raid_conf *meta) { struct intel_raid_mapping *map; int i, j; printf("********* ATA Intel MatrixRAID Metadata *********\n"); printf("intel_id <%.24s>\n", meta->intel_id); printf("version <%.6s>\n", meta->version); printf("checksum 0x%08x\n", meta->checksum); printf("config_size 0x%08x\n", meta->config_size); printf("config_id 0x%08x\n", meta->config_id); printf("generation 0x%08x\n", meta->generation); printf("total_disks %u\n", 
meta->total_disks); printf("total_volumes %u\n", meta->total_volumes); printf("DISK# serial disk_sectors disk_id flags\n"); for (i = 0; i < meta->total_disks; i++ ) { printf(" %d <%.16s> %u 0x%08x 0x%08x\n", i, meta->disk[i].serial, meta->disk[i].sectors, meta->disk[i].id, meta->disk[i].flags); } map = (struct intel_raid_mapping *)&meta->disk[meta->total_disks]; for (j = 0; j < meta->total_volumes; j++) { printf("name %.16s\n", map->name); printf("total_sectors %llu\n", (unsigned long long)map->total_sectors); printf("state %u\n", map->state); printf("reserved %u\n", map->reserved); printf("offset %u\n", map->offset); printf("disk_sectors %u\n", map->disk_sectors); printf("stripe_count %u\n", map->stripe_count); printf("stripe_sectors %u\n", map->stripe_sectors); printf("status %u\n", map->status); printf("type %s\n", ata_raid_intel_type(map->type)); printf("total_disks %u\n", map->total_disks); printf("magic[0] 0x%02x\n", map->magic[0]); printf("magic[1] 0x%02x\n", map->magic[1]); printf("magic[2] 0x%02x\n", map->magic[2]); for (i = 0; i < map->total_disks; i++ ) { printf(" disk %d at disk_idx 0x%08x\n", i, map->disk_idx[i]); } - map = (struct intel_raid_mapping *)&map->disk_idx[map->total_disks]; + map = (struct intel_raid_mapping *)&map->disk_idx[map->total_disks]; } printf("=================================================\n"); } static char * ata_raid_ite_type(int type) { static char buffer[16]; switch (type) { case ITE_T_RAID0: return "RAID0"; case ITE_T_RAID1: return "RAID1"; case ITE_T_RAID01: return "RAID0+1"; case ITE_T_SPAN: return "SPAN"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_ite_print_meta(struct ite_raid_conf *meta) { printf("*** ATA Integrated Technology Express Metadata **\n"); printf("ite_id <%.40s>\n", meta->ite_id); printf("timestamp_0 %04x/%02x/%02x %02x:%02x:%02x.%02x\n", *((u_int16_t *)meta->timestamp_0), meta->timestamp_0[2], meta->timestamp_0[3], meta->timestamp_0[5], meta->timestamp_0[4], 
meta->timestamp_0[7], meta->timestamp_0[6]); printf("total_sectors %lld\n", (unsigned long long)meta->total_sectors); printf("type %s\n", ata_raid_ite_type(meta->type)); printf("stripe_1kblocks %u\n", meta->stripe_1kblocks); printf("timestamp_1 %04x/%02x/%02x %02x:%02x:%02x.%02x\n", *((u_int16_t *)meta->timestamp_1), meta->timestamp_1[2], meta->timestamp_1[3], meta->timestamp_1[5], meta->timestamp_1[4], meta->timestamp_1[7], meta->timestamp_1[6]); printf("stripe_sectors %u\n", meta->stripe_sectors); printf("array_width %u\n", meta->array_width); printf("disk_number %u\n", meta->disk_number); printf("disk_sectors %u\n", meta->disk_sectors); printf("=================================================\n"); } static char * ata_raid_lsiv2_type(int type) { static char buffer[16]; switch (type) { case LSIV2_T_RAID0: return "RAID0"; case LSIV2_T_RAID1: return "RAID1"; case LSIV2_T_SPARE: return "SPARE"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_lsiv2_print_meta(struct lsiv2_raid_conf *meta) { int i; printf("******* ATA LSILogic V2 MegaRAID Metadata *******\n"); printf("lsi_id <%s>\n", meta->lsi_id); printf("dummy_0 0x%02x\n", meta->dummy_0); printf("flags 0x%02x\n", meta->flags); printf("version 0x%04x\n", meta->version); printf("config_entries 0x%02x\n", meta->config_entries); printf("raid_count 0x%02x\n", meta->raid_count); printf("total_disks 0x%02x\n", meta->total_disks); printf("dummy_1 0x%02x\n", meta->dummy_1); printf("dummy_2 0x%04x\n", meta->dummy_2); for (i = 0; i < meta->config_entries; i++) { printf(" type %s\n", ata_raid_lsiv2_type(meta->configs[i].raid.type)); printf(" dummy_0 %02x\n", meta->configs[i].raid.dummy_0); printf(" stripe_sectors %u\n", meta->configs[i].raid.stripe_sectors); printf(" array_width %u\n", meta->configs[i].raid.array_width); printf(" disk_count %u\n", meta->configs[i].raid.disk_count); printf(" config_offset %u\n", meta->configs[i].raid.config_offset); printf(" dummy_1 %u\n", 
meta->configs[i].raid.dummy_1); printf(" flags %02x\n", meta->configs[i].raid.flags); printf(" total_sectors %u\n", meta->configs[i].raid.total_sectors); } printf("disk_number 0x%02x\n", meta->disk_number); printf("raid_number 0x%02x\n", meta->raid_number); printf("timestamp 0x%08x\n", meta->timestamp); printf("=================================================\n"); } static char * ata_raid_lsiv3_type(int type) { static char buffer[16]; switch (type) { case LSIV3_T_RAID0: return "RAID0"; case LSIV3_T_RAID1: return "RAID1"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_lsiv3_print_meta(struct lsiv3_raid_conf *meta) { int i; printf("******* ATA LSILogic V3 MegaRAID Metadata *******\n"); printf("lsi_id <%.6s>\n", meta->lsi_id); printf("dummy_0 0x%04x\n", meta->dummy_0); printf("version 0x%04x\n", meta->version); printf("dummy_0 0x%04x\n", meta->dummy_1); printf("RAID configs:\n"); for (i = 0; i < 8; i++) { if (meta->raid[i].total_disks) { printf("%02d stripe_pages %u\n", i, meta->raid[i].stripe_pages); printf("%02d type %s\n", i, ata_raid_lsiv3_type(meta->raid[i].type)); printf("%02d total_disks %u\n", i, meta->raid[i].total_disks); printf("%02d array_width %u\n", i, meta->raid[i].array_width); printf("%02d sectors %u\n", i, meta->raid[i].sectors); printf("%02d offset %u\n", i, meta->raid[i].offset); printf("%02d device 0x%02x\n", i, meta->raid[i].device); } } printf("DISK configs:\n"); for (i = 0; i < 6; i++) { if (meta->disk[i].disk_sectors) { printf("%02d disk_sectors %u\n", i, meta->disk[i].disk_sectors); printf("%02d flags 0x%02x\n", i, meta->disk[i].flags); } } printf("device 0x%02x\n", meta->device); printf("timestamp 0x%08x\n", meta->timestamp); printf("checksum_1 0x%02x\n", meta->checksum_1); printf("=================================================\n"); } static char * ata_raid_nvidia_type(int type) { static char buffer[16]; switch (type) { case NV_T_SPAN: return "SPAN"; case NV_T_RAID0: return "RAID0"; case 
NV_T_RAID1: return "RAID1"; case NV_T_RAID3: return "RAID3"; case NV_T_RAID5: return "RAID5"; case NV_T_RAID01: return "RAID0+1"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_nvidia_print_meta(struct nvidia_raid_conf *meta) { printf("******** ATA nVidia MediaShield Metadata ********\n"); printf("nvidia_id <%.8s>\n", meta->nvidia_id); printf("config_size %d\n", meta->config_size); printf("checksum 0x%08x\n", meta->checksum); printf("version 0x%04x\n", meta->version); printf("disk_number %d\n", meta->disk_number); printf("dummy_0 0x%02x\n", meta->dummy_0); printf("total_sectors %d\n", meta->total_sectors); printf("sectors_size %d\n", meta->sector_size); printf("serial %.16s\n", meta->serial); printf("revision %.4s\n", meta->revision); printf("dummy_1 0x%08x\n", meta->dummy_1); printf("magic_0 0x%08x\n", meta->magic_0); printf("magic_1 0x%016llx\n",(unsigned long long)meta->magic_1); printf("magic_2 0x%016llx\n",(unsigned long long)meta->magic_2); printf("flags 0x%02x\n", meta->flags); printf("array_width %d\n", meta->array_width); printf("total_disks %d\n", meta->total_disks); printf("dummy_2 0x%02x\n", meta->dummy_2); printf("type %s\n", ata_raid_nvidia_type(meta->type)); printf("dummy_3 0x%04x\n", meta->dummy_3); printf("stripe_sectors %d\n", meta->stripe_sectors); printf("stripe_bytes %d\n", meta->stripe_bytes); printf("stripe_shift %d\n", meta->stripe_shift); printf("stripe_mask 0x%08x\n", meta->stripe_mask); printf("stripe_sizesectors %d\n", meta->stripe_sizesectors); printf("stripe_sizebytes %d\n", meta->stripe_sizebytes); printf("rebuild_lba %d\n", meta->rebuild_lba); printf("dummy_4 0x%08x\n", meta->dummy_4); printf("dummy_5 0x%08x\n", meta->dummy_5); printf("status 0x%08x\n", meta->status); printf("=================================================\n"); } static char * ata_raid_promise_type(int type) { static char buffer[16]; switch (type) { case PR_T_RAID0: return "RAID0"; case PR_T_RAID1: return "RAID1"; case 
PR_T_RAID3: return "RAID3"; case PR_T_RAID5: return "RAID5"; case PR_T_SPAN: return "SPAN"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_promise_print_meta(struct promise_raid_conf *meta) { int i; printf("********* ATA Promise FastTrak Metadata *********\n"); printf("promise_id <%s>\n", meta->promise_id); printf("dummy_0 0x%08x\n", meta->dummy_0); printf("magic_0 0x%016llx\n",(unsigned long long)meta->magic_0); printf("magic_1 0x%04x\n", meta->magic_1); printf("magic_2 0x%08x\n", meta->magic_2); printf("integrity 0x%08x %b\n", meta->raid.integrity, meta->raid.integrity, "\20\10VALID\n" ); printf("flags 0x%02x %b\n", meta->raid.flags, meta->raid.flags, "\20\10READY\7DOWN\6REDIR\5DUPLICATE\4SPARE" "\3ASSIGNED\2ONLINE\1VALID\n"); printf("disk_number %d\n", meta->raid.disk_number); printf("channel 0x%02x\n", meta->raid.channel); printf("device 0x%02x\n", meta->raid.device); printf("magic_0 0x%016llx\n", (unsigned long long)meta->raid.magic_0); printf("disk_offset %u\n", meta->raid.disk_offset); printf("disk_sectors %u\n", meta->raid.disk_sectors); printf("rebuild_lba 0x%08x\n", meta->raid.rebuild_lba); printf("generation 0x%04x\n", meta->raid.generation); printf("status 0x%02x %b\n", meta->raid.status, meta->raid.status, "\20\6MARKED\5DEGRADED\4READY\3INITED\2ONLINE\1VALID\n"); printf("type %s\n", ata_raid_promise_type(meta->raid.type)); printf("total_disks %u\n", meta->raid.total_disks); printf("stripe_shift %u\n", meta->raid.stripe_shift); printf("array_width %u\n", meta->raid.array_width); printf("array_number %u\n", meta->raid.array_number); printf("total_sectors %u\n", meta->raid.total_sectors); printf("cylinders %u\n", meta->raid.cylinders); printf("heads %u\n", meta->raid.heads); printf("sectors %u\n", meta->raid.sectors); printf("magic_1 0x%016llx\n", (unsigned long long)meta->raid.magic_1); printf("DISK# flags dummy_0 channel device magic_0\n"); for (i = 0; i < 8; i++) { printf(" %d %b 0x%02x 0x%02x 0x%02x ", i, 
meta->raid.disk[i].flags, "\20\10READY\7DOWN\6REDIR\5DUPLICATE\4SPARE" "\3ASSIGNED\2ONLINE\1VALID\n", meta->raid.disk[i].dummy_0, meta->raid.disk[i].channel, meta->raid.disk[i].device); printf("0x%016llx\n", (unsigned long long)meta->raid.disk[i].magic_0); } printf("checksum 0x%08x\n", meta->checksum); printf("=================================================\n"); } static char * ata_raid_sii_type(int type) { static char buffer[16]; switch (type) { case SII_T_RAID0: return "RAID0"; case SII_T_RAID1: return "RAID1"; case SII_T_RAID01: return "RAID0+1"; case SII_T_SPARE: return "SPARE"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_sii_print_meta(struct sii_raid_conf *meta) { printf("******* ATA Silicon Image Medley Metadata *******\n"); printf("total_sectors %llu\n", (unsigned long long)meta->total_sectors); printf("dummy_0 0x%04x\n", meta->dummy_0); printf("dummy_1 0x%04x\n", meta->dummy_1); printf("controller_pci_id 0x%08x\n", meta->controller_pci_id); printf("version_minor 0x%04x\n", meta->version_minor); printf("version_major 0x%04x\n", meta->version_major); printf("timestamp 20%02x/%02x/%02x %02x:%02x:%02x\n", meta->timestamp[5], meta->timestamp[4], meta->timestamp[3], meta->timestamp[2], meta->timestamp[1], meta->timestamp[0]); printf("stripe_sectors %u\n", meta->stripe_sectors); printf("dummy_2 0x%04x\n", meta->dummy_2); printf("disk_number %u\n", meta->disk_number); printf("type %s\n", ata_raid_sii_type(meta->type)); printf("raid0_disks %u\n", meta->raid0_disks); printf("raid0_ident %u\n", meta->raid0_ident); printf("raid1_disks %u\n", meta->raid1_disks); printf("raid1_ident %u\n", meta->raid1_ident); printf("rebuild_lba %llu\n", (unsigned long long)meta->rebuild_lba); printf("generation 0x%08x\n", meta->generation); printf("status 0x%02x %b\n", meta->status, meta->status, "\20\1READY\n"); printf("base_raid1_position %02x\n", meta->base_raid1_position); printf("base_raid0_position %02x\n", 
meta->base_raid0_position); printf("position %02x\n", meta->position); printf("dummy_3 %04x\n", meta->dummy_3); printf("name <%.16s>\n", meta->name); printf("checksum_0 0x%04x\n", meta->checksum_0); printf("checksum_1 0x%04x\n", meta->checksum_1); printf("=================================================\n"); } static char * ata_raid_sis_type(int type) { static char buffer[16]; switch (type) { case SIS_T_JBOD: return "JBOD"; case SIS_T_RAID0: return "RAID0"; case SIS_T_RAID1: return "RAID1"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_sis_print_meta(struct sis_raid_conf *meta) { printf("**** ATA Silicon Integrated Systems Metadata ****\n"); printf("magic 0x%04x\n", meta->magic); printf("disks 0x%02x\n", meta->disks); printf("type %s\n", ata_raid_sis_type(meta->type_total_disks & SIS_T_MASK)); printf("total_disks %u\n", meta->type_total_disks & SIS_D_MASK); printf("dummy_0 0x%08x\n", meta->dummy_0); printf("controller_pci_id 0x%08x\n", meta->controller_pci_id); printf("stripe_sectors %u\n", meta->stripe_sectors); printf("dummy_1 0x%04x\n", meta->dummy_1); printf("timestamp 0x%08x\n", meta->timestamp); printf("model %.40s\n", meta->model); printf("disk_number %u\n", meta->disk_number); printf("dummy_2 0x%02x 0x%02x 0x%02x\n", meta->dummy_2[0], meta->dummy_2[1], meta->dummy_2[2]); printf("=================================================\n"); } static char * ata_raid_via_type(int type) { static char buffer[16]; switch (type) { case VIA_T_RAID0: return "RAID0"; case VIA_T_RAID1: return "RAID1"; case VIA_T_RAID5: return "RAID5"; case VIA_T_RAID01: return "RAID0+1"; case VIA_T_SPAN: return "SPAN"; default: sprintf(buffer, "UNKNOWN 0x%02x", type); return buffer; } } static void ata_raid_via_print_meta(struct via_raid_conf *meta) { int i; printf("*************** ATA VIA Metadata ****************\n"); printf("magic 0x%02x\n", meta->magic); printf("dummy_0 0x%02x\n", meta->dummy_0); printf("type %s\n", 
ata_raid_via_type(meta->type & VIA_T_MASK)); printf("bootable %d\n", meta->type & VIA_T_BOOTABLE); printf("unknown %d\n", meta->type & VIA_T_UNKNOWN); printf("disk_index 0x%02x\n", meta->disk_index); printf("stripe_layout 0x%02x\n", meta->stripe_layout); printf(" stripe_disks %d\n", meta->stripe_layout & VIA_L_DISKS); printf(" stripe_sectors %d\n", 0x08 << ((meta->stripe_layout & VIA_L_MASK) >> VIA_L_SHIFT)); printf("disk_sectors %llu\n", (unsigned long long)meta->disk_sectors); printf("disk_id 0x%08x\n", meta->disk_id); printf("DISK# disk_id\n"); for (i = 0; i < 8; i++) { if (meta->disks[i]) printf(" %d 0x%08x\n", i, meta->disks[i]); } printf("checksum 0x%02x\n", meta->checksum); printf("=================================================\n"); } diff --git a/sys/dev/ata/ata-raid.h b/sys/dev/ata/ata-raid.h index 2c23fd743f3d..c1d73e875bd6 100644 --- a/sys/dev/ata/ata-raid.h +++ b/sys/dev/ata/ata-raid.h @@ -1,768 +1,768 @@ /*- * Copyright (c) 2000 - 2006 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* misc defines */ #define MAX_ARRAYS 16 #define MAX_VOLUMES 4 #define MAX_DISKS 16 #define AR_PROXIMITY 2048 /* how many sectors is "close" */ #define ATA_MAGIC "FreeBSD ATA driver RAID " struct ata_raid_subdisk { struct ar_softc *raid[MAX_VOLUMES]; int disk_number[MAX_VOLUMES]; }; /* ATA PseudoRAID Metadata */ struct ar_softc { int lun; u_int8_t name[32]; - int volume; + int volume; u_int64_t magic_0; u_int64_t magic_1; int type; #define AR_T_JBOD 0x0001 #define AR_T_SPAN 0x0002 #define AR_T_RAID0 0x0004 #define AR_T_RAID1 0x0008 #define AR_T_RAID01 0x0010 #define AR_T_RAID3 0x0020 #define AR_T_RAID4 0x0040 #define AR_T_RAID5 0x0080 int status; #define AR_S_READY 0x0001 #define AR_S_DEGRADED 0x0002 #define AR_S_REBUILDING 0x0004 int format; #define AR_F_FREEBSD_RAID 0x0001 #define AR_F_ADAPTEC_RAID 0x0002 #define AR_F_HPTV2_RAID 0x0004 #define AR_F_HPTV3_RAID 0x0008 #define AR_F_INTEL_RAID 0x0010 #define AR_F_ITE_RAID 0x0020 #define AR_F_LSIV2_RAID 0x0040 #define AR_F_LSIV3_RAID 0x0080 #define AR_F_NVIDIA_RAID 0x0100 #define AR_F_PROMISE_RAID 0x0200 #define AR_F_SII_RAID 0x0400 #define AR_F_SIS_RAID 0x0800 #define AR_F_VIA_RAID 0x1000 #define AR_F_FORMAT_MASK 0x1fff u_int generation; u_int64_t total_sectors; u_int64_t offset_sectors; /* offset from start of disk */ u_int16_t heads; u_int16_t sectors; u_int32_t cylinders; u_int width; /* array width in disks */ u_int interleave; /* interleave in sectors */ u_int total_disks; /* number of disks in this 
array */ struct ar_disk { device_t dev; u_int8_t serial[16]; /* serial # of physical disk */ u_int64_t sectors; /* useable sectors on this disk */ off_t last_lba; /* last lba used (for performance) */ u_int flags; #define AR_DF_PRESENT 0x0001 /* this HW pos has a disk present */ #define AR_DF_ASSIGNED 0x0002 /* this HW pos assigned to an array */ #define AR_DF_SPARE 0x0004 /* this HW pos is a spare */ #define AR_DF_ONLINE 0x0008 /* this HW pos is online and in use */ } disks[MAX_DISKS]; int toggle; /* performance hack for RAID1's */ u_int64_t rebuild_lba; /* rebuild progress indicator */ struct mtx lock; /* metadata lock */ struct disk *disk; /* disklabel/slice stuff */ struct proc *pid; /* rebuilder process id */ }; /* Adaptec HostRAID Metadata */ #define ADP_LBA(dev) \ (((struct ad_softc *)device_get_ivars(dev))->total_secs - 17) /* note all entries are big endian */ struct adaptec_raid_conf { u_int32_t magic_0; #define ADP_MAGIC_0 0xc4650790 u_int32_t generation; u_int16_t dummy_0; u_int16_t total_configs; u_int16_t dummy_1; u_int16_t checksum; u_int32_t dummy_2; u_int32_t dummy_3; u_int32_t flags; u_int32_t timestamp; u_int32_t dummy_4[4]; u_int32_t dummy_5[4]; struct { u_int16_t total_disks; u_int16_t generation; u_int32_t magic_0; u_int8_t dummy_0; u_int8_t type; #define ADP_T_RAID0 0x00 #define ADP_T_RAID1 0x01 u_int8_t dummy_1; u_int8_t flags; u_int8_t dummy_2; u_int8_t dummy_3; u_int8_t dummy_4; u_int8_t dummy_5; u_int32_t disk_number; u_int32_t dummy_6; u_int32_t sectors; u_int16_t stripe_shift; u_int16_t dummy_7; u_int32_t dummy_8[4]; u_int8_t name[16]; } configs[127]; u_int32_t dummy_6[13]; u_int32_t magic_1; #define ADP_MAGIC_1 0x9ff85009 u_int32_t dummy_7[3]; u_int32_t magic_2; u_int32_t dummy_8[46]; u_int32_t magic_3; #define ADP_MAGIC_3 0x4d545044 u_int32_t magic_4; #define ADP_MAGIC_4 0x9ff85009 u_int32_t dummy_9[62]; } __packed; /* Highpoint V2 RocketRAID Metadata */ #define HPTV2_LBA(dev) 9 struct hptv2_raid_conf { int8_t filler1[32]; u_int32_t 
magic; #define HPTV2_MAGIC_OK 0x5a7816f0 #define HPTV2_MAGIC_BAD 0x5a7816fd u_int32_t magic_0; u_int32_t magic_1; u_int32_t order; #define HPTV2_O_RAID0 0x01 #define HPTV2_O_RAID1 0x02 #define HPTV2_O_OK 0x04 u_int8_t array_width; u_int8_t stripe_shift; u_int8_t type; #define HPTV2_T_RAID0 0x00 #define HPTV2_T_RAID1 0x01 #define HPTV2_T_RAID01_RAID0 0x02 #define HPTV2_T_SPAN 0x03 #define HPTV2_T_RAID_3 0x04 #define HPTV2_T_RAID_5 0x05 #define HPTV2_T_JBOD 0x06 #define HPTV2_T_RAID01_RAID1 0x07 u_int8_t disk_number; u_int32_t total_sectors; u_int32_t disk_mode; u_int32_t boot_mode; u_int8_t boot_disk; u_int8_t boot_protect; u_int8_t error_log_entries; u_int8_t error_log_index; struct { u_int32_t timestamp; u_int8_t reason; #define HPTV2_R_REMOVED 0xfe #define HPTV2_R_BROKEN 0xff u_int8_t disk; u_int8_t status; u_int8_t sectors; u_int32_t lba; } errorlog[32]; int8_t filler2[16]; u_int32_t rebuild_lba; u_int8_t dummy_1; u_int8_t name_1[15]; u_int8_t dummy_2; u_int8_t name_2[15]; int8_t filler3[8]; } __packed; /* Highpoint V3 RocketRAID Metadata */ #define HPTV3_LBA(dev) \ (((struct ad_softc *)device_get_ivars(dev))->total_secs - 11) struct hptv3_raid_conf { u_int32_t magic; #define HPTV3_MAGIC 0x5a7816f3 u_int32_t magic_0; u_int8_t checksum_0; u_int8_t mode; #define HPTV3_BOOT_MARK 0x01 #define HPTV3_USER_MODE 0x02 u_int8_t user_mode; u_int8_t config_entries; struct { u_int32_t total_sectors; u_int8_t type; #define HPTV3_T_SPARE 0x00 #define HPTV3_T_JBOD 0x03 #define HPTV3_T_SPAN 0x04 #define HPTV3_T_RAID0 0x05 #define HPTV3_T_RAID1 0x06 #define HPTV3_T_RAID3 0x07 #define HPTV3_T_RAID5 0x08 u_int8_t total_disks; u_int8_t disk_number; u_int8_t stripe_shift; u_int16_t status; #define HPTV3_T_NEED_REBUILD 0x01 #define HPTV3_T_RAID5_FLAG 0x02 u_int16_t critical_disks; u_int32_t rebuild_lba; } __packed configs[2]; u_int8_t name[16]; u_int32_t timestamp; u_int8_t description[64]; u_int8_t creator[16]; u_int8_t checksum_1; u_int8_t dummy_0; u_int8_t dummy_1; u_int8_t flags; 
#define HPTV3_T_ENABLE_TCQ 0x01 #define HPTV3_T_ENABLE_NCQ 0x02 #define HPTV3_T_ENABLE_WCACHE 0x04 #define HPTV3_T_ENABLE_RCACHE 0x08 struct { u_int32_t total_sectors; u_int32_t rebuild_lba; } __packed configs_high[2]; u_int32_t filler[87]; } __packed; /* Intel MatrixRAID Metadata */ #define INTEL_LBA(dev) \ (((struct ad_softc *)device_get_ivars(dev))->total_secs - 3) struct intel_raid_conf { u_int8_t intel_id[24]; #define INTEL_MAGIC "Intel Raid ISM Cfg Sig. " u_int8_t version[6]; #define INTEL_VERSION_1100 "1.1.00" #define INTEL_VERSION_1201 "1.2.01" #define INTEL_VERSION_1202 "1.2.02" u_int8_t dummy_0[2]; u_int32_t checksum; u_int32_t config_size; u_int32_t config_id; u_int32_t generation; u_int32_t dummy_1[2]; u_int8_t total_disks; u_int8_t total_volumes; u_int8_t dummy_2[2]; u_int32_t filler_0[39]; struct { u_int8_t serial[16]; u_int32_t sectors; u_int32_t id; u_int32_t flags; #define INTEL_F_SPARE 0x01 #define INTEL_F_ASSIGNED 0x02 #define INTEL_F_DOWN 0x04 #define INTEL_F_ONLINE 0x08 u_int32_t filler[5]; } __packed disk[1]; u_int32_t filler_1[62]; } __packed; struct intel_raid_mapping { u_int8_t name[16]; u_int64_t total_sectors __packed; u_int32_t state; u_int32_t reserved; u_int32_t filler_0[20]; u_int32_t offset; u_int32_t disk_sectors; u_int32_t stripe_count; u_int16_t stripe_sectors; u_int8_t status; #define INTEL_S_READY 0x00 #define INTEL_S_DISABLED 0x01 #define INTEL_S_DEGRADED 0x02 #define INTEL_S_FAILURE 0x03 u_int8_t type; #define INTEL_T_RAID0 0x00 #define INTEL_T_RAID1 0x01 #define INTEL_T_RAID5 0x05 u_int8_t total_disks; u_int8_t magic[3]; u_int32_t filler_1[7]; u_int32_t disk_idx[1]; } __packed; /* Integrated Technology Express Metadata */ #define ITE_LBA(dev) \ (((struct ad_softc *)device_get_ivars(dev))->total_secs - 2) struct ite_raid_conf { u_int32_t filler_1[5]; u_int8_t timestamp_0[8]; u_int32_t dummy_1; u_int32_t filler_2[5]; u_int16_t filler_3; u_int8_t ite_id[40]; #define ITE_MAGIC "Integrated Technology Express Inc " u_int16_t 
filler_4; u_int32_t filler_5[6]; u_int32_t dummy_2; u_int32_t dummy_3; u_int32_t filler_6[12]; u_int32_t dummy_4; u_int32_t filler_7[5]; u_int64_t total_sectors __packed; u_int32_t filler_8[12]; u_int16_t filler_9; u_int8_t type; #define ITE_T_RAID0 0x00 #define ITE_T_RAID1 0x01 #define ITE_T_RAID01 0x02 #define ITE_T_SPAN 0x03 u_int8_t filler_10; u_int32_t dummy_5[8]; u_int8_t stripe_1kblocks; u_int8_t filler_11[3]; u_int32_t filler_12[54]; u_int32_t dummy_6[4]; u_int8_t timestamp_1[8]; u_int32_t filler_13[9]; u_int8_t stripe_sectors; u_int8_t filler_14[3]; u_int8_t array_width; u_int8_t filler_15[3]; u_int32_t filler_16; u_int8_t filler_17; u_int8_t disk_number; u_int32_t disk_sectors; u_int16_t filler_18; u_int32_t dummy_7[4]; u_int32_t filler_20[104]; } __packed; /* LSILogic V2 MegaRAID Metadata */ #define LSIV2_LBA(dev) \ (((struct ad_softc *)device_get_ivars(dev))->total_secs - 1) struct lsiv2_raid_conf { u_int8_t lsi_id[6]; #define LSIV2_MAGIC "$XIDE$" u_int8_t dummy_0; u_int8_t flags; u_int16_t version; u_int8_t config_entries; u_int8_t raid_count; u_int8_t total_disks; u_int8_t dummy_1; u_int16_t dummy_2; union { struct { u_int8_t type; #define LSIV2_T_RAID0 0x01 #define LSIV2_T_RAID1 0x02 #define LSIV2_T_SPARE 0x08 u_int8_t dummy_0; u_int16_t stripe_sectors; u_int8_t array_width; u_int8_t disk_count; u_int8_t config_offset; u_int8_t dummy_1; u_int8_t flags; #define LSIV2_R_DEGRADED 0x02 u_int32_t total_sectors; u_int8_t filler[3]; } __packed raid; struct { u_int8_t device; #define LSIV2_D_MASTER 0x00 #define LSIV2_D_SLAVE 0x01 #define LSIV2_D_CHANNEL0 0x00 #define LSIV2_D_CHANNEL1 0x10 #define LSIV2_D_NONE 0xff u_int8_t dummy_0; u_int32_t disk_sectors; u_int8_t disk_number; u_int8_t raid_number; u_int8_t flags; #define LSIV2_D_GONE 0x02 u_int8_t filler[7]; } __packed disk; } configs[30]; u_int8_t disk_number; u_int8_t raid_number; u_int32_t timestamp; u_int8_t filler[10]; } __packed; /* LSILogic V3 MegaRAID Metadata */ #define LSIV3_LBA(dev) \ (((struct 
ad_softc *)device_get_ivars(dev))->total_secs - 4) struct lsiv3_raid_conf { u_int32_t magic_0; /* 0xa0203200 */ u_int32_t filler_0[3]; u_int8_t magic_1[4]; /* "SATA" */ u_int32_t filler_1[40]; u_int32_t dummy_0; /* 0x0d000003 */ u_int32_t filler_2[7]; u_int32_t dummy_1; /* 0x0d000003 */ u_int32_t filler_3[70]; u_int8_t magic_2[8]; /* "$_ENQ$31" */ u_int8_t filler_4[7]; u_int8_t checksum_0; u_int8_t filler_5[512*2]; u_int8_t lsi_id[6]; #define LSIV3_MAGIC "$_IDE$" u_int16_t dummy_2; /* 0x33de for OK disk */ u_int16_t version; /* 0x0131 for this version */ u_int16_t dummy_3; /* 0x0440 always */ u_int32_t filler_6; struct { u_int16_t stripe_pages; u_int8_t type; #define LSIV3_T_RAID0 0x00 #define LSIV3_T_RAID1 0x01 u_int8_t dummy_0; u_int8_t total_disks; u_int8_t array_width; u_int8_t filler_0[10]; u_int32_t sectors; u_int16_t dummy_1; u_int32_t offset; u_int16_t dummy_2; u_int8_t device; #define LSIV3_D_DEVICE 0x01 #define LSIV3_D_CHANNEL 0x10 u_int8_t dummy_3; u_int8_t dummy_4; u_int8_t dummy_5; u_int8_t filler_1[16]; } __packed raid[8]; struct { u_int32_t disk_sectors; u_int32_t dummy_0; u_int32_t dummy_1; u_int8_t dummy_2; u_int8_t dummy_3; u_int8_t flags; #define LSIV3_D_MIRROR 0x00 #define LSIV3_D_STRIPE 0xff u_int8_t dummy_4; } __packed disk[6]; u_int8_t filler_7[7]; u_int8_t device; u_int32_t timestamp; u_int8_t filler_8[3]; u_int8_t checksum_1; } __packed; /* nVidia MediaShield Metadata */ #define NVIDIA_LBA(dev) \ (((struct ad_softc *)device_get_ivars(dev))->total_secs - 2) struct nvidia_raid_conf { u_int8_t nvidia_id[8]; #define NV_MAGIC "NVIDIA " u_int32_t config_size; u_int32_t checksum; u_int16_t version; u_int8_t disk_number; u_int8_t dummy_0; u_int32_t total_sectors; u_int32_t sector_size; u_int8_t serial[16]; u_int8_t revision[4]; u_int32_t dummy_1; u_int32_t magic_0; #define NV_MAGIC0 0x00640044 u_int64_t magic_1; u_int64_t magic_2; u_int8_t flags; u_int8_t array_width; u_int8_t total_disks; u_int8_t dummy_2; u_int16_t type; #define NV_T_RAID0 
0x00000080 #define NV_T_RAID1 0x00000081 #define NV_T_RAID3 0x00000083 #define NV_T_RAID5 0x00000085 #define NV_T_RAID01 0x00008180 #define NV_T_SPAN 0x000000ff u_int16_t dummy_3; u_int32_t stripe_sectors; u_int32_t stripe_bytes; u_int32_t stripe_shift; u_int32_t stripe_mask; u_int32_t stripe_sizesectors; u_int32_t stripe_sizebytes; u_int32_t rebuild_lba; u_int32_t dummy_4; u_int32_t dummy_5; u_int32_t status; #define NV_S_BOOTABLE 0x00000001 #define NV_S_DEGRADED 0x00000002 u_int32_t filler[98]; } __packed; /* Promise FastTrak Metadata */ #define PROMISE_LBA(dev) \ (((((struct ad_softc *)device_get_ivars(dev))->total_secs / (((struct ad_softc *)device_get_ivars(dev))->heads * ((struct ad_softc *)device_get_ivars(dev))->sectors)) * ((struct ad_softc *)device_get_ivars(dev))->heads * ((struct ad_softc *)device_get_ivars(dev))->sectors) - ((struct ad_softc *)device_get_ivars(dev))->sectors) struct promise_raid_conf { char promise_id[24]; #define PR_MAGIC "Promise Technology, Inc." u_int32_t dummy_0; u_int64_t magic_0; #define PR_MAGIC0(x) (((u_int64_t)(x.channel) << 48) | \ ((u_int64_t)(x.device != 0) << 56)) u_int16_t magic_1; u_int32_t magic_2; u_int8_t filler1[470]; struct { u_int32_t integrity; #define PR_I_VALID 0x00000080 u_int8_t flags; #define PR_F_VALID 0x00000001 #define PR_F_ONLINE 0x00000002 #define PR_F_ASSIGNED 0x00000004 #define PR_F_SPARE 0x00000008 #define PR_F_DUPLICATE 0x00000010 #define PR_F_REDIR 0x00000020 #define PR_F_DOWN 0x00000040 #define PR_F_READY 0x00000080 u_int8_t disk_number; u_int8_t channel; u_int8_t device; u_int64_t magic_0 __packed; u_int32_t disk_offset; u_int32_t disk_sectors; u_int32_t rebuild_lba; u_int16_t generation; u_int8_t status; #define PR_S_VALID 0x01 #define PR_S_ONLINE 0x02 #define PR_S_INITED 0x04 #define PR_S_READY 0x08 #define PR_S_DEGRADED 0x10 #define PR_S_MARKED 0x20 #define PR_S_FUNCTIONAL 0x80 u_int8_t type; #define PR_T_RAID0 0x00 #define PR_T_RAID1 0x01 #define PR_T_RAID3 0x02 #define PR_T_RAID5 0x04 
#define PR_T_SPAN 0x08 #define PR_T_JBOD 0x10 u_int8_t total_disks; u_int8_t stripe_shift; u_int8_t array_width; u_int8_t array_number; u_int32_t total_sectors; u_int16_t cylinders; u_int8_t heads; u_int8_t sectors; u_int64_t magic_1 __packed; struct { u_int8_t flags; u_int8_t dummy_0; u_int8_t channel; u_int8_t device; u_int64_t magic_0 __packed; } disk[8]; } raid; int32_t filler2[346]; u_int32_t checksum; } __packed; /* Silicon Image Medley Metadata */ #define SII_LBA(dev) \ ( ((struct ad_softc *)device_get_ivars(dev))->total_secs - 1) struct sii_raid_conf { - u_int16_t ata_params_00_53[54]; - u_int64_t total_sectors; - u_int16_t ata_params_58_79[70]; - u_int16_t dummy_0; - u_int16_t dummy_1; - u_int32_t controller_pci_id; - u_int16_t version_minor; - u_int16_t version_major; - u_int8_t timestamp[6]; - u_int16_t stripe_sectors; - u_int16_t dummy_2; - u_int8_t disk_number; - u_int8_t type; + u_int16_t ata_params_00_53[54]; + u_int64_t total_sectors; + u_int16_t ata_params_58_79[70]; + u_int16_t dummy_0; + u_int16_t dummy_1; + u_int32_t controller_pci_id; + u_int16_t version_minor; + u_int16_t version_major; + u_int8_t timestamp[6]; + u_int16_t stripe_sectors; + u_int16_t dummy_2; + u_int8_t disk_number; + u_int8_t type; #define SII_T_RAID0 0x00 #define SII_T_RAID1 0x01 #define SII_T_RAID01 0x02 #define SII_T_SPARE 0x03 - u_int8_t raid0_disks; - u_int8_t raid0_ident; - u_int8_t raid1_disks; - u_int8_t raid1_ident; - u_int64_t rebuild_lba; - u_int32_t generation; - u_int8_t status; + u_int8_t raid0_disks; + u_int8_t raid0_ident; + u_int8_t raid1_disks; + u_int8_t raid1_ident; + u_int64_t rebuild_lba; + u_int32_t generation; + u_int8_t status; #define SII_S_READY 0x01 - u_int8_t base_raid1_position; - u_int8_t base_raid0_position; - u_int8_t position; - u_int16_t dummy_3; - u_int8_t name[16]; - u_int16_t checksum_0; - int8_t filler1[190]; - u_int16_t checksum_1; + u_int8_t base_raid1_position; + u_int8_t base_raid0_position; + u_int8_t position; + u_int16_t dummy_3; 
+ u_int8_t name[16]; + u_int16_t checksum_0; + int8_t filler1[190]; + u_int16_t checksum_1; } __packed; /* Silicon Integrated Systems RAID Metadata */ #define SIS_LBA(dev) \ ( ((struct ad_softc *)device_get_ivars(dev))->total_secs - 16) struct sis_raid_conf { - u_int16_t magic; + u_int16_t magic; #define SIS_MAGIC 0x0010 - u_int8_t disks; -#define SIS_D_MASTER 0xf0 -#define SIS_D_MIRROR 0x0f + u_int8_t disks; +#define SIS_D_MASTER 0xf0 +#define SIS_D_MIRROR 0x0f - u_int8_t type_total_disks; -#define SIS_D_MASK 0x0f + u_int8_t type_total_disks; +#define SIS_D_MASK 0x0f #define SIS_T_MASK 0xf0 #define SIS_T_JBOD 0x10 -#define SIS_T_RAID0 0x20 +#define SIS_T_RAID0 0x20 #define SIS_T_RAID1 0x30 - u_int32_t dummy_0; - u_int32_t controller_pci_id; - u_int16_t stripe_sectors; - u_int16_t dummy_1; - u_int32_t timestamp; - u_int8_t model[40]; - u_int8_t disk_number; - u_int8_t dummy_2[3]; - int8_t filler1[448]; + u_int32_t dummy_0; + u_int32_t controller_pci_id; + u_int16_t stripe_sectors; + u_int16_t dummy_1; + u_int32_t timestamp; + u_int8_t model[40]; + u_int8_t disk_number; + u_int8_t dummy_2[3]; + int8_t filler1[448]; } __packed; /* VIA Tech V-RAID Metadata */ #define VIA_LBA(dev) \ ( ((struct ad_softc *)device_get_ivars(dev))->total_secs - 1) struct via_raid_conf { - u_int16_t magic; + u_int16_t magic; #define VIA_MAGIC 0xaa55 - u_int8_t dummy_0; - u_int8_t type; + u_int8_t dummy_0; + u_int8_t type; #define VIA_T_MASK 0x7e #define VIA_T_BOOTABLE 0x01 #define VIA_T_RAID0 0x04 #define VIA_T_RAID1 0x0c #define VIA_T_RAID01 0x4c #define VIA_T_RAID5 0x2c #define VIA_T_SPAN 0x44 #define VIA_T_UNKNOWN 0x80 - u_int8_t disk_index; -#define VIA_D_MASK 0x0f -#define VIA_D_DEGRADED 0x10 -#define VIA_D_HIGH_IDX 0x20 + u_int8_t disk_index; +#define VIA_D_MASK 0x0f +#define VIA_D_DEGRADED 0x10 +#define VIA_D_HIGH_IDX 0x20 - u_int8_t stripe_layout; + u_int8_t stripe_layout; #define VIA_L_DISKS 0x07 #define VIA_L_MASK 0xf0 -#define VIA_L_SHIFT 4 +#define VIA_L_SHIFT 4 - u_int64_t 
disk_sectors; - u_int32_t disk_id; - u_int32_t disks[8]; - u_int8_t checksum; - u_int8_t filler_1[461]; + u_int64_t disk_sectors; + u_int32_t disk_id; + u_int32_t disks[8]; + u_int8_t checksum; + u_int8_t filler_1[461]; } __packed;