Index: head/sys/dev/agp/agp.c
===================================================================
--- head/sys/dev/agp/agp.c	(revision 313981)
+++ head/sys/dev/agp/agp.c	(revision 313982)
@@ -1,1057 +1,1057 @@
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_agp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

				/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	agp_open,
	.d_close =	agp_close,
	.d_ioctl =	agp_ioctl,
	.d_mmap =	agp_mmap,
	.d_name =	"agp",
};

static devclass_t agp_devclass;

/* Helper functions for implementing chipset mini drivers. */

u_int8_t
agp_find_caps(device_t dev)
{
	int capreg;

	if (pci_find_cap(dev, PCIY_AGP, &capreg) != 0)
		capreg = 0;
	return (capreg);
}
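agp_find_caps() is the hook the chipset mini drivers build on: it returns the PCI config-space offset of the device's AGP capability, or 0 when there is none. A minimal sketch of how a mini driver's probe routine might use it (the mydrv_probe name and description string are hypothetical, not part of this file):

/* Hypothetical mini-driver probe; only the agp_find_caps() call is real. */
static int
mydrv_probe(device_t dev)
{
	/* Only claim devices that actually expose an AGP capability. */
	if (agp_find_caps(dev) == 0)
		return (ENXIO);
	device_set_desc(dev, "hypothetical AGP chipset");
	return (BUS_PROBE_DEFAULT);
}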
/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
		    "allocating GATT for aperture of size %dM\n",
		    apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return 0;

	gatt->ag_entries = entries;
	gatt->ag_virtual = (void *)kmem_alloc_contig(kernel_arena,
	    entries * sizeof(u_int32_t), M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE,
	    0, VM_MEMATTR_WRITE_COMBINING);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	kmem_free(kernel_arena, (vm_offset_t)gatt->ag_virtual,
	    gatt->ag_entries * sizeof(u_int32_t));
	free(gatt, M_AGP);
}

static u_int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define	AGP_MAX_SIZE	nitems(agp_max)
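Each agp_max row pairs total system memory in megabytes with the largest amount of memory the driver will allow to be allocated through AGP, also in megabytes. A minimal illustration of the scan that agp_generic_attach() performs below (the agp_max_lookup helper is hypothetical; only the table and the loop shape come from this file): for a machine with 300 MB of RAM, the first row whose first column is >= 300 is {512, 440}, so at most 440 MB may be handed out.

/* Illustration only: the same scan agp_generic_attach() does below. */
static u_int
agp_max_lookup(u_int memsize_mb)	/* hypothetical helper */
{
	int i;

	for (i = 0; i < AGP_MAX_SIZE; i++)
		if (memsize_mb <= agp_max[i][0])
			break;
	if (i == AGP_MAX_SIZE)
		i = AGP_MAX_SIZE - 1;
	return (agp_max[i][1]);	/* in MB; the caller shifts by 20 for bytes */
}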
/**
 * Sets the PCI resource which represents the AGP aperture.
 *
 * If not called, the default AGP aperture resource of AGP_APBASE will
 * be used.  Must be called before agp_generic_attach().
 */
void
agp_set_aperture_resource(device_t dev, int rid)
{
	struct agp_softc *sc = device_get_softc(dev);

	sc->as_aperture_rid = rid;
}

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int i;
	u_int memsize;

	/*
	 * Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
	 * because the kernel doesn't need to map it.
	 */
	if (sc->as_aperture_rid != -1) {
		if (sc->as_aperture_rid == 0)
			sc->as_aperture_rid = AGP_APBASE;

		sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->as_aperture_rid, RF_SHAREABLE);
		if (!sc->as_aperture)
			return ENOMEM;
	}

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(realmem) >> 20;
	for (i = 0; i < AGP_MAX_SIZE; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == AGP_MAX_SIZE)
		i = AGP_MAX_SIZE - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
	    0, UID_ROOT, GID_WHEEL, 0600, "agpgart");
	sc->as_devnode->si_drv1 = dev;

	return 0;
}

void
agp_free_cdev(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	destroy_dev(sc->as_devnode);
}

void
agp_free_res(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_aperture != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
		    sc->as_aperture);
	mtx_destroy(&sc->as_lock);
}

int
agp_generic_detach(device_t dev)
{

	agp_free_cdev(dev);
	agp_free_res(dev);
	return 0;
}

/**
 * Default AGP aperture size detection which simply returns the size of
 * the aperture's PCI resource.
 */
u_int32_t
agp_generic_get_aperture(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return rman_get_size(sc->as_aperture);
}

/**
 * Default AGP aperture size setting function, which simply doesn't allow
 * changes to resource size.
 */
int
agp_generic_set_aperture(device_t dev, u_int32_t aperture)
{
	u_int32_t current_aperture;

	current_aperture = AGP_GET_APERTURE(dev);
	if (current_aperture != aperture)
		return EINVAL;
	else
		return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_MODE_3(command, 1);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = 0;
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}
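Both enable paths negotiate each mode field the same way: depth-style fields (RQ, and CAL for v3) take the minimum across the target's status, the master's status, and the requested mode, while capability bits (SBA, FW, the rate mask) survive only if all three agree; the result is then written to AGP_COMMAND on both devices. A minimal sketch of that pattern, assuming the AGP_MODE_GET_*/SET_* accessors behave as used above (the agp_negotiate_v2 helper itself is hypothetical):

/* Sketch: fold two status words and a requested mode into a command word. */
static u_int32_t
agp_negotiate_v2(u_int32_t tstatus, u_int32_t mstatus, u_int32_t mode)
{
	u_int32_t command = 0;
	int rq, rate;

	/* Depth-style field: minimum of the three values. */
	rq = imin(AGP_MODE_GET_RQ(mode),
	    imin(AGP_MODE_GET_RQ(tstatus), AGP_MODE_GET_RQ(mstatus)));
	/* Capability bits: kept only when all three parties set them. */
	rate = AGP_MODE_GET_RATE(tstatus) & AGP_MODE_GET_RATE(mstatus) &
	    AGP_MODE_GET_RATE(mode);
	command = AGP_MODE_SET_RQ(command, rq);
	command = AGP_MODE_SET_RATE(command, rate);
	return (AGP_MODE_SET_AGP(command, 1));
}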
int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(mode) &&
	    AGP_MODE_GET_MODE_3(tstatus) &&
	    AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (size > sc->as_maxmem - sc->as_allocated)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
			vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() may sleep and we can't hold a mutex
	 * while sleeping.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE).  If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		VM_OBJECT_WLOCK(mem->am_obj);
		i = 0;
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
				(uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
		if (k >= i)
			vm_page_xunbusy(m);
		vm_page_lock(m);
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);

	AGP_FLUSH_TLB(dev);

	VM_OBJECT_WLOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_lock(m);
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(mem->am_obj);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}
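The ioctl handlers below are driven from userland through /dev/agpgart (created in agp_generic_attach() above), using the request structures from sys/agpio.h. A minimal sketch of a consumer, with error handling abbreviated (the map_four_pages name is hypothetical; the ioctls and field names are the ones this file dispatches on):

/* Hypothetical userland consumer of the agpgart ioctl interface. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/agpio.h>
#include <fcntl.h>

static int
map_four_pages(void)
{
	agp_allocate alloc = { .type = 0, .pg_count = 4 };
	agp_bind bind;
	int fd;

	fd = open("/dev/agpgart", O_RDWR);
	if (fd < 0 || ioctl(fd, AGPIOC_ACQUIRE) < 0)
		return (-1);
	if (ioctl(fd, AGPIOC_ALLOCATE, &alloc) < 0)
		return (-1);
	bind.key = alloc.key;		/* handle returned by ALLOCATE */
	bind.pg_start = 0;		/* AGP page offset in the aperture */
	return (ioctl(fd, AGPIOC_BIND, &bind));
}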
/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture)
		info->aper_base = rman_get_start(sc->as_aperture);
	else
		info->aper_base = 0;
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
				alloc->type,
				alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem,
			       bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_chipset_flush(device_t dev)
{

	return (AGP_CHIPSET_FLUSH(dev));
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
-	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
+	while ((mem = TAILQ_FIRST(&sc->as_memory)) != NULL) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = kdev->si_drv1;

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);

	case AGPIOC_CHIPSET_FLUSH:
		return agp_chipset_flush(dev);
	}

	return EINVAL;
}

static int
agp_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	device_t dev = kdev->si_drv1;
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	if (sc->as_aperture == NULL)
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}

/* Implementation of the kernel api */

device_t
agp_find_device()
{
	device_t *children, child;
	int i, count;

	if (!agp_devclass)
		return NULL;
	if (devclass_get_devices(agp_devclass, &children, &count) != 0)
		return NULL;
	child = NULL;
	for (i = 0; i < count; i++) {
		if (device_is_attached(children[i])) {
			child = children[i];
			break;
		}
	}
	free(children, M_TEMP);
	return child;
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	if (sc->as_aperture != NULL)
		info->ai_aperture_base = rman_get_start(sc->as_aperture);
	else
		info->ai_aperture_base = 0;
	info->ai_aperture_size = AGP_GET_APERTURE(dev);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}
void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void agp_memory_info(device_t dev, void *handle, struct
		     agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}

int
agp_bind_pages(device_t dev, vm_page_t *pages, vm_size_t size,
    vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i, j, k, pa;
	vm_page_t m;
	int error;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages[OFF_TO_IDX(i)];
		KASSERT(m->wire_count > 0,
		    ("agp_bind_pages: page %p hasn't been wired", m));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < size; j += AGP_PAGE_SIZE) {
			pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#jx to pa %#jx\n",
				(uintmax_t)offset + i + j, (uintmax_t)pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings.
				 */
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);

				mtx_unlock(&sc->as_lock);
				return (error);
			}
		}
	}

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}

int
agp_unbind_pages(device_t dev, vm_size_t size, vm_offset_t offset)
{
	struct agp_softc *sc;
	vm_offset_t i;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0 ||
	    (offset & (AGP_PAGE_SIZE - 1)) != 0)
		return (EINVAL);

	sc = device_get_softc(dev);

	mtx_lock(&sc->as_lock);
	for (i = 0; i < size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, offset + i);

	AGP_FLUSH_TLB(dev);

	mtx_unlock(&sc->as_lock);
	return (0);
}
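For in-kernel consumers such as DRM, the same flow goes through the kernel api above rather than the ioctls. A minimal sketch, assuming a caller that wants one wired, device-visible buffer (the agp_example_setup name and the minimal mode word are illustrative, not from this file):

/* Sketch of an in-kernel consumer; error handling abbreviated. */
static void *
agp_example_setup(vm_size_t size, vm_offset_t aperture_off)
{
	device_t agpdev;
	void *handle;

	if ((agpdev = agp_find_device()) == NULL)
		return (NULL);
	if (agp_acquire(agpdev) != 0)
		return (NULL);
	agp_enable(agpdev, AGP_MODE_SET_AGP(0, 1));	/* minimal mode word */
	handle = agp_alloc_memory(agpdev, 0, size);	/* only type 0 exists */
	if (handle != NULL &&
	    agp_bind_memory(agpdev, handle, aperture_off) != 0) {
		agp_free_memory(agpdev, handle);
		handle = NULL;
	}
	return (handle);
}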
Index: head/sys/dev/al_eth/al_eth.c
===================================================================
--- head/sys/dev/al_eth/al_eth.c	(revision 313981)
+++ head/sys/dev/al_eth/al_eth.c	(revision 313982)
@@ -1,3584 +1,3584 @@
/*-
 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "opt_inet.h"
#include "opt_inet6.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef INET
#include
#include
#include
#include
#endif

#ifdef INET6
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "al_eth.h"
#include "al_init_eth_lm.h"
#include "arm/annapurna/alpine/alpine_serdes.h"

#include "miibus_if.h"

#define	device_printf_dbg(fmt, ...) do {				\
	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
	    device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();}		\
	} while (0)

MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");

/* move out to some pci header file */
#define	PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
#define	PCI_DEVICE_ID_AL_ETH		0x0001
#define	PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
#define	PCI_DEVICE_ID_AL_ETH_NIC	0x0003
#define	PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
#define	PCI_DEVICE_ID_AL_CRYPTO		0x0011
#define	PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
#define	PCI_DEVICE_ID_AL_RAID_DMA	0x0021
#define	PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
#define	PCI_DEVICE_ID_AL_USB		0x0041

#define	MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
#define	MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]

#define	AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
#define	AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
#define	AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)

#define	AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
#define	AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)
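The four defines above pin down the forwarding MAC table layout: unicast filters occupy the first AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT slots, the all-multicast entry sits immediately after them, and the broadcast and drop/promiscuous entries take the top two slots. As an illustration only (AL_ETH_FWD_MAC_NUM comes from the HAL headers; the value 32 below is an assumption, not the real constant):

/*
 * Illustrative layout, assuming AL_ETH_FWD_MAC_NUM == 32:
 *
 *   [0 .. 3]  unicast filters (UNICAST_IDX_BASE .. +UNICAST_MAX_COUNT-1)
 *   [4]       all-multicast entry (ALL_MULTICAST_IDX)
 *   ...
 *   [30]      broadcast entry (BROADCAST_IDX == DROP_IDX - 1)
 *   [31]      catch-all drop / promiscuous entry (DROP_IDX)
 */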
#define	AL_ETH_THASH_UDMA_SHIFT		0
#define	AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)

#define	AL_ETH_THASH_Q_SHIFT		4
#define	AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)

/* the following defines should be moved to hal */
#define	AL_ETH_FSM_ENTRY_IPV4_TCP		0
#define	AL_ETH_FSM_ENTRY_IPV4_UDP		1
#define	AL_ETH_FSM_ENTRY_IPV6_TCP		2
#define	AL_ETH_FSM_ENTRY_IPV6_UDP		3
#define	AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
#define	AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5

/* FSM DATA format */
#define	AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
#define	AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
#define	AL_ETH_FSM_DATA_INNER_2_TUPLE	2
#define	AL_ETH_FSM_DATA_INNER_4_TUPLE	3

#define	AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)

#define	AL_ETH_FSM_DATA_DEFAULT_Q	0
#define	AL_ETH_FSM_DATA_DEFAULT_UDMA	0

#define	AL_BR_SIZE	512
#define	AL_TSO_SIZE	65500
#define	AL_DEFAULT_MTU	1500

#define	CSUM_OFFLOAD	(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)

#define	AL_IP_ALIGNMENT_OFFSET	2

#define	SFP_I2C_ADDR		0x50

#define	AL_MASK_GROUP_A_INT	0x7
#define	AL_MASK_GROUP_B_INT	0xF
#define	AL_MASK_GROUP_C_INT	0xF
#define	AL_MASK_GROUP_D_INT	0xFFFFFFFF

#define	AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
#define	AL_EN_FORWARD_INTR	0x1FFFF
#define	AL_DIS_FORWARD_INTR	0

#define	AL_M2S_MASK_INIT	0x480
#define	AL_S2M_MASK_INIT	0x1E0
#define	AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)

#define	AL_10BASE_T_SPEED	10
#define	AL_100BASE_TX_SPEED	100
#define	AL_1000BASE_T_SPEED	1000

static devclass_t al_devclass;

#define	AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
#define	AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
#define	AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))

/* helper functions */
static int al_is_device_supported(device_t);

static void al_eth_init_rings(struct al_eth_adapter *);
static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
int al_eth_fpga_write_pci_config(void *, int, uint32_t);
int al_eth_read_pci_config(void *, int, uint32_t *);
int al_eth_write_pci_config(void *, int, uint32_t);
void al_eth_irq_config(uint32_t *, uint32_t);
void al_eth_forward_int_config(uint32_t *, uint32_t);
static void al_eth_start_xmit(void *, int);
static void al_eth_rx_recv_work(void *, int);
static int al_eth_up(struct al_eth_adapter *);
static void al_eth_down(struct al_eth_adapter *);
static void al_eth_interrupts_unmask(struct al_eth_adapter *);
static void al_eth_interrupts_mask(struct al_eth_adapter *);
static int al_eth_check_mtu(struct al_eth_adapter *, int);
static uint64_t al_get_counter(struct ifnet *, ift_counter);
static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
static int al_eth_board_params_init(struct al_eth_adapter *);
static int al_media_update(struct ifnet *);
static void al_media_status(struct ifnet *, struct ifmediareq *);
static int al_eth_function_reset(struct al_eth_adapter *);
static int al_eth_hw_init_adapter(struct al_eth_adapter *);
static void al_eth_serdes_init(struct al_eth_adapter *);
static void al_eth_lm_config(struct al_eth_adapter *);
static int al_eth_hw_init(struct al_eth_adapter *);

static void al_tick_stats(void *);

/* ifnet entry points */
static void al_init(void *);
static int al_mq_start(struct ifnet *, struct mbuf *);
static void al_qflush(struct ifnet *);
static int al_ioctl(struct ifnet * ifp, u_long, caddr_t);

/* bus entry points */
static int al_probe(device_t);
static int al_attach(device_t);
static int al_detach(device_t);
static int al_shutdown(device_t);

/* mii bus support routines */
static int al_miibus_readreg(device_t, int, int);
static int al_miibus_writereg(device_t, int, int, int);
static void al_miibus_statchg(device_t);
static void al_miibus_linkchg(device_t);

struct al_eth_adapter* g_adapters[16];
uint32_t g_adapters_count;

/* flag for napi-like mbuf processing, controlled from sysctl */
static int napi = 0;

static device_method_t al_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		al_probe),
	DEVMETHOD(device_attach,	al_attach),
	DEVMETHOD(device_detach,	al_detach),
	DEVMETHOD(device_shutdown,	al_shutdown),

	DEVMETHOD(miibus_readreg,	al_miibus_readreg),
	DEVMETHOD(miibus_writereg,	al_miibus_writereg),
	DEVMETHOD(miibus_statchg,	al_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	al_miibus_linkchg),
	{ 0, 0 }
};

static driver_t al_driver = {
	"al",
	al_methods,
	sizeof(struct al_eth_adapter),
};

DRIVER_MODULE(al, pci, al_driver, al_devclass, 0, 0);
DRIVER_MODULE(miibus, al, miibus_driver, miibus_devclass, 0, 0);

static int
al_probe(device_t dev)
{

	if ((al_is_device_supported(dev)) != 0) {
		device_set_desc(dev, "al");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

static int
al_attach(device_t dev)
{
	struct al_eth_lm_context *lm_context;
	struct al_eth_adapter *adapter;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct ifnet *ifp;
	uint32_t dev_id;
	uint32_t rev_id;
	int bar_udma;
	int bar_mac;
	int bar_ec;
	int err;

	err = 0;
	ifp = NULL;
	dev_id = rev_id = 0;

	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
	child = SYSCTL_CHILDREN(tree);

	if (g_adapters_count == 0) {
		SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
		    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
	}
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->board_type = ALPINE_INTEGRATED;
	snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
	    device_get_nameunit(dev));
	AL_RX_LOCK_INIT(adapter);

	g_adapters[g_adapters_count] = adapter;

	lm_context = &adapter->lm_context;

	bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
	adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_udma, RF_ACTIVE);
	if (adapter->udma_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for DMA.\n");
		err = ENOMEM;
		goto err_res_dma;
	}
	adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
	    rman_get_bushandle(adapter->udma_res));
	bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
	adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &bar_mac, RF_ACTIVE);
	if (adapter->mac_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for MAC.\n");
		err = ENOMEM;
		goto err_res_mac;
	}
	adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
	    rman_get_bushandle(adapter->mac_res));

	bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
	adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
	    RF_ACTIVE);
	if (adapter->ec_res == NULL) {
		device_printf(adapter->dev,
		    "could not allocate memory resources for EC.\n");
		err = ENOMEM;
		goto err_res_ec;
	}
	adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
	    rman_get_bushandle(adapter->ec_res));

	adapter->netdev = ifp = if_alloc(IFT_ETHER);

	adapter->netdev->if_link_state = LINK_STATE_DOWN;

	ifp->if_softc = adapter;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_flags = ifp->if_drv_flags;
	ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
	ifp->if_transmit = al_mq_start;
	ifp->if_qflush = al_qflush;
	ifp->if_ioctl = al_ioctl;
	ifp->if_init = al_init;
	ifp->if_get_counter = al_get_counter;
	ifp->if_mtu = AL_DEFAULT_MTU;

	adapter->if_flags = ifp->if_flags;

	ifp->if_capabilities = ifp->if_capenable = 0;

	ifp->if_capabilities |= IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
	    IFCAP_LRO | IFCAP_JUMBO_MTU;

	ifp->if_capenable = ifp->if_capabilities;

	adapter->id_number = g_adapters_count;

	if (adapter->board_type == ALPINE_INTEGRATED) {
		dev_id = pci_get_device(adapter->dev);
		rev_id = pci_get_revid(adapter->dev);
	} else {
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_DEVICE, &dev_id);
		al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
		    PCIR_REVID, &rev_id);
	}

	adapter->dev_id = dev_id;
	adapter->rev_id = rev_id;

	/* set default ring sizes */
	adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
	adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
	adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
	adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;

	adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
	adapter->num_rx_queues = AL_ETH_NUM_QUEUES;

	adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN;
	adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
	adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;

	al_eth_req_rx_buff_size(adapter, adapter->netdev->if_mtu);

	adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;
	err = al_eth_board_params_init(adapter);
	if (err != 0)
		goto err;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
		ifmedia_init(&adapter->media, IFM_IMASK,
		    al_media_update, al_media_status);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	}

	al_eth_function_reset(adapter);

	err = al_eth_hw_init_adapter(adapter);
	if (err != 0)
		goto err;

	al_eth_init_rings(adapter);
	g_adapters_count++;

	al_eth_lm_config(adapter);
	mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
	mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
	callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
	callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);

	ether_ifattach(ifp, adapter->mac_addr);
	ifp->if_mtu = AL_DEFAULT_MTU;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
		al_eth_hw_init(adapter);

		/* Attach PHY(s) */
		err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
		    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
		    MII_OFFSET_ANY, 0);
		if (err != 0) {
			device_printf(adapter->dev, "attaching PHYs failed\n");
			return (err);
		}

		adapter->mii = device_get_softc(adapter->miibus);
	}

	return (err);

err:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
err_res_ec:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
err_res_mac:
	bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
err_res_dma:
	return (err);
}

static int
al_detach(device_t dev)
{
	struct al_eth_adapter *adapter;

	adapter = device_get_softc(dev);
	ether_ifdetach(adapter->netdev);

	mtx_destroy(&adapter->stats_mtx);
	mtx_destroy(&adapter->wd_mtx);

	al_eth_down(adapter);

	bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);

	return (0);
}

int
al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
{

	/* handle is the base address of the adapter */
	*val = al_reg_read32((void*)((u_long)handle + where));

	return (0);
}

int
al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
{

	/* handle is the base address of the adapter */
	al_reg_write32((void*)((u_long)handle + where), val);
	return (0);
}

int
al_eth_read_pci_config(void *handle, int where, uint32_t *val)
{

	/* handle is a pci_dev */
	*val = pci_read_config((device_t)handle, where, sizeof(*val));
	return (0);
}

int
al_eth_write_pci_config(void *handle, int where, uint32_t val)
{

	/* handle is a pci_dev */
	pci_write_config((device_t)handle, where, val, sizeof(val));
	return (0);
}

void
al_eth_irq_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32_relaxed(offset, value);
}

void
al_eth_forward_int_config(uint32_t *offset, uint32_t value)
{

	al_reg_write32(offset, value);
}

static void
al_eth_serdes_init(struct al_eth_adapter *adapter)
{
	void __iomem	*serdes_base;

	adapter->serdes_init = false;

	serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
	if (serdes_base == NULL) {
		device_printf(adapter->dev, "serdes_base get failed!\n");
		return;
	}

	serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);

	al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
	    &adapter->serdes_obj);

	adapter->serdes_init = true;
}

static void
al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	paddr = arg;
	*paddr = segs->ds_addr;
}
static int
al_dma_alloc_coherent(struct device *dev, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *baddr, void **vaddr, uint32_t size)
{
	int ret;
	uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize,
	    BUS_DMA_COHERENT, NULL, NULL, tag);
	if (ret != 0) {
		device_printf(dev,
		    "failed to create bus tag, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamem_alloc(*tag, vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    map);
	if (ret != 0) {
		device_printf(dev,
		    "failed to allocate dmamem, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamap_load(*tag, *map, *vaddr, size, al_dma_map_addr,
	    baddr, 0);
	if (ret != 0) {
		device_printf(dev,
		    "failed to allocate bus_dmamap_load, ret = %d\n", ret);
		return (ret);
	}

	return (0);
}

static void
al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
{

	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, vaddr, map);
	bus_dma_tag_destroy(tag);
}
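al_dma_alloc_coherent() bundles the usual three-step busdma sequence (tag create, memory alloc, map load) and reports the bus address through the al_dma_map_addr() callback above. A minimal sketch of a caller, assuming a hypothetical 256-entry ring of 16-byte descriptors (the sizes and the alloc_example_ring name are illustrative only):

/* Hypothetical: carve out one small descriptor ring with the helper above. */
static int
alloc_example_ring(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
    bus_addr_t *phys, void **virt)
{
	uint32_t size = 256 * 16;	/* 256 descriptors, 16 bytes each */

	/* On success *virt is zeroed memory and *phys its bus address. */
	return (al_dma_alloc_coherent(dev, tag, map, phys, virt, size));
}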
"enter" : "exit"); al_eth_fwd_mac_table_set(&adapter->hal_adapter, AL_ETH_MAC_TABLE_DROP_IDX, &entry); } static void al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma, uint32_t queue) { if (udma != 0) panic("only UDMA0 is supporter"); if (queue >= AL_ETH_NUM_QUEUES) panic("invalid queue number"); al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue); } /* init FSM, no tunneling supported yet, if packet is tcp/udp over ipv4/ipv6, use 4 tuple hash */ static void al_eth_fsm_table_init(struct al_eth_adapter *adapter) { uint32_t val; int i; for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) { uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i); switch (outer_type) { case AL_ETH_FSM_ENTRY_IPV4_TCP: case AL_ETH_FSM_ENTRY_IPV4_UDP: case AL_ETH_FSM_ENTRY_IPV6_TCP: case AL_ETH_FSM_ENTRY_IPV6_UDP: val = AL_ETH_FSM_DATA_OUTER_4_TUPLE | AL_ETH_FSM_DATA_HASH_SEL; break; case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP: case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP: val = AL_ETH_FSM_DATA_OUTER_2_TUPLE | AL_ETH_FSM_DATA_HASH_SEL; break; default: val = AL_ETH_FSM_DATA_DEFAULT_Q | AL_ETH_FSM_DATA_DEFAULT_UDMA; } al_eth_fsm_table_set(&adapter->hal_adapter, i, val); } } static void al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter, uint8_t idx) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static int al_eth_hw_init_adapter(struct al_eth_adapter *adapter) { struct al_eth_adapter_params *params = &adapter->eth_hal_params; int rc; /* params->dev_id = adapter->dev_id; */ params->rev_id = adapter->rev_id; params->udma_id = 0; params->enable_rx_parser = 1; /* enable rx epe parser*/ params->udma_regs_base = adapter->udma_base; /* UDMA register base address */ params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */ params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */ params->name = adapter->name; params->serdes_lane = adapter->serdes_lane; rc = al_eth_adapter_init(&adapter->hal_adapter, params); if (rc != 0) device_printf(adapter->dev, "%s failed at hal init!\n", __func__); if ((adapter->board_type == ALPINE_NIC) || (adapter->board_type == ALPINE_FPGA_NIC)) { /* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */ struct al_udma_gen_tgtid_conf conf; int i; for (i = 0; i < DMA_MAX_Q; i++) { conf.tx_q_conf[i].queue_en = AL_TRUE; conf.tx_q_conf[i].desc_en = AL_FALSE; conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */ conf.rx_q_conf[i].queue_en = AL_TRUE; conf.rx_q_conf[i].desc_en = AL_FALSE; conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */ } al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf); } return (rc); } static void al_eth_lm_config(struct al_eth_adapter *adapter) { struct al_eth_lm_init_params params = {0}; params.adapter = &adapter->hal_adapter; params.serdes_obj = &adapter->serdes_obj; params.lane = adapter->serdes_lane; params.sfp_detection = adapter->sfp_detection_needed; if (adapter->sfp_detection_needed == true) { params.sfp_bus_id = adapter->i2c_adapter_id; params.sfp_i2c_addr = SFP_I2C_ADDR; } if (adapter->sfp_detection_needed == false) { switch (adapter->mac_mode) { case AL_ETH_MAC_MODE_10GbE_Serial: if ((adapter->lt_en != 0) && (adapter->an_en != 0)) params.default_mode = AL_ETH_LM_MODE_10G_DA; else params.default_mode = AL_ETH_LM_MODE_10G_OPTIC; break; case AL_ETH_MAC_MODE_SGMII: params.default_mode = AL_ETH_LM_MODE_1G; break; 
		default:
			params.default_mode = AL_ETH_LM_MODE_10G_DA;
		}
	} else
		params.default_mode = AL_ETH_LM_MODE_10G_DA;

	params.link_training = adapter->lt_en;
	params.rx_equal = true;
	params.static_values = !adapter->dont_override_serdes;
	params.i2c_context = adapter;
	params.kr_fec_enable = false;

	params.retimer_exist = adapter->retimer.exist;
	params.retimer_bus_id = adapter->retimer.bus_id;
	params.retimer_i2c_addr = adapter->retimer.i2c_addr;
	params.retimer_channel = adapter->retimer.channel;

	al_eth_lm_init(&adapter->lm_context, &params);
}

static int
al_eth_board_params_init(struct al_eth_adapter *adapter)
{

	if (adapter->board_type == ALPINE_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else if (adapter->board_type == ALPINE_FPGA_NIC) {
		adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
		adapter->sfp_detection_needed = false;
		adapter->phy_exist = false;
		adapter->an_en = false;
		adapter->lt_en = false;
		adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
		adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
	} else {
		struct al_eth_board_params params;
		int rc;

		adapter->auto_speed = false;

		rc = al_eth_board_params_get(adapter->mac_base, &params);
		if (rc != 0) {
			device_printf(adapter->dev,
			    "board info not available\n");
			return (-1);
		}

		adapter->phy_exist = params.phy_exist == TRUE;
		adapter->phy_addr = params.phy_mdio_addr;
		adapter->an_en = params.autoneg_enable;
		adapter->lt_en = params.kr_lt_enable;
		adapter->serdes_grp = params.serdes_grp;
		adapter->serdes_lane = params.serdes_lane;
		adapter->sfp_detection_needed = params.sfp_plus_module_exist;
		adapter->i2c_adapter_id = params.i2c_adapter_id;
		adapter->ref_clk_freq = params.ref_clk_freq;
		adapter->dont_override_serdes = params.dont_override_serdes;
		adapter->link_config.active_duplex = !params.half_duplex;
		adapter->link_config.autoneg = !params.an_disable;
		adapter->link_config.force_1000_base_x = params.force_1000_base_x;
		adapter->retimer.exist = params.retimer_exist;
		adapter->retimer.bus_id = params.retimer_bus_id;
		adapter->retimer.i2c_addr = params.retimer_i2c_addr;
		adapter->retimer.channel = params.retimer_channel;

		switch (params.speed) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid speed (%d)\n", __func__, params.speed);
		case AL_ETH_BOARD_1G_SPEED_1000M:
			adapter->link_config.active_speed = 1000;
			break;
		case AL_ETH_BOARD_1G_SPEED_100M:
			adapter->link_config.active_speed = 100;
			break;
		case AL_ETH_BOARD_1G_SPEED_10M:
			adapter->link_config.active_speed = 10;
			break;
		}

		switch (params.mdio_freq) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid mdio freq (%d)\n", __func__,
			    params.mdio_freq);
		case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
			adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
			break;
		case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
			adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
			break;
		}

		switch (params.media_type) {
		case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
			if (params.sfp_plus_module_exist == TRUE)
				/* Backward compatibility */
				adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			else
				adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;

			adapter->use_lm = false;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
			adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
			adapter->sfp_detection_needed = TRUE;
			adapter->auto_speed = false;
			adapter->use_lm = true;
			break;
		case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
			adapter->sfp_detection_needed = TRUE;
			adapter->auto_speed = true;
			adapter->mac_mode_set = false;
			adapter->use_lm = true;

			adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
			break;
		default:
			device_printf(adapter->dev,
			    "%s: unsupported media type %d\n",
			    __func__, params.media_type);
			return (-1);
		}

		device_printf(adapter->dev,
		    "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. "
		    "SFP connected %s. media %d\n",
		    params.phy_exist == TRUE ? "Yes" : "No",
		    params.phy_mdio_addr, adapter->mdio_freq,
		    params.sfp_plus_module_exist == TRUE ? "Yes" : "No",
		    params.media_type);
	}

	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);

	return (0);
}

static int
al_eth_function_reset(struct al_eth_adapter *adapter)
{
	struct al_eth_board_params params;
	int rc;

	/* save board params so we restore it after reset */
	al_eth_board_params_get(adapter->mac_base, &params);
	al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
	if (adapter->board_type == ALPINE_INTEGRATED)
		rc = al_eth_flr_rmn(&al_eth_read_pci_config,
		    &al_eth_write_pci_config,
		    adapter->dev, adapter->mac_base);
	else
		rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
		    &al_eth_fpga_write_pci_config,
		    adapter->internal_pcie_base, adapter->mac_base);

	/* restore params */
	al_eth_board_params_set(adapter->mac_base, &params);
	al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);

	return (rc);
}

static void
al_eth_init_rings(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct al_eth_ring *ring = &adapter->tx_ring[i];

		ring->ring_id = i;
		ring->dev = adapter->dev;
		ring->adapter = adapter;
		ring->netdev = adapter->netdev;
		al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
		    &ring->dma_q);
		ring->sw_count = adapter->tx_ring_count;
		ring->hw_count = adapter->tx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
		ring->unmask_val = ~(1 << i);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct al_eth_ring *ring = &adapter->rx_ring[i];

		ring->ring_id = i;
		ring->dev = adapter->dev;
		ring->adapter = adapter;
		ring->netdev = adapter->netdev;
		al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i,
		    &ring->dma_q);
		ring->sw_count = adapter->rx_ring_count;
		ring->hw_count = adapter->rx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
		ring->unmask_val = ~(1 << i);
	}
}

static void
al_init_locked(void *arg)
{
	struct al_eth_adapter *adapter = arg;
	if_t ifp = adapter->netdev;
	int rc = 0;

	al_eth_down(adapter);
	rc = al_eth_up(adapter);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (rc == 0)
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

static void
al_init(void *arg)
{
	struct al_eth_adapter *adapter = arg;

	al_init_locked(adapter);
}

static inline int
al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring,
    struct al_eth_rx_buffer *rx_info)
{
	struct al_buf *al_buf;
	bus_dma_segment_t segs[2];
	int error;
	int nsegs;

	if (rx_info->m != NULL)
		return (0);

	rx_info->data_size = adapter->rx_mbuf_sz;

	AL_RX_LOCK(adapter);

	/* Get mbuf using UMA allocator */
	rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    rx_info->data_size);
	AL_RX_UNLOCK(adapter);

	if (rx_info->m == NULL)
		return (ENOMEM);

	rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;

	/* Map packets for DMA */
	error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
	    rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (__predict_false(error)) {
		device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
		    error);
		m_freem(rx_info->m);
		rx_info->m = NULL;
		return (EFAULT);
	}

	al_buf = &rx_info->al_buf;
	al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
	al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;

	return (0);
}

static int
al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
    unsigned int num)
{
	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
	uint16_t next_to_use;
	unsigned int i;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		int rc;
		struct al_eth_rx_buffer *rx_info =
		    &rx_ring->rx_buffer_info[next_to_use];

		if (__predict_false(al_eth_alloc_rx_buf(adapter,
		    rx_ring, rx_info) < 0)) {
			device_printf(adapter->dev,
			    "failed to alloc buffer for rx queue %d\n", qid);
			break;
		}

		rc = al_eth_rx_buffer_add(rx_ring->dma_q,
		    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
		if (__predict_false(rc)) {
			device_printf(adapter->dev,
			    "failed to add buffer for rx queue %d\n", qid);
			break;
		}

		next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
	}

	if (__predict_false(i < num))
		device_printf(adapter->dev,
		    "refilled rx queue %d with %d pages only - available %d\n",
		    qid, i, al_udma_available_get(rx_ring->dma_q));

	if (__predict_true(i))
		al_eth_rx_buffer_action(rx_ring->dma_q, i);

	rx_ring->next_to_use = next_to_use;

	return (i);
}

/*
 * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void
al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
}
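Note that the refill above posts AL_ETH_DEFAULT_RX_DESCS - 1 buffers, presumably leaving one descriptor of slack so that a completely full ring remains distinguishable from an empty one when the producer and consumer indices meet. AL_ETH_RX_RING_IDX_NEXT is defined in the driver's headers; it plausibly reduces to wrap-around arithmetic along these lines (a sketch under that assumption, not the actual macro):

/* Sketch: next ring index, assuming the ring size is a power of two. */
static inline uint16_t
ring_idx_next(uint16_t idx, uint16_t ring_size)
{
	return ((idx + 1) & (ring_size - 1));	/* wraps to 0 at the end */
}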
static void
al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
{
	unsigned int total_done;
	uint16_t next_to_clean;
	int qid = tx_ring->ring_id;

	total_done = al_eth_comp_tx_get(tx_ring->dma_q);
	device_printf_dbg(tx_ring->dev,
	    "tx_poll: q %d total completed descs %x\n", qid, total_done);
	next_to_clean = tx_ring->next_to_clean;

	while (total_done != 0) {
		struct al_eth_tx_buffer *tx_info;
		struct mbuf *mbuf;

		tx_info = &tx_ring->tx_buffer_info[next_to_clean];
		/* stop if not all descriptors of the packet are completed */
		if (tx_info->tx_descs > total_done)
			break;

		mbuf = tx_info->m;

		tx_info->m = NULL;

		device_printf_dbg(tx_ring->dev,
		    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);

		/* map is no longer required */
		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);

		m_freem(mbuf);
		total_done -= tx_info->tx_descs;
		next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
	}

	tx_ring->next_to_clean = next_to_clean;

	device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
	    qid, next_to_clean);

	/*
	 * need to make the rings circular update visible to
	 * al_eth_start_xmit() before checking for netif_queue_stopped().
	 */
	al_smp_data_memory_barrier();
}

static void
al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
    struct al_eth_pkt *hal_pkt, struct mbuf *m)
{
	uint32_t mss = m->m_pkthdr.tso_segsz;
	struct ether_vlan_header *eh;
	uint16_t etype;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th = NULL;
	int	ehdrlen, ip_hlen = 0;
	uint8_t	ipproto = 0;
	uint32_t offload = 0;

	if (mss != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		offload = 1;

	if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
		offload = 1;

	if (offload != 0) {
		struct al_eth_meta_data *meta = &tx_ring->hal_meta;

		if (mss != 0)
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
			    AL_ETH_TX_FLAGS_L4_CSUM);
		else
			hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
			    AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);

		/*
		 * Determine where frame payload starts.
		 * Jump over vlan headers if already present,
		 * helpful for QinQ too.
		 */
		eh = mtod(m, struct ether_vlan_header *);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehdrlen = ETHER_HDR_LEN;
		}

		switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(m->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			ipproto = ip->ip_p;
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
			if (mss != 0)
				hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
			hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
			ip_hlen = sizeof(struct ip6_hdr);
			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
			ipproto = ip6->ip6_nxt;
			if (ipproto == IPPROTO_TCP)
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
			else
				hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
			break;
		default:
			break;
		}

		meta->words_valid = 4;
		meta->l3_header_len = ip_hlen;
		meta->l3_header_offset = ehdrlen;
		if (th != NULL)
			meta->l4_header_len = th->th_off; /* this param needed only for TSO */
		meta->mss_idx_sel = 0;			/* check how to select MSS */
		meta->mss_val = mss;
		hal_pkt->meta = meta;
	} else
		hal_pkt->meta = NULL;
}
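al_eth_tx_csum() above only reacts to offload hints that the stack has already placed on the outgoing mbuf's packet header. A minimal sketch of the producer side for a hypothetical locally generated TCP segment (the mark_mbuf_for_tso name is illustrative; the mbuf fields are the ones the function reads):

/* Hypothetical producer: mark an mbuf for TSO before if_transmit. */
static void
mark_mbuf_for_tso(struct mbuf *m, uint32_t mss)
{
	m->m_pkthdr.csum_flags |= CSUM_TSO;	/* selects the TSO branch */
	m->m_pkthdr.tso_segsz = mss;		/* ends up in meta->mss_val */
}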
- one try */ if (remap == TRUE) { remap = FALSE; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(tx_ring->dev, "failed to defrag mbuf\n"); goto exit; } m = m_new; goto retry; } else { device_printf(tx_ring->dev, "failed to map mbuf, error %d\n", error); goto exit; } } else { device_printf(tx_ring->dev, "failed to map mbuf, error %d\n", error); goto exit; } } /* set flags and meta data */ hal_pkt->flags = AL_ETH_TX_FLAGS_INT; al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m); al_buf = hal_pkt->bufs; for (a = 0; a < nsegs; a++) { al_buf->addr = segs[a].ds_addr; al_buf->len = segs[a].ds_len; al_buf++; } hal_pkt->num_of_bufs = nsegs; /* prepare the packet's descriptors to dma engine */ tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt); if (tx_info->tx_descs == 0) goto exit; /* * stop the queue when no more space available, the packet can have up * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor */ if (unlikely(al_udma_available_get(tx_ring->dma_q) < (AL_ETH_PKT_MAX_BUFS + 2))) { tx_ring->stall = 1; device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n", tx_ring->ring_id); al_data_memory_barrier(); } tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use); /* trigger the dma engine */ al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs); return; exit: m_freem(m); } static void al_eth_tx_cmpl_work(void *arg, int pending) { struct al_eth_ring *tx_ring = arg; if (napi != 0) { tx_ring->cmpl_is_running = 1; al_data_memory_barrier(); } al_eth_tx_do_cleanup(tx_ring); if (napi != 0) { tx_ring->cmpl_is_running = 0; al_data_memory_barrier(); } /* all work done, enable IRQs */ al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val); } static int al_eth_tx_cmlp_irq_filter(void *arg) { struct al_eth_ring *tx_ring = arg; /* Interrupt should be auto-masked upon arrival */ device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__, tx_ring->ring_id); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. */ if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0)) taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task); /* Do not run bottom half */ return (FILTER_HANDLED); } static int al_eth_rx_recv_irq_filter(void *arg) { struct al_eth_ring *rx_ring = arg; /* Interrupt should be auto-masked upon arrival */ device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__, rx_ring->ring_id); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. 
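 *
 * Truth table of the guard below; the only case that skips the enqueue is
 * napi mode with the worker already running, since that worker will pick
 * up the new completions itself:
 *
 *	napi	enqueue_is_running	taskqueue_enqueue?
 *	0	any			yes
 *	1	0			yes
 *	1	1			no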
*/ if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0)) taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task); /* Do not run bottom half */ return (FILTER_HANDLED); } /* * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum * @adapter: structure containing adapter specific data * @hal_pkt: HAL structure for the packet * @mbuf: mbuf currently being received and modified */ static inline void al_eth_rx_checksum(struct al_eth_adapter *adapter, struct al_eth_pkt *hal_pkt, struct mbuf *mbuf) { /* if IPv4 and error */ if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM) && (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) && (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { device_printf(adapter->dev,"rx ipv4 header checksum error\n"); return; } /* if IPv6 and error */ if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM_IPV6) && (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) && (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { device_printf(adapter->dev,"rx ipv6 header checksum error\n"); return; } /* if TCP/UDP */ if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) || (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) { if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) { device_printf_dbg(adapter->dev, "rx L4 checksum error\n"); /* TCP/UDP checksum error */ mbuf->m_pkthdr.csum_flags = 0; } else { device_printf_dbg(adapter->dev, "rx checksum correct\n"); /* IP Checksum Good */ mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; } } } static struct mbuf* al_eth_rx_mbuf(struct al_eth_adapter *adapter, struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt, unsigned int descs, uint16_t *next_to_clean) { struct mbuf *mbuf; struct al_eth_rx_buffer *rx_info = &rx_ring->rx_buffer_info[*next_to_clean]; unsigned int len; len = hal_pkt->bufs[0].len; device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info, rx_info->m); if (rx_info->m == NULL) { *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (NULL); } mbuf = rx_info->m; mbuf->m_pkthdr.len = len; mbuf->m_len = len; mbuf->m_pkthdr.rcvif = rx_ring->netdev; mbuf->m_flags |= M_PKTHDR; if (len <= adapter->small_copy_len) { struct mbuf *smbuf; device_printf_dbg(adapter->dev, "rx small packet. 
len %d\n", len); AL_RX_LOCK(adapter); smbuf = m_gethdr(M_NOWAIT, MT_DATA); AL_RX_UNLOCK(adapter); if (__predict_false(smbuf == NULL)) { device_printf(adapter->dev, "smbuf is NULL\n"); return (NULL); } smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET; memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len); smbuf->m_len = len; smbuf->m_pkthdr.rcvif = rx_ring->netdev; /* first desc of a non-ps chain */ smbuf->m_flags |= M_PKTHDR; smbuf->m_pkthdr.len = smbuf->m_len; *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (smbuf); } mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET; /* Unmap the buffer */ bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map); rx_info->m = NULL; *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (mbuf); } static void al_eth_rx_recv_work(void *arg, int pending) { struct al_eth_ring *rx_ring = arg; struct mbuf *mbuf; struct lro_entry *queued; unsigned int qid = rx_ring->ring_id; struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt; uint16_t next_to_clean = rx_ring->next_to_clean; uint32_t refill_required; uint32_t refill_actual; uint32_t do_if_input; if (napi != 0) { rx_ring->enqueue_is_running = 1; al_data_memory_barrier(); } do { unsigned int descs; descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt); if (unlikely(descs == 0)) break; device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet " "from hal. descs %d\n", qid, descs); device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. " "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags, hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx); /* ignore if detected dma or eth controller errors */ if ((hal_pkt->flags & (AL_ETH_RX_ERROR | AL_UDMA_CDESC_ERROR)) != 0) { device_printf(rx_ring->dev, "receive packet with error. " "flags = 0x%x\n", hal_pkt->flags); next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, next_to_clean, descs); continue; } /* allocate mbuf and fill it */ mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs, &next_to_clean); /* exit if we failed to retrieve a buffer */ if (unlikely(mbuf == NULL)) { next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, next_to_clean, descs); break; } if (__predict_true(rx_ring->netdev->if_capenable & IFCAP_RXCSUM || rx_ring->netdev->if_capenable & IFCAP_RXCSUM_IPV6)) { al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf); } #if __FreeBSD_version >= 800000 mbuf->m_pkthdr.flowid = qid; M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); #endif /* * LRO is only for IP/TCP packets and TCP checksum of the packet * should be computed by hardware. 
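 *
 * Sketch of the fallback pattern implemented below ("lro_usable" stands
 * for the enable/resource checks; tcp_lro_rx() returns 0 when it swallows
 * the mbuf into an aggregation):
 *
 *	if (lro_usable && tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
 *		do_if_input = 0;	(stack sees it at lro flush time)
 *	if (do_if_input)
 *		(*ifp->if_input)(ifp, mbuf);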
*/ do_if_input = 1; if ((rx_ring->lro_enabled != 0) && ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) { /* * Send to the stack if: * - LRO not enabled, or * - no LRO resources, or * - lro enqueue fails */ if (rx_ring->lro.lro_cnt != 0) { if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0) do_if_input = 0; } } if (do_if_input) (*rx_ring->netdev->if_input)(rx_ring->netdev, mbuf); } while (1); rx_ring->next_to_clean = next_to_clean; refill_required = al_udma_available_get(rx_ring->dma_q); refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid, refill_required); if (unlikely(refill_actual < refill_required)) { device_printf_dbg(rx_ring->dev, "%s: not filling rx queue %d\n", __func__, qid); } while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) { LIST_REMOVE(queued, next); tcp_lro_flush(&rx_ring->lro, queued); } if (napi != 0) { rx_ring->enqueue_is_running = 0; al_data_memory_barrier(); } /* unmask irq */ al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val); } static void al_eth_start_xmit(void *arg, int pending) { struct al_eth_ring *tx_ring = arg; struct mbuf *mbuf; if (napi != 0) { tx_ring->enqueue_is_running = 1; al_data_memory_barrier(); } while (1) { mtx_lock(&tx_ring->br_mtx); mbuf = drbr_dequeue(NULL, tx_ring->br); mtx_unlock(&tx_ring->br_mtx); if (mbuf == NULL) break; al_eth_xmit_mbuf(tx_ring, mbuf); } if (napi != 0) { tx_ring->enqueue_is_running = 0; al_data_memory_barrier(); while (1) { mtx_lock(&tx_ring->br_mtx); mbuf = drbr_dequeue(NULL, tx_ring->br); mtx_unlock(&tx_ring->br_mtx); if (mbuf == NULL) break; al_eth_xmit_mbuf(tx_ring, mbuf); } } } static int al_mq_start(struct ifnet *ifp, struct mbuf *m) { struct al_eth_adapter *adapter = ifp->if_softc; struct al_eth_ring *tx_ring; int i; int ret; /* Which queue to use */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % adapter->num_tx_queues; else i = curcpu % adapter->num_tx_queues; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { return (EFAULT); } tx_ring = &adapter->tx_ring[i]; device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, " "sending packet to queue %d\n", i); ret = drbr_enqueue(ifp, tx_ring->br, m); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. 
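 *
 * Note on the queue pick above: it is a plain modulo spread, e.g. with
 * 4 tx queues a flowid of 29 lands on ring 29 % 4 = 1, and packets
 * without a flow hash fall back to curcpu % 4, so a given flow (or CPU)
 * stays on one ring.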
 */
	if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
		taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

	return (ret);
}

static void
al_qflush(struct ifnet *ifp)
{

	/* unused */
}

static inline void
al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
{
	uint8_t default_flow_ctrl;

	default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
	default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;

	adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
}

static int
al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
{
	struct al_eth_flow_control_params *flow_ctrl_params;
	uint8_t active = adapter->link_config.flow_ctrl_active;
	int i;

	flow_ctrl_params = &adapter->flow_ctrl_params;

	flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
	flow_ctrl_params->obay_enable =
	    ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
	flow_ctrl_params->gen_enable =
	    ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);

	flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
	flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
	flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
	flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;

	/* map priority to queue index, queue id = priority/2 */
	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
		flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1);

	al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);

	return (0);
}

static void
al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
{
	/*
	 * change the active configuration to the default / force by ethtool
	 * and call to configure
	 */
	adapter->link_config.flow_ctrl_active =
	    adapter->link_config.flow_ctrl_supported;

	al_eth_flow_ctrl_config(adapter);
}

static void
al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
{

	adapter->link_config.flow_ctrl_active = 0;
	al_eth_flow_ctrl_config(adapter);
}

static int
al_eth_hw_init(struct al_eth_adapter *adapter)
{
	int rc;

	rc = al_eth_hw_init_adapter(adapter);
	if (rc != 0)
		return (rc);

	rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
	if (rc < 0) {
		device_printf(adapter->dev, "%s failed to configure mac!\n",
		    __func__);
		return (rc);
	}

	if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
	    (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
	     adapter->phy_exist == FALSE)) {
		rc = al_eth_mac_link_config(&adapter->hal_adapter,
		    adapter->link_config.force_1000_base_x,
		    adapter->link_config.autoneg,
		    adapter->link_config.active_speed,
		    adapter->link_config.active_duplex);
		if (rc != 0) {
			device_printf(adapter->dev,
			    "%s failed to configure link parameters!\n",
			    __func__);
			return (rc);
		}
	}

	rc = al_eth_mdio_config(&adapter->hal_adapter,
	    AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */,
	    adapter->ref_clk_freq, adapter->mdio_freq);
	if (rc != 0) {
		device_printf(adapter->dev, "%s failed at mdio config!\n",
		    __func__);
		return (rc);
	}

	al_eth_flow_ctrl_init(adapter);

	return (rc);
}

static int
al_eth_hw_stop(struct al_eth_adapter *adapter)
{

	al_eth_mac_stop(&adapter->hal_adapter);

	/*
	 * wait till pending rx packets written and UDMA becomes idle,
	 * the MAC has ~10KB fifo, 10us should be enough time for the
	 * UDMA to write to the memory
	 */
	DELAY(10);

	al_eth_adapter_stop(&adapter->hal_adapter);

	adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;

	/* disable flow ctrl to avoid pause packets */
	al_eth_flow_ctrl_disable(adapter);

	return (0);
}

/*
 * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static int
al_eth_intr_intx_all(void *data)
{
	struct
al_eth_adapter *adapter = data; struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; uint32_t reg; reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A); if (likely(reg)) device_printf_dbg(adapter->dev, "%s group A cause %x\n", __func__, reg); if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) { struct al_iofic_grp_ctrl __iomem *sec_ints_base; uint32_t cause_d = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D); sec_ints_base = ®s_base->gen.interrupt_regs.secondary_iofic_ctrl[0]; if (cause_d != 0) { device_printf_dbg(adapter->dev, "got interrupt from group D. cause %x\n", cause_d); cause_d = al_iofic_read_cause(sec_ints_base, AL_INT_GROUP_A); device_printf(adapter->dev, "secondary A cause %x\n", cause_d); cause_d = al_iofic_read_cause(sec_ints_base, AL_INT_GROUP_B); device_printf_dbg(adapter->dev, "secondary B cause %x\n", cause_d); } } if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0 ) { uint32_t cause_b = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); int qid; device_printf_dbg(adapter->dev, "secondary B cause %x\n", cause_b); for (qid = 0; qid < adapter->num_rx_queues; qid++) { if (cause_b & (1 << qid)) { /* mask */ al_udma_iofic_mask( (struct unit_regs __iomem *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, 1 << qid); } } } if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) { uint32_t cause_c = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); int qid; device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c); for (qid = 0; qid < adapter->num_tx_queues; qid++) { if ((cause_c & (1 << qid)) != 0) { al_udma_iofic_mask( (struct unit_regs __iomem *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, 1 << qid); } } } al_eth_tx_cmlp_irq_filter(adapter->tx_ring); return (0); } static int al_eth_intr_msix_all(void *data) { struct al_eth_adapter *adapter = data; device_printf_dbg(adapter->dev, "%s\n", __func__); return (0); } static int al_eth_intr_msix_mgmt(void *data) { struct al_eth_adapter *adapter = data; device_printf_dbg(adapter->dev, "%s\n", __func__); return (0); } static int al_eth_enable_msix(struct al_eth_adapter *adapter) { int i, msix_vecs, rc, count; device_printf_dbg(adapter->dev, "%s\n", __func__); msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues; device_printf_dbg(adapter->dev, "Try to enable MSIX, vector numbers = %d\n", msix_vecs); adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries), M_IFAL, M_ZERO | M_WAITOK); - if (adapter->msix_entries == 0) { + if (adapter->msix_entries == NULL) { device_printf_dbg(adapter->dev, "failed to allocate" " msix_entries %d\n", msix_vecs); rc = ENOMEM; goto exit; } /* management vector (GROUP_A) @2*/ adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2; adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0; /* rx queues start @3 */ for (i = 0; i < adapter->num_rx_queues; i++) { int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); adapter->msix_entries[irq_idx].entry = 3 + i; adapter->msix_entries[irq_idx].vector = 0; } /* tx queues start @7 */ for (i = 0; i < adapter->num_tx_queues; i++) { int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); adapter->msix_entries[irq_idx].entry = 3 + AL_ETH_MAX_HW_QUEUES + i; adapter->msix_entries[irq_idx].vector = 0; } count = msix_vecs + 2; /* entries start from 2 */ rc = pci_alloc_msix(adapter->dev, &count); if (rc != 0) { device_printf_dbg(adapter->dev, "failed to allocate MSIX " "vectors %d\n", 
msix_vecs+2); device_printf_dbg(adapter->dev, "ret = %d\n", rc); goto msix_entries_exit; } if (count != msix_vecs + 2) { device_printf_dbg(adapter->dev, "failed to allocate all MSIX " "vectors %d, allocated %d\n", msix_vecs+2, count); rc = ENOSPC; goto msix_entries_exit; } for (i = 0; i < msix_vecs; i++) adapter->msix_entries[i].vector = 2 + 1 + i; device_printf_dbg(adapter->dev, "successfully enabled MSIX," " vectors %d\n", msix_vecs); adapter->msix_vecs = msix_vecs; adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED; goto exit; msix_entries_exit: adapter->msix_vecs = 0; free(adapter->msix_entries, M_IFAL); adapter->msix_entries = NULL; exit: return (rc); } static int al_eth_setup_int_mode(struct al_eth_adapter *adapter) { int i, rc; rc = al_eth_enable_msix(adapter); if (rc != 0) { device_printf(adapter->dev, "Failed to enable MSIX mode.\n"); return (rc); } adapter->irq_vecs = max(1, adapter->msix_vecs); /* single INTX mode */ if (adapter->msix_vecs == 0) { snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_intx_all; /* IRQ vector will be resolved from device resources */ adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; device_printf(adapter->dev, "%s and vector %d \n", __func__, adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector); return (0); } /* single MSI-X mode */ if (adapter->msix_vecs == 1) { snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_all; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; return (0); } /* MSI-X per queue */ snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; for (i = 0; i < adapter->num_rx_queues; i++) { int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, "al-eth-rx-comp-%d@pci:%s", i, device_get_name(adapter->dev)); adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter; adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; } for (i = 0; i < adapter->num_tx_queues; i++) { int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i, device_get_name(adapter->dev)); adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter; adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; } return (0); } static void __al_eth_free_irq(struct al_eth_adapter *adapter) { struct al_eth_irq *irq; int i, rc; for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested != 0) { device_printf_dbg(adapter->dev, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->dev, irq->res, irq->cookie); if (rc != 0) device_printf(adapter->dev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; } } static void 
al_eth_free_irq(struct al_eth_adapter *adapter)
{
	struct al_eth_irq *irq;
	int i, rc;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif

	__al_eth_free_irq(adapter);

	for (i = 0; i < adapter->irq_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		if (irq->res == NULL)
			continue;
		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
		    irq->vector);
		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ,
		    irq->vector, irq->res);
		irq->res = NULL;
		if (rc != 0)
			device_printf(adapter->dev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
	}

	pci_release_msi(adapter->dev);

	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;

	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_IFAL);
	adapter->msix_entries = NULL;
}

static int
al_eth_request_irq(struct al_eth_adapter *adapter)
{
	unsigned long flags;
	struct al_eth_irq *irq;
	int rc = 0, i, v;

	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
		flags = RF_ACTIVE;
	else
		flags = RF_ACTIVE | RF_SHAREABLE;

	for (i = 0; i < adapter->irq_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		if (irq->requested != 0)
			continue;

		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
		    &irq->vector, flags);
		if (irq->res == NULL) {
			device_printf(adapter->dev, "could not allocate "
			    "irq vector=%d\n", irq->vector);
			rc = ENXIO;
			goto exit_res;
		}

		if ((rc = bus_setup_intr(adapter->dev, irq->res,
		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
		    NULL, irq->data, &irq->cookie)) != 0) {
			device_printf(adapter->dev, "failed to register "
			    "interrupt handler for irq %ju: %d\n",
			    (uintmax_t)rman_get_start(irq->res), rc);
			goto exit_intr;
		}
		irq->requested = 1;
	}
	goto exit;

exit_intr:
	/* start at i - 1 because we omit the operation that failed */
	for (v = i - 1; v >= 0; v--) {
		int bti;
		irq = &adapter->irq_tbl[v];
		bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
		if (bti != 0) {
			device_printf(adapter->dev, "failed to tear "
			    "down irq: %d\n", irq->vector);
		}
		irq->requested = 0;
		device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
		    irq->vector);
	}

exit_res:
	/* start at i - 1 because we omit the operation that failed */
	for (v = i - 1; v >= 0; v--) {
		int brr;
		irq = &adapter->irq_tbl[v];
		device_printf_dbg(adapter->dev, "exit_res: releasing resource"
		    " for irq %d\n", irq->vector);
		brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
		    irq->vector, irq->res);
		if (brr != 0)
			device_printf(adapter->dev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
		irq->res = NULL;
	}

exit:
	return (rc);
}

/**
 * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 **/
static int
al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
{
	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
	struct device *dev = tx_ring->dev;
	struct al_udma_q_params *q_params = &tx_ring->q_params;
	int size;
	int ret;

	if (adapter->up)
		return (0);

	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;

	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
	if (tx_ring->tx_buffer_info == NULL)
		return (ENOMEM);

	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
	q_params->size = tx_ring->hw_count;

	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
	    (bus_addr_t *)&q_params->desc_phy_base,
	    (void**)&q_params->desc_base, tx_ring->descs_size);

	if (ret != 0) {
		device_printf(dev, "failed to al_dma_alloc_coherent,"
" ret = %d\n", ret); return (ENOMEM); } if (q_params->desc_base == NULL) return (ENOMEM); device_printf_dbg(dev, "Initializing ring queues %d\n", qid); /* Allocate Ring Queue */ mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF); tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK, &tx_ring->br_mtx); if (tx_ring->br == NULL) { device_printf(dev, "Critical Failure setting up buf ring\n"); return (ENOMEM); } /* Allocate taskqueues */ TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring); tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq", device_get_nameunit(adapter->dev)); TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring); tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->cmpl_tq); taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq", device_get_nameunit(adapter->dev)); /* Setup DMA descriptor areas. */ ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AL_TSO_SIZE, /* maxsize */ AL_ETH_PKT_MAX_BUFS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &tx_ring->dma_buf_tag); if (ret != 0) { device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n", ret); return (ret); } for (size = 0; size < tx_ring->sw_count; size++) { ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0, &tx_ring->tx_buffer_info[size].dma_map); if (ret != 0) { device_printf(dev, "Unable to map DMA TX " "buffer memory [iter=%d]\n", size); return (ret); } } /* completion queue not used for tx */ q_params->cdesc_base = NULL; /* size in bytes of the udma completion ring descriptor */ q_params->cdesc_size = 8; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return (0); } /* * al_eth_free_tx_resources - Free Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources */ static void al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid) { struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; struct al_udma_q_params *q_params = &tx_ring->q_params; int size; /* At this point interrupts' handlers must be deactivated */ while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL)) taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task); taskqueue_free(tx_ring->cmpl_tq); while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) { taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); } taskqueue_free(tx_ring->enqueue_tq); if (tx_ring->br != NULL) { drbr_flush(adapter->netdev, tx_ring->br); buf_ring_free(tx_ring->br, M_DEVBUF); } for (size = 0; size < tx_ring->sw_count; size++) { m_freem(tx_ring->tx_buffer_info[size].m); tx_ring->tx_buffer_info[size].m = NULL; bus_dmamap_unload(tx_ring->dma_buf_tag, tx_ring->tx_buffer_info[size].dma_map); bus_dmamap_destroy(tx_ring->dma_buf_tag, tx_ring->tx_buffer_info[size].dma_map); } bus_dma_tag_destroy(tx_ring->dma_buf_tag); free(tx_ring->tx_buffer_info, M_IFAL); tx_ring->tx_buffer_info = NULL; mtx_destroy(&tx_ring->br_mtx); /* if not set, then don't free */ if (q_params->desc_base == NULL) return; al_dma_free_coherent(q_params->desc_phy_base_tag, q_params->desc_phy_base_map, q_params->desc_base); q_params->desc_base = NULL; } /* * al_eth_free_all_tx_resources - Free 
Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources */ static void al_eth_free_all_tx_resources(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i].q_params.desc_base) al_eth_free_tx_resources(adapter, i); } /* * al_eth_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, negative on failure */ static int al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; struct device *dev = rx_ring->dev; struct al_udma_q_params *q_params = &rx_ring->q_params; int size; int ret; size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count; /* alloc extra element so in rx path we can always prefetch rx_info + 1 */ size += 1; rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); if (rx_ring->rx_buffer_info == NULL) return (ENOMEM); rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc); q_params->size = rx_ring->hw_count; ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, &q_params->desc_phy_base_map, (bus_addr_t *)&q_params->desc_phy_base, (void**)&q_params->desc_base, rx_ring->descs_size); if ((q_params->desc_base == NULL) || (ret != 0)) return (ENOMEM); /* size in bytes of the udma completion ring descriptor */ q_params->cdesc_size = 16; rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size; ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag, &q_params->cdesc_phy_base_map, (bus_addr_t *)&q_params->cdesc_phy_base, (void**)&q_params->cdesc_base, rx_ring->cdescs_size); if ((q_params->cdesc_base == NULL) || (ret != 0)) return (ENOMEM); /* Allocate taskqueues */ TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring); rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT, taskqueue_thread_enqueue, &rx_ring->enqueue_tq); taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq", device_get_nameunit(adapter->dev)); /* Setup DMA descriptor areas. 
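 *
 * Unlike the tx tag above (nsegments = AL_ETH_PKT_MAX_BUFS, maxsegsize =
 * PAGE_SIZE), the rx tag below uses a single segment of up to AL_TSO_SIZE,
 * because every receive buffer is one physically contiguous mbuf cluster.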
*/ ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AL_TSO_SIZE, /* maxsize */ 1, /* nsegments */ AL_TSO_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rx_ring->dma_buf_tag); if (ret != 0) { device_printf(dev,"Unable to allocate RX dma_buf_tag\n"); return (ret); } for (size = 0; size < rx_ring->sw_count; size++) { ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0, &rx_ring->rx_buffer_info[size].dma_map); if (ret != 0) { device_printf(dev,"Unable to map DMA RX buffer memory\n"); return (ret); } } /* Zero out the descriptor ring */ memset(q_params->cdesc_base, 0, rx_ring->cdescs_size); /* Create LRO for the ring */ if ((adapter->netdev->if_capenable & IFCAP_LRO) != 0) { int err = tcp_lro_init(&rx_ring->lro); if (err != 0) { device_printf(adapter->dev, "LRO[%d] Initialization failed!\n", qid); } else { device_printf_dbg(adapter->dev, "RX Soft LRO[%d] Initialized\n", qid); rx_ring->lro_enabled = TRUE; rx_ring->lro.ifp = adapter->netdev; } } rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return (0); } /* * al_eth_free_rx_resources - Free Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free all receive software resources */ static void al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; struct al_udma_q_params *q_params = &rx_ring->q_params; int size; /* At this point interrupts' handlers must be deactivated */ while (taskqueue_cancel(rx_ring->enqueue_tq, &rx_ring->enqueue_task, NULL)) { taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task); } taskqueue_free(rx_ring->enqueue_tq); for (size = 0; size < rx_ring->sw_count; size++) { m_freem(rx_ring->rx_buffer_info[size].m); rx_ring->rx_buffer_info[size].m = NULL; bus_dmamap_unload(rx_ring->dma_buf_tag, rx_ring->rx_buffer_info[size].dma_map); bus_dmamap_destroy(rx_ring->dma_buf_tag, rx_ring->rx_buffer_info[size].dma_map); } bus_dma_tag_destroy(rx_ring->dma_buf_tag); free(rx_ring->rx_buffer_info, M_IFAL); rx_ring->rx_buffer_info = NULL; /* if not set, then don't free */ if (q_params->desc_base == NULL) return; al_dma_free_coherent(q_params->desc_phy_base_tag, q_params->desc_phy_base_map, q_params->desc_base); q_params->desc_base = NULL; /* if not set, then don't free */ if (q_params->cdesc_base == NULL) return; al_dma_free_coherent(q_params->cdesc_phy_base_tag, q_params->cdesc_phy_base_map, q_params->cdesc_base); q_params->cdesc_phy_base = 0; /* Free LRO resources */ tcp_lro_free(&rx_ring->lro); } /* * al_eth_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources */ static void al_eth_free_all_rx_resources(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i].q_params.desc_base != 0) al_eth_free_rx_resources(adapter, i); } /* * al_eth_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static int al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_rx_queues; i++) { rc = al_eth_setup_rx_resources(adapter, i); if (rc == 0) continue; device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } return (0); err_setup_rx: /* rewind the index freeing the rings as 
we go */
	while (i--)
		al_eth_free_rx_resources(adapter, i);

	return (rc);
}

/*
 * al_eth_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int
al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		rc = al_eth_setup_tx_resources(adapter, i);
		if (rc == 0)
			continue;

		device_printf(adapter->dev,
		    "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return (0);

err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		al_eth_free_tx_resources(adapter, i);

	return (rc);
}

static void
al_eth_disable_int_sync(struct al_eth_adapter *adapter)
{

	/* disable forwarding interrupts from eth through pci end point */
	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
	    (adapter->board_type == ALPINE_NIC)) {
		al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
	}

	/* mask hw interrupts */
	al_eth_interrupts_mask(adapter);
}

static void
al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
{
	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
	uint32_t group_d_mask = 3 << 8;
	struct unit_regs __iomem *regs_base =
	    (struct unit_regs __iomem *)adapter->udma_base;

	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
		    AL_INT_GROUP_A_GROUP_C_SUM |
		    AL_INT_GROUP_A_GROUP_D_SUM;

	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_A, group_a_mask);
	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_B, group_b_mask);
	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_C, group_c_mask);
	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_D, group_d_mask);
}

static void
al_eth_interrupts_mask(struct al_eth_adapter *adapter)
{
	struct unit_regs __iomem *regs_base =
	    (struct unit_regs __iomem *)adapter->udma_base;

	/* mask all interrupts */
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
}

static int
al_eth_configure_int_mode(struct al_eth_adapter *adapter)
{
	enum al_iofic_mode int_mode;
	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;

	/* single INTX mode */
	if (adapter->msix_vecs == 0)
		int_mode = AL_IOFIC_MODE_LEGACY;
	else if (adapter->msix_vecs > 1)
		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
	else {
		device_printf(adapter->dev,
		    "udma doesn't support single MSI-X mode yet.\n");
		return (EIO);
	}

	if (adapter->board_type != ALPINE_INTEGRATED) {
		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
	}

	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
	    int_mode, m2s_errors_disable, m2s_aborts_disable,
	    s2m_errors_disable, s2m_aborts_disable)) {
		device_printf(adapter->dev,
"al_udma_unit_int_config failed!.\n"); return (EIO); } adapter->int_mode = int_mode; device_printf_dbg(adapter->dev, "using %s interrupt mode\n", int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" : int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown"); /* set interrupt moderation resolution to 15us */ al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15); al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15); /* by default interrupt coalescing is disabled */ adapter->tx_usecs = 0; adapter->rx_usecs = 0; return (0); } /* * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table * @n_rx_rings: Number of RX rings to use * * This function provides the default policy for RX flow hash indirection. */ static inline uint32_t ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings) { return (index % n_rx_rings); } static void* al_eth_update_stats(struct al_eth_adapter *adapter) { struct al_eth_mac_stats *mac_stats = &adapter->mac_stats; if (adapter->up == 0) return (NULL); al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats); return (NULL); } static uint64_t al_get_counter(struct ifnet *ifp, ift_counter cnt) { struct al_eth_adapter *adapter; struct al_eth_mac_stats *mac_stats; uint64_t rv; adapter = if_getsoftc(ifp); mac_stats = &adapter->mac_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (mac_stats->aFramesReceivedOK); /* including pause frames */ case IFCOUNTER_OPACKETS: return (mac_stats->aFramesTransmittedOK); case IFCOUNTER_IBYTES: return (mac_stats->aOctetsReceivedOK); case IFCOUNTER_OBYTES: return (mac_stats->aOctetsTransmittedOK); case IFCOUNTER_IMCASTS: return (mac_stats->ifInMulticastPkts); case IFCOUNTER_OMCASTS: return (mac_stats->ifOutMulticastPkts); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IQDROPS: return (mac_stats->etherStatsDropEvents); case IFCOUNTER_IERRORS: rv = mac_stats->ifInErrors + mac_stats->etherStatsUndersizePkts + /* good but short */ mac_stats->etherStatsFragments + /* short and bad*/ mac_stats->etherStatsJabbers + /* with crc errors */ mac_stats->etherStatsOversizePkts + mac_stats->aFrameCheckSequenceErrors + mac_stats->aAlignmentErrors; return (rv); case IFCOUNTER_OERRORS: return (mac_stats->ifOutErrors); default: return (if_get_counter_default(ifp, cnt)); } } /* * Unicast, Multicast and Promiscuous mode set * * The set_rx_mode entry point is called whenever the unicast or multicast * address lists or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper unicast, multicast, * promiscuous mode, and all-multi behavior. 
*/ #define MAX_NUM_MULTICAST_ADDRESSES 32 #define MAX_NUM_ADDRESSES 32 static void al_eth_set_rx_mode(struct al_eth_adapter *adapter) { struct ifnet *ifp = adapter->netdev; struct ifmultiaddr *ifma; /* multicast addresses configured */ struct ifaddr *ifua; /* unicast address */ int mc = 0; int uc = 0; uint8_t i; unsigned char *mac; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mc == MAX_NUM_MULTICAST_ADDRESSES) break; mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); /* default mc address inside mac address */ if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1) mc++; } if_maddr_runlock(ifp); if_addr_rlock(ifp); TAILQ_FOREACH(ifua, &ifp->if_addrhead, ifa_link) { if (ifua->ifa_addr->sa_family != AF_LINK) continue; if (uc == MAX_NUM_ADDRESSES) break; uc++; } if_addr_runlock(ifp); if ((ifp->if_flags & IFF_PROMISC) != 0) { al_eth_mac_table_promiscuous_set(adapter, true); } else { if ((ifp->if_flags & IFF_ALLMULTI) != 0) { /* This interface is in all-multicasts mode (used by multicast routers). */ al_eth_mac_table_all_multicast_add(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); } else { if (mc == 0) { al_eth_mac_table_entry_clear(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX); } else { al_eth_mac_table_all_multicast_add(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); } } if (uc != 0) { i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1; if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) { /* * In this case there are more addresses then * entries in the mac table - set promiscuous */ al_eth_mac_table_promiscuous_set(adapter, true); return; } /* clear the last configuration */ while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) { al_eth_mac_table_entry_clear(adapter, i); i++; } /* set new addresses */ i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1; if_addr_rlock(ifp); TAILQ_FOREACH(ifua, &ifp->if_addrhead, ifa_link) { if (ifua->ifa_addr->sa_family != AF_LINK) { continue; } al_eth_mac_table_unicast_add(adapter, i, (unsigned char *)ifua->ifa_addr, 1); i++; } if_addr_runlock(ifp); } al_eth_mac_table_promiscuous_set(adapter, false); } } static void al_eth_config_rx_fwd(struct al_eth_adapter *adapter) { struct al_eth_fwd_ctrl_table_entry entry; int i; /* let priority be equal to pbits */ for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++) al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i); /* map priority to queue index, queue id = priority/2 */ for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1); entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0; entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE; entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO; entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE; entry.filter = FALSE; al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry); /* * By default set the mac table to forward all unicast packets to our * MAC address and all broadcast. all the rest will be dropped. 
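 *
 * The RSS indirection default programmed below is a plain round-robin,
 * rss_ind_tbl[i] = i % AL_ETH_NUM_QUEUES; e.g. with 4 queues the table
 * reads 0,1,2,3,0,1,2,3,... across all AL_ETH_RX_RSS_TABLE_SIZE entries.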
*/ al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE, adapter->mac_addr, 1); al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1); al_eth_mac_table_promiscuous_set(adapter, false); /* set toeplitz hash keys */ for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++) *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random(); for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++) al_eth_hash_key_set(&adapter->hal_adapter, i, htonl(adapter->toeplitz_hash_key[i])); for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) { adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, AL_ETH_NUM_QUEUES); al_eth_set_thash_table_entry(adapter, i, 0, adapter->rss_ind_tbl[i]); } al_eth_fsm_table_init(adapter); } static void al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size) { /* * Determine the correct mbuf pool * for doing jumbo frames * Try from the smallest up to maximum supported */ adapter->rx_mbuf_sz = MCLBYTES; if (size > 2048) { if (adapter->max_rx_buff_alloc_size > 2048) adapter->rx_mbuf_sz = MJUMPAGESIZE; else return; } if (size > 4096) { if (adapter->max_rx_buff_alloc_size > 4096) adapter->rx_mbuf_sz = MJUM9BYTES; else return; } if (size > 9216) { if (adapter->max_rx_buff_alloc_size > 9216) adapter->rx_mbuf_sz = MJUM16BYTES; else return; } } static int al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu) { int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; al_eth_req_rx_buff_size(adapter, new_mtu); device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu); al_eth_rx_pkt_limit_config(&adapter->hal_adapter, AL_ETH_MIN_FRAME_LEN, max_frame); al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100); return (0); } static int al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu) { int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if ((new_mtu < AL_ETH_MIN_FRAME_LEN) || (max_frame > AL_ETH_MAX_FRAME_LEN)) { return (EINVAL); } return (0); } static int al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type, int qid) { int rc = 0; char *name = (type == UDMA_TX) ? 
"Tx" : "Rx"; struct al_udma_q_params *q_params; if (type == UDMA_TX) q_params = &adapter->tx_ring[qid].q_params; else q_params = &adapter->rx_ring[qid].q_params; rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params); if (rc < 0) { device_printf(adapter->dev, "config %s queue %u failed\n", name, qid); return (rc); } return (rc); } static int al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) al_eth_udma_queue_enable(adapter, UDMA_TX, i); for (i = 0; i < adapter->num_rx_queues; i++) al_eth_udma_queue_enable(adapter, UDMA_RX, i); return (0); } static void al_eth_up_complete(struct al_eth_adapter *adapter) { al_eth_configure_int_mode(adapter); al_eth_config_rx_fwd(adapter); al_eth_change_mtu(adapter, adapter->netdev->if_mtu); al_eth_udma_queues_enable_all(adapter); al_eth_refill_all_rx_bufs(adapter); al_eth_interrupts_unmask(adapter); /* enable forwarding interrupts from eth through pci end point */ if ((adapter->board_type == ALPINE_FPGA_NIC) || (adapter->board_type == ALPINE_NIC)) { al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR); } al_eth_flow_ctrl_enable(adapter); mtx_lock(&adapter->stats_mtx); callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter); mtx_unlock(&adapter->stats_mtx); al_eth_mac_start(&adapter->hal_adapter); } static int al_media_update(struct ifnet *ifp) { struct al_eth_adapter *adapter = ifp->if_softc; if ((ifp->if_flags & IFF_UP) != 0) mii_mediachg(adapter->mii); return (0); } static void al_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct al_eth_adapter *sc = ifp->if_softc; struct mii_data *mii; if (sc->mii == NULL) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; return; } mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void al_tick(void *arg) { struct al_eth_adapter *adapter = arg; mii_tick(adapter->mii); /* Schedule another timeout one second from now */ callout_schedule(&adapter->wd_callout, hz); } static void al_tick_stats(void *arg) { struct al_eth_adapter *adapter = arg; al_eth_update_stats(adapter); callout_schedule(&adapter->stats_callout, hz); } static int al_eth_up(struct al_eth_adapter *adapter) { struct ifnet *ifp = adapter->netdev; int rc; if (adapter->up) return (0); if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) { al_eth_function_reset(adapter); adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED; } ifp->if_hwassist = 0; if ((ifp->if_capenable & IFCAP_TSO) != 0) ifp->if_hwassist |= CSUM_TSO; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0) ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); al_eth_serdes_init(adapter); rc = al_eth_hw_init(adapter); if (rc != 0) goto err_hw_init_open; rc = al_eth_setup_int_mode(adapter); if (rc != 0) { device_printf(adapter->dev, "%s failed at setup interrupt mode!\n", __func__); goto err_setup_int; } /* allocate transmit descriptors */ rc = al_eth_setup_all_tx_resources(adapter); if (rc != 0) goto err_setup_tx; /* allocate receive descriptors */ rc = al_eth_setup_all_rx_resources(adapter); if (rc != 0) goto err_setup_rx; rc = al_eth_request_irq(adapter); if (rc != 0) goto err_req_irq; al_eth_up_complete(adapter); adapter->up = true; if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) adapter->netdev->if_link_state = LINK_STATE_UP; if (adapter->mac_mode 
== AL_ETH_MAC_MODE_RGMII) { mii_mediachg(adapter->mii); /* Schedule watchdog timeout */ mtx_lock(&adapter->wd_mtx); callout_reset(&adapter->wd_callout, hz, al_tick, adapter); mtx_unlock(&adapter->wd_mtx); mii_pollstat(adapter->mii); } return (rc); err_req_irq: al_eth_free_all_rx_resources(adapter); err_setup_rx: al_eth_free_all_tx_resources(adapter); err_setup_tx: al_eth_free_irq(adapter); err_setup_int: al_eth_hw_stop(adapter); err_hw_init_open: al_eth_function_reset(adapter); return (rc); } static int al_shutdown(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); al_eth_down(adapter); return (0); } static void al_eth_down(struct al_eth_adapter *adapter) { device_printf_dbg(adapter->dev, "al_eth_down: begin\n"); adapter->up = false; mtx_lock(&adapter->wd_mtx); callout_stop(&adapter->wd_callout); mtx_unlock(&adapter->wd_mtx); al_eth_disable_int_sync(adapter); mtx_lock(&adapter->stats_mtx); callout_stop(&adapter->stats_callout); mtx_unlock(&adapter->stats_mtx); al_eth_free_irq(adapter); al_eth_hw_stop(adapter); al_eth_free_all_tx_resources(adapter); al_eth_free_all_rx_resources(adapter); } static int al_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct al_eth_adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFMTU: { error = al_eth_check_mtu(adapter, ifr->ifr_mtu); if (error != 0) { device_printf(adapter->dev, "ioctl wrong mtu %u\n", adapter->netdev->if_mtu); break; } ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->netdev->if_mtu = ifr->ifr_mtu; al_init(adapter); break; } case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { device_printf_dbg(adapter->dev, "ioctl promisc/allmulti\n"); al_eth_set_rx_mode(adapter); } } else { error = al_eth_up(adapter); if (error == 0) ifp->if_drv_flags |= IFF_DRV_RUNNING; } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { al_eth_down(adapter); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } } adapter->if_flags = ifp->if_flags; break; case SIOCADDMULTI: case SIOCDELMULTI: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { device_printf_dbg(adapter->dev, "ioctl add/del multi before\n"); al_eth_set_rx_mode(adapter); #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) == 0) #endif } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (adapter->mii != NULL) error = ifmedia_ioctl(ifp, ifr, &adapter->mii->mii_media, command); else error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { if (error != 0) return (error); ifp->if_capenable |= IFCAP_POLLING; } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ ifp->if_capenable &= ~IFCAP_POLLING; } } #endif if ((mask & IFCAP_HWCSUM) != 0) { /* apply to both rx and tx */ ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if ((mask & IFCAP_HWCSUM_IPV6) != 0) { ifp->if_capenable ^= IFCAP_HWCSUM_IPV6; reinit = 1; } if ((mask & IFCAP_TSO) != 0) { ifp->if_capenable ^= IFCAP_TSO; reinit = 1; } if ((mask & IFCAP_LRO) != 0) { ifp->if_capenable ^= IFCAP_LRO; } if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if ((mask & IFCAP_VLAN_HWFILTER) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if 
((mask & IFCAP_VLAN_HWTSO) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if ((reinit != 0) && ((ifp->if_drv_flags & IFF_DRV_RUNNING)) != 0) { al_init(adapter); } break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int al_is_device_supported(device_t dev) { uint16_t pci_vendor_id = pci_get_vendor(dev); uint16_t pci_device_id = pci_get_device(dev); return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS && (pci_device_id == PCI_DEVICE_ID_AL_ETH || pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED || pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC || pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC)); } /* Time in mSec to keep trying to read / write from MDIO in case of error */ #define MDIO_TIMEOUT_MSEC 100 #define MDIO_PAUSE_MSEC 10 static int al_miibus_readreg(device_t dev, int phy, int reg) { struct al_eth_adapter *adapter = device_get_softc(dev); uint16_t value = 0; int rc; int timeout = MDIO_TIMEOUT_MSEC; while (timeout > 0) { rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr, -1, reg, &value); if (rc == 0) return (value); device_printf_dbg(adapter->dev, "mdio read failed. try again in 10 msec\n"); timeout -= MDIO_PAUSE_MSEC; pause("readred pause", MDIO_PAUSE_MSEC); } if (rc != 0) device_printf(adapter->dev, "MDIO read failed on timeout\n"); return (value); } static int al_miibus_writereg(device_t dev, int phy, int reg, int value) { struct al_eth_adapter *adapter = device_get_softc(dev); int rc; int timeout = MDIO_TIMEOUT_MSEC; while (timeout > 0) { rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr, -1, reg, value); if (rc == 0) return (0); device_printf(adapter->dev, "mdio write failed. try again in 10 msec\n"); timeout -= MDIO_PAUSE_MSEC; pause("miibus writereg", MDIO_PAUSE_MSEC); } if (rc != 0) device_printf(adapter->dev, "MDIO write failed on timeout\n"); return (rc); } static void al_miibus_statchg(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); device_printf_dbg(adapter->dev, "al_miibus_statchg: state has changed!\n"); device_printf_dbg(adapter->dev, "al_miibus_statchg: active = 0x%x status = 0x%x\n", adapter->mii->mii_media_active, adapter->mii->mii_media_status); if (adapter->up == 0) return; if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) { if (adapter->mii->mii_media_status & IFM_ACTIVE) { device_printf(adapter->dev, "link is UP\n"); adapter->netdev->if_link_state = LINK_STATE_UP; } else { device_printf(adapter->dev, "link is DOWN\n"); adapter->netdev->if_link_state = LINK_STATE_DOWN; } } } static void al_miibus_linkchg(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); uint8_t duplex = 0; uint8_t speed = 0; - if (adapter->mii == 0) + if (adapter->mii == NULL) return; if ((adapter->netdev->if_flags & IFF_UP) == 0) return; /* Ignore link changes when link is not ready */ if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) != (IFM_AVALID | IFM_ACTIVE)) { return; } if ((adapter->mii->mii_media_active & IFM_FDX) != 0) duplex = 1; speed = IFM_SUBTYPE(adapter->mii->mii_media_active); if (speed == IFM_10_T) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_10BASE_T_SPEED, duplex); return; } if (speed == IFM_100_TX) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_100BASE_TX_SPEED, duplex); return; } if (speed == IFM_1000_T) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_1000BASE_T_SPEED, duplex); return; } device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n", adapter->mii->mii_media_active); } Index: 
head/sys/dev/al_eth/al_init_eth_lm.c =================================================================== --- head/sys/dev/al_eth/al_init_eth_lm.c (revision 313981) +++ head/sys/dev/al_eth/al_init_eth_lm.c (revision 313982) @@ -1,1537 +1,1537 @@ /*- * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "al_init_eth_lm.h" #include "al_serdes.h" #include "al_hal_eth.h" #include "al_init_eth_kr.h" /** * @{ * @file al_init_eth_lm.c * * @brief ethernet link management common utilities * */ /* delay before checking link status with new serdes parameters (uSec) */ #define AL_ETH_LM_LINK_STATUS_DELAY 1000 /* delay before checking link status after reconfiguring the retimer (uSec) */ #define AL_ETH_LM_RETIMER_LINK_STATUS_DELAY 50000 #define AL_ETH_LM_EQ_ITERATIONS 15 #define AL_ETH_LM_MAX_DCGAIN 8 /* num of link training failures till serdes reset */ #define AL_ETH_LT_FAILURES_TO_RESET 10 #define MODULE_IDENTIFIER_IDX 0 #define MODULE_IDENTIFIER_SFP 0x3 #define MODULE_IDENTIFIER_QSFP 0xd #define SFP_PRESENT 0 #define SFP_NOT_PRESENT 1 /* SFP+ module */ #define SFP_I2C_HEADER_10G_IDX 3 #define SFP_I2C_HEADER_10G_DA_IDX 8 #define SFP_I2C_HEADER_10G_DA_LEN_IDX 18 #define SFP_I2C_HEADER_1G_IDX 6 #define SFP_I2C_HEADER_SIGNAL_RATE 12 /* Nominal signaling rate, units of 100MBd. 
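 *
 * Worked example: the byte is in units of 100MBd, so 250 means
 * 250 * 100MBd = 25GBd (a 25G module) and 100 means 10GBd, matching the
 * SFP_MIN_SIGNAL_RATE_* thresholds defined below.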
*/ #define SFP_MIN_SIGNAL_RATE_25G 250 #define SFP_MIN_SIGNAL_RATE_10G 100 /* QSFP+ module */ #define QSFP_COMPLIANCE_CODE_IDX 131 /* 40GBASE-LR4 and 40GBASE-SR4 are optic modules */ #define QSFP_COMPLIANCE_CODE_OPTIC ((1 << 1) | (1 << 2)) #define QSFP_COMPLIANCE_CODE_DAC (1 << 3) #define QSFP_CABLE_LEN_IDX 146 /* TODO: need to check the necessary delay */ #define AL_ETH_LM_RETIMER_WAIT_FOR_LOCK 500 /* delay after retimer reset to lock (mSec) */ #define AL_ETH_LM_SERDES_WAIT_FOR_LOCK 50 /* delay after signal detect to lock (mSec) */ #define AL_ETH_LM_GEARBOX_RESET_DELAY 1000 /* (uSec) */ static const uint32_t al_eth_retimer_boost_addr[AL_ETH_RETIMER_CHANNEL_MAX][AL_ETH_RETIMER_TYPE_MAX] = { /* BR_210 | BR_410 */ /* AL_ETH_RETIMER_CHANNEL_A */ {0xf, 0x1a}, /* AL_ETH_RETIMER_CHANNEL_B */ {0x16, 0x18}, /* AL_ETH_RETIMER_CHANNEL_C */ {0x0, 0x16}, /* AL_ETH_RETIMER_CHANNEL_D */ {0x0, 0x14}, }; #define RETIMER_LENS_MAX 5 static const uint32_t al_eth_retimer_boost_lens[RETIMER_LENS_MAX] = {0, 1, 2, 3, 5}; static const uint32_t al_eth_retimer_boost_value[RETIMER_LENS_MAX + 1][AL_ETH_RETIMER_TYPE_MAX] = { /* BR_210 | BR_410 */ /* 0 */ {0x0, 0x0}, /* 1 */ {0x1, 0x1}, /* 2 */ {0x2, 0x1}, /* 3 */ {0x3, 0x3}, /* 5 */ {0x7, 0x3}, /* 5+ */{0xb, 0x7}, }; struct retimer_config_reg { uint8_t addr; uint8_t value; uint8_t mask; }; static struct retimer_config_reg retimer_ds25_25g_mode_tx_ch[] = { {.addr = 0x0A, .value = 0x0C, .mask = 0xff }, {.addr = 0x2F, .value = 0x54, .mask = 0xff }, {.addr = 0x31, .value = 0x20, .mask = 0xff }, {.addr = 0x1E, .value = 0xE9, .mask = 0xff }, {.addr = 0x1F, .value = 0x0B, .mask = 0xff }, {.addr = 0xA6, .value = 0x43, .mask = 0xff }, {.addr = 0x2A, .value = 0x5A, .mask = 0xff }, {.addr = 0x2B, .value = 0x0A, .mask = 0xff }, {.addr = 0x2C, .value = 0xF6, .mask = 0xff }, {.addr = 0x70, .value = 0x05, .mask = 0xff }, {.addr = 0x6A, .value = 0x21, .mask = 0xff }, {.addr = 0x35, .value = 0x0F, .mask = 0xff }, {.addr = 0x12, .value = 0x83, .mask = 0xff }, {.addr = 0x9C, .value = 0x24, .mask = 0xff }, {.addr = 0x98, .value = 0x00, .mask = 0xff }, {.addr = 0x42, .value = 0x50, .mask = 0xff }, {.addr = 0x44, .value = 0x90, .mask = 0xff }, {.addr = 0x45, .value = 0xC0, .mask = 0xff }, {.addr = 0x46, .value = 0xD0, .mask = 0xff }, {.addr = 0x47, .value = 0xD1, .mask = 0xff }, {.addr = 0x48, .value = 0xD5, .mask = 0xff }, {.addr = 0x49, .value = 0xD8, .mask = 0xff }, {.addr = 0x4A, .value = 0xEA, .mask = 0xff }, {.addr = 0x4B, .value = 0xF7, .mask = 0xff }, {.addr = 0x4C, .value = 0xFD, .mask = 0xff }, {.addr = 0x8E, .value = 0x00, .mask = 0xff }, {.addr = 0x3D, .value = 0x94, .mask = 0xff }, {.addr = 0x3F, .value = 0x40, .mask = 0xff }, {.addr = 0x3E, .value = 0x43, .mask = 0xff }, {.addr = 0x0A, .value = 0x00, .mask = 0xff }, }; static struct retimer_config_reg retimer_ds25_25g_mode_rx_ch[] = { {.addr = 0x0A, .value = 0x0C, .mask = 0xff}, {.addr = 0x2F, .value = 0x54, .mask = 0xff}, {.addr = 0x31, .value = 0x40, .mask = 0xff}, {.addr = 0x1E, .value = 0xE3, .mask = 0xff}, {.addr = 0x1F, .value = 0x0B, .mask = 0xff}, {.addr = 0xA6, .value = 0x43, .mask = 0xff}, {.addr = 0x2A, .value = 0x5A, .mask = 0xff}, {.addr = 0x2B, .value = 0x0A, .mask = 0xff}, {.addr = 0x2C, .value = 0xF6, .mask = 0xff}, {.addr = 0x70, .value = 0x05, .mask = 0xff}, {.addr = 0x6A, .value = 0x21, .mask = 0xff}, {.addr = 0x35, .value = 0x0F, .mask = 0xff}, {.addr = 0x12, .value = 0x83, .mask = 0xff}, {.addr = 0x9C, .value = 0x24, .mask = 0xff}, {.addr = 0x98, .value = 0x00, .mask = 0xff}, {.addr = 0x42, .value = 0x50, 
.mask = 0xff}, {.addr = 0x44, .value = 0x90, .mask = 0xff}, {.addr = 0x45, .value = 0xC0, .mask = 0xff}, {.addr = 0x46, .value = 0xD0, .mask = 0xff}, {.addr = 0x47, .value = 0xD1, .mask = 0xff}, {.addr = 0x48, .value = 0xD5, .mask = 0xff}, {.addr = 0x49, .value = 0xD8, .mask = 0xff}, {.addr = 0x4A, .value = 0xEA, .mask = 0xff}, {.addr = 0x4B, .value = 0xF7, .mask = 0xff}, {.addr = 0x4C, .value = 0xFD, .mask = 0xff}, {.addr = 0x8E, .value = 0x00, .mask = 0xff}, {.addr = 0x3D, .value = 0x94, .mask = 0xff}, {.addr = 0x3F, .value = 0x40, .mask = 0xff}, {.addr = 0x3E, .value = 0x43, .mask = 0xff}, {.addr = 0x0A, .value = 0x00, .mask = 0xff}, }; static struct retimer_config_reg retimer_ds25_10g_mode[] = { /* Assert CDR reset (6.3) */ {.addr = 0x0A, .value = 0x0C, .mask = 0x0C}, /* Select 10.3125Gbps standard rate mode (6.6) */ {.addr = 0x2F, .value = 0x00, .mask = 0xF0}, /* Enable loop filter auto-adjust */ {.addr = 0x1F, .value = 0x08, .mask = 0x08}, /* Set Adapt Mode 1 (6.13) */ {.addr = 0x31, .value = 0x20, .mask = 0x60}, /* Disable the DFE since most applications do not need it (6.18) */ {.addr = 0x1E, .value = 0x08, .mask = 0x08}, /* Release CDR reset (6.4) */ {.addr = 0x0A, .value = 0x00, .mask = 0x0C}, /* Enable FIR (6.12) */ {.addr = 0x3D, .value = 0x80, .mask = 0x80}, /* Set Main-cursor tap sign to positive (6.12) */ {.addr = 0x3D, .value = 0x00, .mask = 0x40}, /* Set Post-cursor tap sign to negative (6.12) */ {.addr = 0x3F, .value = 0x40, .mask = 0x40}, /* Set Pre-cursor tap sign to negative (6.12) */ {.addr = 0x3E, .value = 0x40, .mask = 0x40}, /* Set Main-cursor tap magnitude to 13 (6.12) */ {.addr = 0x3D, .value = 0x0D, .mask = 0x1F}, }; static int al_eth_lm_retimer_boost_config(struct al_eth_lm_context *lm_context); static int al_eth_lm_retimer_ds25_full_config(struct al_eth_lm_context *lm_context); static al_bool al_eth_lm_retimer_ds25_signal_detect( struct al_eth_lm_context *lm_context, uint32_t channel); static int al_eth_lm_retimer_ds25_cdr_reset(struct al_eth_lm_context *lm_context, uint32_t channel); static al_bool al_eth_lm_retimer_ds25_cdr_lock( struct al_eth_lm_context *lm_context, uint32_t channel); static int al_eth_lm_retimer_25g_rx_adaptation(struct al_eth_lm_context *lm_context); struct al_eth_lm_retimer { int (*config)(struct al_eth_lm_context *lm_context); int (*reset)(struct al_eth_lm_context *lm_context, uint32_t channel); int (*signal_detect)(struct al_eth_lm_context *lm_context, uint32_t channel); int (*cdr_lock)(struct al_eth_lm_context *lm_context, uint32_t channel); int (*rx_adaptation)(struct al_eth_lm_context *lm_context); }; static struct al_eth_lm_retimer retimer[] = { {.config = al_eth_lm_retimer_boost_config, .signal_detect = NULL, .reset = NULL, .cdr_lock = NULL, .rx_adaptation = NULL}, {.config = al_eth_lm_retimer_boost_config, .signal_detect = NULL, .reset = NULL, .cdr_lock = NULL, .rx_adaptation = NULL}, {.config = al_eth_lm_retimer_ds25_full_config, .signal_detect = al_eth_lm_retimer_ds25_signal_detect, .reset = al_eth_lm_retimer_ds25_cdr_reset, .cdr_lock = al_eth_lm_retimer_ds25_cdr_lock, .rx_adaptation = al_eth_lm_retimer_25g_rx_adaptation}, }; #define SFP_10G_DA_ACTIVE 0x8 #define SFP_10G_DA_PASSIVE 0x4 #define lm_debug(...) 
\ do { \ if (lm_context->debug) \ al_warn(__VA_ARGS__); \ else \ al_dbg(__VA_ARGS__); \ } while (0) static int al_eth_sfp_detect(struct al_eth_lm_context *lm_context, enum al_eth_lm_link_mode *new_mode) { int rc = 0; uint8_t sfp_10g; uint8_t sfp_1g; uint8_t sfp_cable_tech; uint8_t sfp_da_len; uint8_t signal_rate; do { rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_10G_IDX, &sfp_10g); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_1G_IDX, &sfp_1g); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_10G_DA_IDX, &sfp_cable_tech); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_10G_DA_LEN_IDX, &sfp_da_len); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_SIGNAL_RATE, &signal_rate); } while (0); if (rc != 0) { if (rc == ETIMEDOUT) { /* ETIMEDOUT is returned when no SFP is connected */ if (lm_context->mode != AL_ETH_LM_MODE_DISCONNECTED) lm_debug("%s: SFP Disconnected\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; } else { return (rc); } } else if ((sfp_cable_tech & (SFP_10G_DA_PASSIVE | SFP_10G_DA_ACTIVE)) != 0) { if ((signal_rate >= SFP_MIN_SIGNAL_RATE_25G) && ((lm_context->max_speed == AL_ETH_LM_MAX_SPEED_25G) || (lm_context->max_speed == AL_ETH_LM_MAX_SPEED_MAX))) *new_mode = AL_ETH_LM_MODE_25G; else if ((signal_rate >= SFP_MIN_SIGNAL_RATE_10G) && ((lm_context->max_speed == AL_ETH_LM_MAX_SPEED_10G) || (lm_context->max_speed == AL_ETH_LM_MAX_SPEED_MAX))) *new_mode = AL_ETH_LM_MODE_10G_DA; else *new_mode = AL_ETH_LM_MODE_1G; lm_debug("%s: %s DAC (%d M) detected (max signal rate %d)\n", __func__, (sfp_cable_tech & SFP_10G_DA_PASSIVE) ? "Passive" : "Active", sfp_da_len, signal_rate); /* active direct attach cables use length 0 in the retimer configuration */ lm_context->da_len = (sfp_cable_tech & SFP_10G_DA_PASSIVE) ? sfp_da_len : 0; } else if (sfp_10g != 0) { lm_debug("%s: 10G SFP detected\n", __func__); *new_mode = AL_ETH_LM_MODE_10G_OPTIC; } else if (sfp_1g != 0) { lm_debug("%s: 1G SFP detected\n", __func__); *new_mode = AL_ETH_LM_MODE_1G; } else { al_warn("%s: unknown SFP inserted. eeprom content: 10G compliance 0x%x," " 1G compliance 0x%x, sfp+cable 0x%x. defaulting to %s\n", __func__, sfp_10g, sfp_1g, sfp_cable_tech, al_eth_lm_mode_convert_to_str(lm_context->default_mode)); *new_mode = lm_context->default_mode; lm_context->da_len = lm_context->default_dac_len; } if ((lm_context->sfp_detect_force_mode) && (*new_mode != AL_ETH_LM_MODE_DISCONNECTED) && (*new_mode != lm_context->default_mode)) { al_warn("%s: Forcing mode to default (%s);
mode detected from the SFP EEPROM was %s\n", __func__, al_eth_lm_mode_convert_to_str(lm_context->default_mode), al_eth_lm_mode_convert_to_str(*new_mode)); *new_mode = lm_context->default_mode; } lm_context->mode = *new_mode; return (0); } static int al_eth_qsfp_detect(struct al_eth_lm_context *lm_context, enum al_eth_lm_link_mode *new_mode) { int rc = 0; uint8_t qsfp_comp_code; uint8_t qsfp_da_len; do { rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, QSFP_COMPLIANCE_CODE_IDX, &qsfp_comp_code); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, QSFP_CABLE_LEN_IDX, &qsfp_da_len); if (rc != 0) break; } while (0); if (rc != 0) { if (rc == ETIMEDOUT) { /* ETIMEDOUT is returned when no SFP is connected */ lm_debug("%s: SFP Disconnected\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; } else { return (rc); } } else if ((qsfp_comp_code & QSFP_COMPLIANCE_CODE_DAC) != 0) { lm_debug("%s: 10G passive DAC (%d M) detected\n", __func__, qsfp_da_len); *new_mode = AL_ETH_LM_MODE_10G_DA; lm_context->da_len = qsfp_da_len; } else if ((qsfp_comp_code & QSFP_COMPLIANCE_CODE_OPTIC) != 0) { lm_debug("%s: 10G optic module detected\n", __func__); *new_mode = AL_ETH_LM_MODE_10G_OPTIC; } else { al_warn("%s: unknown QSFP inserted. eeprom content: 10G " "compliance 0x%x, defaulting to %s\n", __func__, qsfp_comp_code, al_eth_lm_mode_convert_to_str(lm_context->default_mode)); *new_mode = lm_context->default_mode; lm_context->da_len = lm_context->default_dac_len; } lm_context->mode = *new_mode; return (0); } static int al_eth_module_detect(struct al_eth_lm_context *lm_context, enum al_eth_lm_link_mode *new_mode) { int rc = 0; uint8_t module_idx; int sfp_present = SFP_PRESENT; if ((lm_context->gpio_get) && (lm_context->gpio_present != 0)) sfp_present = lm_context->gpio_get(lm_context->gpio_present); if (sfp_present == SFP_NOT_PRESENT) { lm_debug("%s: SFP not present\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; return (0); } rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, MODULE_IDENTIFIER_IDX, &module_idx); if (rc != 0) { if (rc == ETIMEDOUT) { /* ETIMEDOUT is returned when no SFP is connected */ if (lm_context->mode != AL_ETH_LM_MODE_DISCONNECTED) lm_debug("%s: SFP Disconnected\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; return (0); } else { return (rc); } } if (module_idx == MODULE_IDENTIFIER_QSFP) return (al_eth_qsfp_detect(lm_context, new_mode)); else return (al_eth_sfp_detect(lm_context, new_mode)); }
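/*
 * Worked example for the DAC signal-rate test in al_eth_sfp_detect() above:
 * byte 12 of the SFP EEPROM (SFF-8472) holds the nominal signalling rate in
 * units of 100 MBd. A 10G direct attach cable typically reports 103
 * (10.3 GBd), which clears SFP_MIN_SIGNAL_RATE_10G (100) but not
 * SFP_MIN_SIGNAL_RATE_25G (250), so it is classified as 10G; a 25G cable
 * reports 255 (25.5 GBd) and is classified as 25G when max_speed allows it.
 */

static struct al_serdes_adv_tx_params da_tx_params = { .override = TRUE, .amp = 0x1, .total_driver_units = 0x13, .c_plus_1 = 0x2, .c_plus_2 = 0, .c_minus_1 = 0x2, .slew_rate = 0, }; static struct al_serdes_adv_rx_params da_rx_params = { .override = TRUE, .dcgain = 0x4, .dfe_3db_freq = 0x4, .dfe_gain = 0x3, .dfe_first_tap_ctrl = 0x5, .dfe_secound_tap_ctrl = 0x1, .dfe_third_tap_ctrl = 0x8, .dfe_fourth_tap_ctrl = 0x1, .low_freq_agc_gain = 0x7, .precal_code_sel = 0, .high_freq_agc_boost = 0x1d, }; static struct al_serdes_adv_tx_params optic_tx_params = { .override = TRUE, .amp = 0x1, .total_driver_units = 0x13, .c_plus_1 = 0x2, .c_plus_2 = 0, .c_minus_1 = 0, .slew_rate = 0, }; static struct al_serdes_adv_rx_params optic_rx_params = { .override = TRUE, .dcgain = 0x0, .dfe_3db_freq = 0x7, .dfe_gain = 0x0, .dfe_first_tap_ctrl = 0x0, .dfe_secound_tap_ctrl = 0x8, .dfe_third_tap_ctrl = 0x0, .dfe_fourth_tap_ctrl = 0x8,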
.low_freq_agc_gain = 0x7, .precal_code_sel = 0, .high_freq_agc_boost = 0x4, }; static void al_eth_serdes_static_tx_params_set(struct al_eth_lm_context *lm_context) { if (lm_context->tx_param_dirty == 0) return; if (lm_context->serdes_tx_params_valid != 0) { lm_context->tx_param_dirty = 0; lm_context->tx_params_override.override = TRUE; if ((lm_context->serdes_obj->tx_advanced_params_set) == 0) { al_err("tx_advanced_params_set is not supported for this serdes group\n"); return; } lm_context->serdes_obj->tx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &lm_context->tx_params_override); } else if (lm_context->static_values != 0) { lm_context->tx_param_dirty = 0; if ((lm_context->serdes_obj->tx_advanced_params_set) == 0) { al_err("tx_advanced_params_set is not supported for this serdes group\n"); return; } if ((lm_context->retimer_exist == 0) && (lm_context->mode == AL_ETH_LM_MODE_10G_DA)) lm_context->serdes_obj->tx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &da_tx_params); else lm_context->serdes_obj->tx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &optic_tx_params); } } static void al_eth_serdes_static_rx_params_set(struct al_eth_lm_context *lm_context) { if (lm_context->rx_param_dirty == 0) return; if (lm_context->serdes_rx_params_valid != 0) { lm_context->rx_param_dirty = 0; lm_context->rx_params_override.override = TRUE; if ((lm_context->serdes_obj->rx_advanced_params_set) == 0) { al_err("rx_advanced_params_set is not supported for this serdes group\n"); return; } lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &lm_context->rx_params_override); } else if (lm_context->static_values != 0) { lm_context->rx_param_dirty = 0; if ((lm_context->serdes_obj->rx_advanced_params_set) == 0) { al_err("rx_advanced_params_set is not supported for this serdes group\n"); return; } if ((lm_context->retimer_exist == 0) && (lm_context->mode == AL_ETH_LM_MODE_10G_DA)) lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &da_rx_params); else lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &optic_rx_params); } } static int al_eth_rx_equal_run(struct al_eth_lm_context *lm_context) { struct al_serdes_adv_rx_params rx_params; int dcgain; int best_dcgain = -1; int i; int best_score = -1; int test_score = -1; rx_params.override = FALSE; lm_context->serdes_obj->rx_advanced_params_set(lm_context->serdes_obj, lm_context->lane, &rx_params); lm_debug("score | dcgain | dfe3db | dfegain | tap1 | tap2 | tap3 | " "tap4 | low freq | high freq\n"); for (dcgain = 0; dcgain < AL_ETH_LM_MAX_DCGAIN; dcgain++) { lm_context->serdes_obj->dcgain_set( lm_context->serdes_obj, dcgain); test_score = lm_context->serdes_obj->rx_equalization( lm_context->serdes_obj, lm_context->lane); if (test_score < 0) { al_warn("serdes rx equalization failed on error\n"); return (test_score); } if (test_score > best_score) { best_score = test_score; best_dcgain = dcgain; } lm_context->serdes_obj->rx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, &rx_params); lm_debug("%6d|%8x|%8x|%9x|%6x|%6x|%6x|%6x|%10x|%10x|\n", test_score, rx_params.dcgain, rx_params.dfe_3db_freq, rx_params.dfe_gain, rx_params.dfe_first_tap_ctrl, rx_params.dfe_secound_tap_ctrl, rx_params.dfe_third_tap_ctrl, rx_params.dfe_fourth_tap_ctrl, rx_params.low_freq_agc_gain, rx_params.high_freq_agc_boost); } lm_context->serdes_obj->dcgain_set( lm_context->serdes_obj, best_dcgain); best_score = -1; for(i = 
0; i < AL_ETH_LM_EQ_ITERATIONS; i++) { test_score = lm_context->serdes_obj->rx_equalization( lm_context->serdes_obj, lm_context->lane); if (test_score < 0) { al_warn("serdes rx equalization failed on error\n"); return (test_score); } if (test_score > best_score) { best_score = test_score; lm_context->serdes_obj->rx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, &rx_params); } } rx_params.precal_code_sel = 0; rx_params.override = TRUE; lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &rx_params); lm_debug("-------------------- best dcgain %d ------------------------------------\n", best_dcgain); lm_debug("%6d|%8x|%8x|%9x|%6x|%6x|%6x|%6x|%10x|%10x|\n", best_score, rx_params.dcgain, rx_params.dfe_3db_freq, rx_params.dfe_gain, rx_params.dfe_first_tap_ctrl, rx_params.dfe_secound_tap_ctrl, rx_params.dfe_third_tap_ctrl, rx_params.dfe_fourth_tap_ctrl, rx_params.low_freq_agc_gain, rx_params.high_freq_agc_boost); return (0); } static int al_eth_lm_retimer_boost_config(struct al_eth_lm_context *lm_context) { int i; int rc = 0; uint8_t boost = 0; uint32_t boost_addr = al_eth_retimer_boost_addr[lm_context->retimer_channel][lm_context->retimer_type]; if (lm_context->mode != AL_ETH_LM_MODE_10G_DA) { boost = al_eth_retimer_boost_value[0][lm_context->retimer_type]; } else { for (i = 0; i < RETIMER_LENS_MAX; i++) { if (lm_context->da_len <= al_eth_retimer_boost_lens[i]) { boost = al_eth_retimer_boost_value[i][lm_context->retimer_type]; break; } } if (i == RETIMER_LENS_MAX) boost = al_eth_retimer_boost_value[RETIMER_LENS_MAX][lm_context->retimer_type]; } lm_debug("configuring retimer boost on channel %d (addr %x) to 0x%x\n", lm_context->retimer_channel, boost_addr, boost); rc = lm_context->i2c_write(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, boost_addr, boost); if (rc != 0) { al_err("%s: Error occurred (%d) while writing retimer " "configuration (bus-id %x i2c-addr %x)\n", __func__, rc, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr); return (rc); } return (0); } /******************************************************************************* ************************** retimer DS25 *************************************** ******************************************************************************/ #define LM_DS25_CHANNEL_EN_REG 0xff #define LM_DS25_CHANNEL_EN_MASK 0x03 #define LM_DS25_CHANNEL_EN_VAL 0x01 #define LM_DS25_CHANNEL_SEL_REG 0xfc #define LM_DS25_CHANNEL_SEL_MASK 0xff #define LM_DS25_CDR_RESET_REG 0x0a #define LM_DS25_CDR_RESET_MASK 0x0c #define LM_DS25_CDR_RESET_ASSERT 0x0c #define LM_DS25_CDR_RESET_RELEASE 0x00 #define LM_DS25_SIGNAL_DETECT_REG 0x78 #define LM_DS25_SIGNAL_DETECT_MASK 0x20 #define LM_DS25_CDR_LOCK_REG 0x78 #define LM_DS25_CDR_LOCK_MASK 0x10 #define LM_DS25_DRV_PD_REG 0x15 #define LM_DS25_DRV_PD_MASK 0x08 static int al_eth_lm_retimer_ds25_write_reg(struct al_eth_lm_context *lm_context, uint8_t reg_addr, uint8_t reg_mask, uint8_t reg_value) { uint8_t reg; int rc; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, reg_addr, &reg); if (rc != 0) return (EIO); reg &= ~(reg_mask); reg |= reg_value; rc = lm_context->i2c_write(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, reg_addr, reg); if (rc != 0) return (EIO); return (0); } static int al_eth_lm_retimer_ds25_channel_select(struct al_eth_lm_context *lm_context, uint8_t channel) { int rc = 0; /* Write to specific channel */ rc =
al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CHANNEL_EN_REG, LM_DS25_CHANNEL_EN_MASK, LM_DS25_CHANNEL_EN_VAL); if (rc != 0) return (rc); rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CHANNEL_SEL_REG, LM_DS25_CHANNEL_SEL_MASK, (1 << channel)); return (rc); } static int al_eth_lm_retimer_ds25_channel_config(struct al_eth_lm_context *lm_context, uint8_t channel, struct retimer_config_reg *config, uint8_t config_size) { uint8_t i; int rc; rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc != 0) goto config_error; for (i = 0; i < config_size; i++) { rc = al_eth_lm_retimer_ds25_write_reg(lm_context, config[i].addr, config[i].mask, config[i].value); if (rc != 0) goto config_error; } lm_debug("%s: retimer channel config done for channel %d\n", __func__, channel); return (0); config_error: al_err("%s: failed to access the retimer\n", __func__); return (rc); } static int al_eth_lm_retimer_ds25_cdr_reset(struct al_eth_lm_context *lm_context, uint32_t channel) { int rc; lm_debug("Performing CDR reset on channel %d\n", channel); rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc) goto config_error; rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CDR_RESET_REG, LM_DS25_CDR_RESET_MASK, LM_DS25_CDR_RESET_ASSERT); if (rc) goto config_error; rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CDR_RESET_REG, LM_DS25_CDR_RESET_MASK, LM_DS25_CDR_RESET_RELEASE); if (rc) goto config_error; return 0; config_error: al_err("%s: failed to access the retimer\n", __func__); return rc; } static boolean_t al_eth_lm_retimer_ds25_signal_detect(struct al_eth_lm_context *lm_context, uint32_t channel) { int rc = 0; uint8_t reg; rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc) goto config_error; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, LM_DS25_SIGNAL_DETECT_REG, &reg); if (rc) goto config_error; if (reg & LM_DS25_SIGNAL_DETECT_MASK) return TRUE; return FALSE; config_error: al_err("%s: failed to access the retimer\n", __func__); return FALSE; } static boolean_t al_eth_lm_retimer_ds25_cdr_lock(struct al_eth_lm_context *lm_context, uint32_t channel) { int rc = 0; uint8_t reg; rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc) goto config_error; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, LM_DS25_CDR_LOCK_REG, &reg); if (rc) goto config_error; if (reg & LM_DS25_CDR_LOCK_MASK) return TRUE; return FALSE; config_error: al_err("%s: failed to access the retimer\n", __func__); return FALSE; } static boolean_t al_eth_lm_wait_for_lock(struct al_eth_lm_context *lm_context, uint32_t channel) { uint32_t timeout = AL_ETH_LM_RETIMER_WAIT_FOR_LOCK; al_bool lock = AL_FALSE; while ((timeout > 0) && (lock == FALSE)) { al_msleep(10); timeout -= 10; lock = retimer[lm_context->retimer_type].cdr_lock(lm_context, channel); } lm_debug("%s: %s to achieve CDR lock in %d msec\n", __func__, (lock) ?
"succeed" : "FAILED", (AL_ETH_LM_RETIMER_WAIT_FOR_LOCK - timeout)); return lock; } static void al_eth_lm_retimer_signal_lock_check(struct al_eth_lm_context *lm_context, uint32_t channel, boolean_t *ready) { al_bool signal_detect = TRUE; al_bool cdr_lock = TRUE; if (retimer[lm_context->retimer_type].signal_detect) { if (!retimer[lm_context->retimer_type].signal_detect(lm_context, channel)) { lm_debug("no signal detected on retimer channel %d\n", channel); signal_detect = AL_FALSE; } else { if (retimer[lm_context->retimer_type].cdr_lock) { cdr_lock = retimer[lm_context->retimer_type].cdr_lock( lm_context, channel); if (!cdr_lock) { if (retimer[lm_context->retimer_type].reset) { retimer[lm_context->retimer_type].reset(lm_context, channel); cdr_lock = al_eth_lm_wait_for_lock(lm_context, channel); } } } } } al_info("%s: (channel %d) signal %d cdr lock %d\n", __func__, channel, signal_detect, (signal_detect) ? cdr_lock : 0); *ready = ((cdr_lock == TRUE) && (signal_detect == TRUE)); } static int al_eth_lm_retimer_ds25_full_config(struct al_eth_lm_context *lm_context) { int rc = 0; al_bool ready; struct retimer_config_reg *config_tx; uint32_t config_tx_size; struct retimer_config_reg *config_rx; uint32_t config_rx_size; if (lm_context->mode == AL_ETH_LM_MODE_25G) { config_tx = retimer_ds25_25g_mode_tx_ch; config_tx_size = AL_ARR_SIZE(retimer_ds25_25g_mode_tx_ch); config_rx = retimer_ds25_25g_mode_rx_ch; config_rx_size = AL_ARR_SIZE(retimer_ds25_25g_mode_rx_ch); } else { config_tx = retimer_ds25_10g_mode; config_tx_size = AL_ARR_SIZE(retimer_ds25_10g_mode); config_rx = retimer_ds25_10g_mode; config_rx_size = AL_ARR_SIZE(retimer_ds25_10g_mode); } rc = al_eth_lm_retimer_ds25_channel_config(lm_context, lm_context->retimer_channel, config_rx, config_rx_size); if (rc) return rc; rc = al_eth_lm_retimer_ds25_channel_config(lm_context, lm_context->retimer_tx_channel, config_tx, config_tx_size); if (rc) return rc; if (lm_context->serdes_obj->type_get() == AL_SRDS_TYPE_25G) { lm_debug("%s: serdes 25G - perform tx and rx gearbox reset\n", __func__); al_eth_gearbox_reset(lm_context->adapter, TRUE, TRUE); DELAY(AL_ETH_LM_GEARBOX_RESET_DELAY); } al_eth_lm_retimer_signal_lock_check(lm_context, lm_context->retimer_tx_channel, &ready); if (!ready) { lm_debug("%s: Failed to lock tx channel!\n", __func__); return (1); } lm_debug("%s: retimer full configuration done\n", __func__); return rc; } static int al_eth_lm_retimer_25g_rx_adaptation(struct al_eth_lm_context *lm_context) { int rc = 0; al_bool ready; al_eth_lm_retimer_signal_lock_check(lm_context, lm_context->retimer_channel, &ready); if (!ready) { lm_debug("%s: no signal detected on retimer Rx channel (%d)\n", __func__, lm_context->retimer_channel); return rc; } al_msleep(AL_ETH_LM_SERDES_WAIT_FOR_LOCK); return 0; } static int al_eth_lm_check_for_link(struct al_eth_lm_context *lm_context, boolean_t *link_up) { struct al_eth_link_status status; int ret = 0; al_eth_link_status_clear(lm_context->adapter); al_eth_link_status_get(lm_context->adapter, &status); if (status.link_up == AL_TRUE) { lm_debug("%s: >>>> Link state DOWN ==> UP\n", __func__); al_eth_led_set(lm_context->adapter, AL_TRUE); lm_context->link_state = AL_ETH_LM_LINK_UP; *link_up = AL_TRUE; return 0; } else if (status.local_fault) { lm_context->link_state = AL_ETH_LM_LINK_DOWN; al_eth_led_set(lm_context->adapter, AL_FALSE); al_err("%s: Failed to establish link\n", __func__); ret = 1; } else { lm_debug("%s: >>>> Link state DOWN ==> DOWN_RF\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN_RF; 
al_eth_led_set(lm_context->adapter, AL_FALSE); ret = 0; } *link_up = AL_FALSE; return ret; } /*****************************************************************************/ /***************************** API functions *********************************/ /*****************************************************************************/ int al_eth_lm_init(struct al_eth_lm_context *lm_context, struct al_eth_lm_init_params *params) { lm_context->adapter = params->adapter; lm_context->serdes_obj = params->serdes_obj; lm_context->lane = params->lane; lm_context->sfp_detection = params->sfp_detection; lm_context->sfp_bus_id = params->sfp_bus_id; lm_context->sfp_i2c_addr = params->sfp_i2c_addr; lm_context->retimer_exist = params->retimer_exist; lm_context->retimer_type = params->retimer_type; lm_context->retimer_bus_id = params->retimer_bus_id; lm_context->retimer_i2c_addr = params->retimer_i2c_addr; lm_context->retimer_channel = params->retimer_channel; lm_context->retimer_tx_channel = params->retimer_tx_channel; lm_context->default_mode = params->default_mode; lm_context->default_dac_len = params->default_dac_len; lm_context->link_training = params->link_training; lm_context->rx_equal = params->rx_equal; lm_context->static_values = params->static_values; lm_context->i2c_read = params->i2c_read; lm_context->i2c_write = params->i2c_write; lm_context->i2c_context = params->i2c_context; lm_context->get_random_byte = params->get_random_byte; /* i2c_read must be provided if sfp_detection is true */ al_assert((lm_context->sfp_detection == FALSE) || (lm_context->i2c_read != NULL)); al_assert((lm_context->retimer_exist == FALSE) || (lm_context->i2c_write != NULL)); lm_context->local_adv.selector_field = 1; lm_context->local_adv.capability = 0; lm_context->local_adv.remote_fault = 0; lm_context->local_adv.acknowledge = 0; lm_context->local_adv.next_page = 0; lm_context->local_adv.technology = AL_ETH_AN_TECH_10GBASE_KR; lm_context->local_adv.fec_capability = params->kr_fec_enable; lm_context->mode = AL_ETH_LM_MODE_DISCONNECTED; lm_context->serdes_tx_params_valid = FALSE; lm_context->serdes_rx_params_valid = FALSE; lm_context->rx_param_dirty = 1; lm_context->tx_param_dirty = 1; lm_context->gpio_get = params->gpio_get; lm_context->gpio_present = params->gpio_present; lm_context->max_speed = params->max_speed; lm_context->sfp_detect_force_mode = params->sfp_detect_force_mode; lm_context->lm_pause = params->lm_pause; lm_context->led_config = params->led_config; lm_context->retimer_configured = FALSE; lm_context->link_state = AL_ETH_LM_LINK_DOWN; return (0); } int al_eth_lm_link_detection(struct al_eth_lm_context *lm_context, boolean_t *link_fault, enum al_eth_lm_link_mode *old_mode, enum al_eth_lm_link_mode *new_mode) { int err; struct al_eth_link_status status; al_assert(lm_context != NULL); al_assert(old_mode != NULL); al_assert(new_mode != NULL); /** * If link management is paused, report no link fault when the link was * previously up, and set the new mode to disconnected so that link * establishment is not attempted if the link wasn't up.
*/ if (lm_context->lm_pause != NULL) { boolean_t lm_pause = lm_context->lm_pause(lm_context->i2c_context); if (lm_pause == TRUE) { *new_mode = AL_ETH_LM_MODE_DISCONNECTED; - if (link_fault != 0) { + if (link_fault != NULL) { if (lm_context->link_state == AL_ETH_LM_LINK_UP) *link_fault = FALSE; else *link_fault = TRUE; } return 0; } } *old_mode = lm_context->mode; *new_mode = lm_context->mode; if (link_fault != NULL) *link_fault = TRUE; switch (lm_context->link_state) { case AL_ETH_LM_LINK_UP: al_eth_link_status_get(lm_context->adapter, &status); if (status.link_up) { if (link_fault != NULL) *link_fault = FALSE; al_eth_led_set(lm_context->adapter, TRUE); return (0); } else if (status.local_fault) { lm_debug("%s: >>>> Link state UP ==> DOWN\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN; } else { lm_debug("%s: >>>> Link state UP ==> DOWN_RF\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN_RF; } break; case AL_ETH_LM_LINK_DOWN_RF: al_eth_link_status_get(lm_context->adapter, &status); if (status.local_fault) { lm_debug("%s: >>>> Link state DOWN_RF ==> DOWN\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN; break; } else if (status.remote_fault == FALSE) { lm_debug("%s: >>>> Link state DOWN_RF ==> UP\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_UP; } /* with a remote fault only, there is no need to check the SFP again */ return (0); case AL_ETH_LM_LINK_DOWN: break; }; al_eth_led_set(lm_context->adapter, FALSE); if (lm_context->sfp_detection) { err = al_eth_module_detect(lm_context, new_mode); if (err != 0) { al_err("module_detection failed!\n"); return (err); } lm_context->mode = *new_mode; } else { lm_context->mode = lm_context->default_mode; *new_mode = lm_context->mode; } if (*old_mode != *new_mode) { al_info("%s: New SFP mode detected %s -> %s\n", __func__, al_eth_lm_mode_convert_to_str(*old_mode), al_eth_lm_mode_convert_to_str(*new_mode)); lm_context->rx_param_dirty = 1; lm_context->tx_param_dirty = 1; lm_context->new_port = TRUE; if ((*new_mode != AL_ETH_LM_MODE_DISCONNECTED) && (lm_context->led_config)) { struct al_eth_lm_led_config_data data = {0}; switch (*new_mode) { case AL_ETH_LM_MODE_10G_OPTIC: case AL_ETH_LM_MODE_10G_DA: data.speed = AL_ETH_LM_LED_CONFIG_10G; break; case AL_ETH_LM_MODE_1G: data.speed = AL_ETH_LM_LED_CONFIG_1G; break; case AL_ETH_LM_MODE_25G: data.speed = AL_ETH_LM_LED_CONFIG_25G; break; default: al_err("%s: unknown LM mode!\n", __func__); }; lm_context->led_config(lm_context->i2c_context, &data); } } return (0); } int al_eth_lm_link_establish(struct al_eth_lm_context *lm_context, boolean_t *link_up) { boolean_t signal_detected; int ret = 0; switch (lm_context->link_state) { case AL_ETH_LM_LINK_UP: *link_up = TRUE; lm_debug("%s: return link up\n", __func__); return (0); case AL_ETH_LM_LINK_DOWN_RF: *link_up = FALSE; lm_debug("%s: return link down (DOWN_RF)\n", __func__); return (0); case AL_ETH_LM_LINK_DOWN: break; }; /** * At this point LM can be found paused only if it was disabled after link * detection finished. In that case the link will not be established until * LM is enabled again.
*/ if (lm_context->lm_pause) { boolean_t lm_pause = lm_context->lm_pause(lm_context->i2c_context); if (lm_pause == TRUE) { *link_up = FALSE; return (0); } } if ((lm_context->new_port) && (lm_context->retimer_exist)) { al_eth_serdes_static_rx_params_set(lm_context); al_eth_serdes_static_tx_params_set(lm_context); #if 0 al_eth_lm_retimer_config(lm_context); DELAY(AL_ETH_LM_RETIMER_LINK_STATUS_DELAY); #endif if (retimer[lm_context->retimer_type].config(lm_context)) { al_info("%s: failed to configure the retimer\n", __func__); *link_up = FALSE; return (1); } lm_context->new_port = FALSE; DELAY(1000); } if (lm_context->retimer_exist) { if (retimer[lm_context->retimer_type].rx_adaptation) { ret = retimer[lm_context->retimer_type].rx_adaptation(lm_context); if (ret != 0) { lm_debug("retimer rx is not ready\n"); *link_up = FALSE; return (0); } } } signal_detected = lm_context->serdes_obj->signal_is_detected( lm_context->serdes_obj, lm_context->lane); if (signal_detected == FALSE) { /* if no signal detected there is nothing to do */ lm_debug("serdes signal is down\n"); *link_up = AL_FALSE; return 0; } if (lm_context->serdes_obj->type_get() == AL_SRDS_TYPE_25G) { lm_debug("%s: serdes 25G - perform rx gearbox reset\n", __func__); al_eth_gearbox_reset(lm_context->adapter, FALSE, TRUE); DELAY(AL_ETH_LM_GEARBOX_RESET_DELAY); } if (lm_context->retimer_exist) { DELAY(AL_ETH_LM_RETIMER_LINK_STATUS_DELAY); ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with retimer\n", __func__); return 0; } return ret; } if ((lm_context->mode == AL_ETH_LM_MODE_10G_DA) && (lm_context->link_training)) { lm_context->local_adv.transmitted_nonce = lm_context->get_random_byte(); lm_context->local_adv.transmitted_nonce &= 0x1f; ret = al_eth_an_lt_execute(lm_context->adapter, lm_context->serdes_obj, lm_context->lane, &lm_context->local_adv, &lm_context->partner_adv); lm_context->rx_param_dirty = 1; lm_context->tx_param_dirty = 1; if (ret == 0) { al_info("%s: link training finished successfully\n", __func__); lm_context->link_training_failures = 0; ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with LT\n", __func__); return (0); } } lm_context->link_training_failures++; if (lm_context->link_training_failures > AL_ETH_LT_FAILURES_TO_RESET) { lm_debug("%s: failed to establish LT %d times. 
resetting serdes\n", __func__, AL_ETH_LT_FAILURES_TO_RESET); lm_context->serdes_obj->pma_hard_reset_lane( lm_context->serdes_obj, lm_context->lane, TRUE); lm_context->serdes_obj->pma_hard_reset_lane( lm_context->serdes_obj, lm_context->lane, FALSE); lm_context->link_training_failures = 0; } } al_eth_serdes_static_tx_params_set(lm_context); if ((lm_context->mode == AL_ETH_LM_MODE_10G_DA) && (lm_context->rx_equal)) { ret = al_eth_rx_equal_run(lm_context); if (ret == 0) { DELAY(AL_ETH_LM_LINK_STATUS_DELAY); ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with Rx Equalization\n", __func__); return (0); } } } al_eth_serdes_static_rx_params_set(lm_context); DELAY(AL_ETH_LM_LINK_STATUS_DELAY); ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with static parameters\n", __func__); return (0); } *link_up = FALSE; return (1); } int al_eth_lm_static_parameters_override(struct al_eth_lm_context *lm_context, struct al_serdes_adv_tx_params *tx_params, struct al_serdes_adv_rx_params *rx_params) { if (tx_params != NULL) { lm_context->tx_params_override = *tx_params; lm_context->tx_param_dirty = 1; lm_context->serdes_tx_params_valid = TRUE; } if (rx_params != NULL) { lm_context->rx_params_override = *rx_params; lm_context->rx_param_dirty = 1; lm_context->serdes_rx_params_valid = TRUE; } return (0); } int al_eth_lm_static_parameters_override_disable(struct al_eth_lm_context *lm_context, boolean_t tx_params, boolean_t rx_params) { if (tx_params != 0) lm_context->serdes_tx_params_valid = FALSE; if (rx_params != 0) lm_context->serdes_rx_params_valid = FALSE; return (0); } int al_eth_lm_static_parameters_get(struct al_eth_lm_context *lm_context, struct al_serdes_adv_tx_params *tx_params, struct al_serdes_adv_rx_params *rx_params) { if (tx_params != NULL) { if (lm_context->serdes_tx_params_valid) *tx_params = lm_context->tx_params_override; else lm_context->serdes_obj->tx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, tx_params); } if (rx_params != NULL) { if (lm_context->serdes_rx_params_valid) *rx_params = lm_context->rx_params_override; else lm_context->serdes_obj->rx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, rx_params); } return (0); } const char * al_eth_lm_mode_convert_to_str(enum al_eth_lm_link_mode val) { switch (val) { case AL_ETH_LM_MODE_DISCONNECTED: return ("AL_ETH_LM_MODE_DISCONNECTED"); case AL_ETH_LM_MODE_10G_OPTIC: return ("AL_ETH_LM_MODE_10G_OPTIC"); case AL_ETH_LM_MODE_10G_DA: return ("AL_ETH_LM_MODE_10G_DA"); case AL_ETH_LM_MODE_1G: return ("AL_ETH_LM_MODE_1G"); case AL_ETH_LM_MODE_25G: return ("AL_ETH_LM_MODE_25G"); } return ("N/A"); } void al_eth_lm_debug_mode_set(struct al_eth_lm_context *lm_context, boolean_t enable) { lm_context->debug = enable; }
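The entry points above are meant to be driven periodically by the attaching driver. A minimal sketch of such a caller, with a hypothetical lm_tick() task (the function name and the reconfiguration hook are invented here for illustration; the real caller lives in al_eth.c and is not shown):

static void
lm_tick(struct al_eth_lm_context *ctx)
{
	enum al_eth_lm_link_mode old_mode, new_mode;
	boolean_t link_fault, link_up;

	/* Sample the module and the current MAC link state. */
	if (al_eth_lm_link_detection(ctx, &link_fault, &old_mode,
	    &new_mode) != 0)
		return;
	if (new_mode == AL_ETH_LM_MODE_DISCONNECTED)
		return;
	if (old_mode != new_mode) {
		/* Reconfigure the MAC/serdes for the new mode here. */
	}
	/* Only a faulted link needs to be (re-)established. */
	if (link_fault)
		(void)al_eth_lm_link_establish(ctx, &link_up);
}

Index: head/sys/dev/an/if_an.c =================================================================== --- head/sys/dev/an/if_an.c (revision 313981) +++ head/sys/dev/an/if_an.c (revision 313982) @@ -1,3810 +1,3810 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.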
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Aironet 4500/4800 802.11 PCMCIA/ISA/PCI driver for FreeBSD. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ #include __FBSDID("$FreeBSD$"); /* * The Aironet 4500/4800 series cards come in PCMCIA, ISA and PCI form. * This driver supports all three device types (PCI devices are supported * through an extra PCI shim: /sys/dev/an/if_an_pci.c). ISA devices can be * supported either using hard-coded IO port/IRQ settings or via Plug * and Play. The 4500 series devices support 1Mbps and 2Mbps data rates. * The 4800 devices support 1, 2, 5.5 and 11Mbps rates. * * Like the WaveLAN/IEEE cards, the Aironet NICs are all essentially * PCMCIA devices. The ISA and PCI cards are a combination of a PCMCIA * device and a PCMCIA to ISA or PCMCIA to PCI adapter card. There are * a couple of important differences though: * * - Lucent ISA card looks to the host like a PCMCIA controller with * a PCMCIA WaveLAN card inserted. This means that even desktop * machines need to be configured with PCMCIA support in order to * use WaveLAN/IEEE ISA cards. The Aironet cards on the other hand * actually look like normal ISA and PCI devices to the host, so * no PCMCIA controller support is needed * * The latter point results in a small gotcha. The Aironet PCMCIA * cards can be configured for one of two operating modes depending * on how the Vpp1 and Vpp2 programming voltages are set when the * card is activated. In order to put the card in proper PCMCIA * operation (where the CIS table is visible and the interface is * programmed for PCMCIA operation), both Vpp1 and Vpp2 have to be * set to 5 volts. FreeBSD by default doesn't set the Vpp voltages, * which leaves the card in ISA/PCI mode, which prevents it from * being activated as an PCMCIA device. * * Note that some PCMCIA controller software packages for Windows NT * fail to set the voltages as well. * * The Aironet devices can operate in both station mode and access point * mode. 
Typically, when programmed for station mode, the card can be set * to automatically perform encapsulation/decapsulation of Ethernet II * and 802.3 frames within 802.11 frames so that the host doesn't have * to do it itself. This driver doesn't program the card that way: the * driver handles all of the encapsulation/decapsulation itself. */ #include "opt_inet.h" #ifdef INET #define ANCACHE /* enable signal strength cache */ #endif #include #include #include #include #include #include #include #include #include #ifdef ANCACHE #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include /* These are global because we need them in sys/pci/if_an_p.c. */ static void an_reset(struct an_softc *); static int an_init_mpi350_desc(struct an_softc *); static int an_ioctl(struct ifnet *, u_long, caddr_t); static void an_init(void *); static void an_init_locked(struct an_softc *); static int an_init_tx_ring(struct an_softc *); static void an_start(struct ifnet *); static void an_start_locked(struct ifnet *); static void an_watchdog(struct an_softc *); static void an_rxeof(struct an_softc *); static void an_txeof(struct an_softc *, int); static void an_promisc(struct an_softc *, int); static int an_cmd(struct an_softc *, int, int); static int an_cmd_struct(struct an_softc *, struct an_command *, struct an_reply *); static int an_read_record(struct an_softc *, struct an_ltv_gen *); static int an_write_record(struct an_softc *, struct an_ltv_gen *); static int an_read_data(struct an_softc *, int, int, caddr_t, int); static int an_write_data(struct an_softc *, int, int, caddr_t, int); static int an_seek(struct an_softc *, int, int, int); static int an_alloc_nicmem(struct an_softc *, int, int *); static int an_dma_malloc(struct an_softc *, bus_size_t, struct an_dma_alloc *, int); static void an_dma_free(struct an_softc *, struct an_dma_alloc *); static void an_dma_malloc_cb(void *, bus_dma_segment_t *, int, int); static void an_stats_update(void *); static void an_setdef(struct an_softc *, struct an_req *); #ifdef ANCACHE static void an_cache_store(struct an_softc *, struct ether_header *, struct mbuf *, u_int8_t, u_int8_t); #endif /* function definitions for use with the Cisco's Linux configuration utilities */ static int readrids(struct ifnet*, struct aironet_ioctl*); static int writerids(struct ifnet*, struct aironet_ioctl*); static int flashcard(struct ifnet*, struct aironet_ioctl*); static int cmdreset(struct ifnet *); static int setflashmode(struct ifnet *); static int flashgchar(struct ifnet *,int,int); static int flashpchar(struct ifnet *,int,int); static int flashputbuf(struct ifnet *); static int flashrestart(struct ifnet *); static int WaitBusy(struct ifnet *, int); static int unstickbusy(struct ifnet *); static void an_dump_record (struct an_softc *,struct an_ltv_gen *, char *); static int an_media_change (struct ifnet *); static void an_media_status (struct ifnet *, struct ifmediareq *); static int an_dump = 0; static int an_cache_mode = 0; #define DBM 0 #define PERCENT 1 #define RAW 2 static char an_conf[256]; static char an_conf_cache[256]; /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, an, CTLFLAG_RD, 0, "Wireless driver parameters"); /* XXX violate ethernet/netgraph callback hooks */ extern void (*ng_ether_attach_p)(struct ifnet *ifp); extern void (*ng_ether_detach_p)(struct ifnet 
*ifp); static int sysctl_an_dump(SYSCTL_HANDLER_ARGS) { int error, r, last; char *s = an_conf; last = an_dump; switch (an_dump) { case 0: strcpy(an_conf, "off"); break; case 1: strcpy(an_conf, "type"); break; case 2: strcpy(an_conf, "dump"); break; default: snprintf(an_conf, 5, "%x", an_dump); break; } error = sysctl_handle_string(oidp, an_conf, sizeof(an_conf), req); if (strncmp(an_conf,"off", 3) == 0) { an_dump = 0; } if (strncmp(an_conf,"dump", 4) == 0) { an_dump = 1; } if (strncmp(an_conf,"type", 4) == 0) { an_dump = 2; } if (*s == 'f') { r = 0; for (;;s++) { if ((*s >= '0') && (*s <= '9')) { r = r * 16 + (*s - '0'); } else if ((*s >= 'a') && (*s <= 'f')) { r = r * 16 + (*s - 'a' + 10); } else { break; } } an_dump = r; } if (an_dump != last) printf("Sysctl changed for Aironet driver\n"); return error; } SYSCTL_PROC(_hw_an, OID_AUTO, an_dump, CTLTYPE_STRING | CTLFLAG_RW, 0, sizeof(an_conf), sysctl_an_dump, "A", ""); static int sysctl_an_cache_mode(SYSCTL_HANDLER_ARGS) { int error; switch (an_cache_mode) { case 1: strcpy(an_conf_cache, "per"); break; case 2: strcpy(an_conf_cache, "raw"); break; default: strcpy(an_conf_cache, "dbm"); break; } error = sysctl_handle_string(oidp, an_conf_cache, sizeof(an_conf_cache), req); if (strncmp(an_conf_cache,"dbm", 3) == 0) { an_cache_mode = 0; } if (strncmp(an_conf_cache,"per", 3) == 0) { an_cache_mode = 1; } if (strncmp(an_conf_cache,"raw", 3) == 0) { an_cache_mode = 2; } return error; } SYSCTL_PROC(_hw_an, OID_AUTO, an_cache_mode, CTLTYPE_STRING | CTLFLAG_RW, 0, sizeof(an_conf_cache), sysctl_an_cache_mode, "A", ""); /* * We probe for an Aironet 4500/4800 card by attempting to * read the default SSID list. On reset, the first entry in * the SSID list will contain the name "tsunami." If we don't * find this, then there's no card present. */ int an_probe(device_t dev) { struct an_softc *sc = device_get_softc(dev); struct an_ltv_ssidlist_new ssid; int error; bzero((char *)&ssid, sizeof(ssid)); error = an_alloc_port(dev, 0, AN_IOSIZ); if (error != 0) return (0); /* can't do autoprobing */ if (rman_get_start(sc->port_res) == -1) return(0); /* * We need to fake up a softc structure long enough * to be able to issue commands and call some of the * other routines. */ ssid.an_len = sizeof(ssid); ssid.an_type = AN_RID_SSIDLIST; /* Make sure interrupts are disabled. */ sc->mpi350 = 0; CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), 0xFFFF); sc->an_dev = dev; mtx_init(&sc->an_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); AN_LOCK(sc); an_reset(sc); if (an_cmd(sc, AN_CMD_READCFG, 0)) { AN_UNLOCK(sc); goto fail; } if (an_read_record(sc, (struct an_ltv_gen *)&ssid)) { AN_UNLOCK(sc); goto fail; } /* See if the ssid matches what we expect ... but doesn't have to */ if (strcmp(ssid.an_entry[0].an_ssid, AN_DEF_SSID)) { AN_UNLOCK(sc); goto fail; } AN_UNLOCK(sc); return(AN_IOSIZ); fail: mtx_destroy(&sc->an_mtx); return(0); } /* * Allocate a port resource with the given resource id. */ int an_alloc_port(device_t dev, int rid, int size) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &rid, size, RF_ACTIVE); if (res) { sc->port_rid = rid; sc->port_res = res; return (0); } else { return (ENOENT); } } /* * Allocate a memory resource with the given resource id. 
*/ int an_alloc_memory(device_t dev, int rid, int size) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY, &rid, size, RF_ACTIVE); if (res) { sc->mem_rid = rid; sc->mem_res = res; sc->mem_used = size; return (0); } else { return (ENOENT); } } /* * Allocate an auxiliary memory resource with the given resource id. */ int an_alloc_aux_memory(device_t dev, int rid, int size) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY, &rid, size, RF_ACTIVE); if (res) { sc->mem_aux_rid = rid; sc->mem_aux_res = res; sc->mem_aux_used = size; return (0); } else { return (ENOENT); } } /* * Allocate an irq resource with the given resource id. */ int an_alloc_irq(device_t dev, int rid, int flags) { struct an_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, (RF_ACTIVE | flags)); if (res) { sc->irq_rid = rid; sc->irq_res = res; return (0); } else { return (ENOENT); } } static void an_dma_malloc_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } /* * Allocate DMA memory and set the pointer to it */ static int an_dma_malloc(struct an_softc *sc, bus_size_t size, struct an_dma_alloc *dma, int mapflags) { int r; r = bus_dmamem_alloc(sc->an_dtag, (void**) &dma->an_dma_vaddr, BUS_DMA_NOWAIT, &dma->an_dma_map); if (r != 0) return (r); /* nothing to unwind if the allocation itself failed */ r = bus_dmamap_load(sc->an_dtag, dma->an_dma_map, dma->an_dma_vaddr, size, an_dma_malloc_cb, &dma->an_dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) goto fail_2; dma->an_dma_size = size; return (0); fail_2: bus_dmamap_unload(sc->an_dtag, dma->an_dma_map); fail_1: bus_dmamem_free(sc->an_dtag, dma->an_dma_vaddr, dma->an_dma_map); return (r); } static void an_dma_free(struct an_softc *sc, struct an_dma_alloc *dma) { bus_dmamap_unload(sc->an_dtag, dma->an_dma_map); bus_dmamem_free(sc->an_dtag, dma->an_dma_vaddr, dma->an_dma_map); dma->an_dma_vaddr = 0; } /* * Release all resources */ void an_release_resources(device_t dev) { struct an_softc *sc = device_get_softc(dev); int i; if (sc->port_res) { bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; } if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); sc->mem_res = 0; } if (sc->mem_aux_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_aux_rid, sc->mem_aux_res); sc->mem_aux_res = 0; } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; } if (sc->an_rid_buffer.an_dma_paddr) { an_dma_free(sc, &sc->an_rid_buffer); } for (i = 0; i < AN_MAX_RX_DESC; i++) if (sc->an_rx_buffer[i].an_dma_paddr) { an_dma_free(sc, &sc->an_rx_buffer[i]); } for (i = 0; i < AN_MAX_TX_DESC; i++) if (sc->an_tx_buffer[i].an_dma_paddr) { an_dma_free(sc, &sc->an_tx_buffer[i]); } if (sc->an_dtag) { bus_dma_tag_destroy(sc->an_dtag); } } int an_init_mpi350_desc(struct an_softc *sc) { struct an_command cmd_struct; struct an_reply reply; struct an_card_rid_desc an_rid_desc; struct an_card_rx_desc an_rx_desc; struct an_card_tx_desc an_tx_desc; int i, desc; AN_LOCK_ASSERT(sc); if(!sc->an_rid_buffer.an_dma_paddr) an_dma_malloc(sc, AN_RID_BUFFER_SIZE, &sc->an_rid_buffer, 0); for (i = 0; i < AN_MAX_RX_DESC; i++) if(!sc->an_rx_buffer[i].an_dma_paddr) an_dma_malloc(sc, AN_RX_BUFFER_SIZE, &sc->an_rx_buffer[i], 0); for (i = 0; i < AN_MAX_TX_DESC; i++) if(!sc->an_tx_buffer[i].an_dma_paddr)
an_dma_malloc(sc, AN_TX_BUFFER_SIZE, &sc->an_tx_buffer[i], 0); /* * Allocate RX descriptor */ bzero(&reply,sizeof(reply)); cmd_struct.an_cmd = AN_CMD_ALLOC_DESC; cmd_struct.an_parm0 = AN_DESCRIPTOR_RX; cmd_struct.an_parm1 = AN_RX_DESC_OFFSET; cmd_struct.an_parm2 = AN_MAX_RX_DESC; if (an_cmd_struct(sc, &cmd_struct, &reply)) { if_printf(sc->an_ifp, "failed to allocate RX descriptor\n"); return(EIO); } for (desc = 0; desc < AN_MAX_RX_DESC; desc++) { bzero(&an_rx_desc, sizeof(an_rx_desc)); an_rx_desc.an_valid = 1; an_rx_desc.an_len = AN_RX_BUFFER_SIZE; an_rx_desc.an_done = 0; an_rx_desc.an_phys = sc->an_rx_buffer[desc].an_dma_paddr; for (i = 0; i < sizeof(an_rx_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_RX_DESC_OFFSET + (desc * sizeof(an_rx_desc)) + (i * 4), ((u_int32_t *)(void *)&an_rx_desc)[i]); } /* * Allocate TX descriptor */ bzero(&reply,sizeof(reply)); cmd_struct.an_cmd = AN_CMD_ALLOC_DESC; cmd_struct.an_parm0 = AN_DESCRIPTOR_TX; cmd_struct.an_parm1 = AN_TX_DESC_OFFSET; cmd_struct.an_parm2 = AN_MAX_TX_DESC; if (an_cmd_struct(sc, &cmd_struct, &reply)) { if_printf(sc->an_ifp, "failed to allocate TX descriptor\n"); return(EIO); } for (desc = 0; desc < AN_MAX_TX_DESC; desc++) { bzero(&an_tx_desc, sizeof(an_tx_desc)); an_tx_desc.an_offset = 0; an_tx_desc.an_eoc = 0; an_tx_desc.an_valid = 0; an_tx_desc.an_len = 0; an_tx_desc.an_phys = sc->an_tx_buffer[desc].an_dma_paddr; for (i = 0; i < sizeof(an_tx_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_TX_DESC_OFFSET + (desc * sizeof(an_tx_desc)) + (i * 4), ((u_int32_t *)(void *)&an_tx_desc)[i]); } /* * Allocate RID descriptor */ bzero(&reply,sizeof(reply)); cmd_struct.an_cmd = AN_CMD_ALLOC_DESC; cmd_struct.an_parm0 = AN_DESCRIPTOR_HOSTRW; cmd_struct.an_parm1 = AN_HOST_DESC_OFFSET; cmd_struct.an_parm2 = 1; if (an_cmd_struct(sc, &cmd_struct, &reply)) { if_printf(sc->an_ifp, "failed to allocate host descriptor\n"); return(EIO); } bzero(&an_rid_desc, sizeof(an_rid_desc)); an_rid_desc.an_valid = 1; an_rid_desc.an_len = AN_RID_BUFFER_SIZE; an_rid_desc.an_rid = 0; an_rid_desc.an_phys = sc->an_rid_buffer.an_dma_paddr; for (i = 0; i < sizeof(an_rid_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_HOST_DESC_OFFSET + i * 4, ((u_int32_t *)(void *)&an_rid_desc)[i]); return(0); } int an_attach(struct an_softc *sc, int flags) { struct ifnet *ifp; int error = EIO; int i, nrate, mword; u_int8_t r; ifp = sc->an_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->an_dev, "can not if_alloc()\n"); goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->an_dev), device_get_unit(sc->an_dev)); sc->an_gone = 0; sc->an_associated = 0; sc->an_monitor = 0; sc->an_was_monitor = 0; sc->an_flash_buffer = NULL; /* Reset the NIC. 
*/ AN_LOCK(sc); an_reset(sc); if (sc->mpi350) { error = an_init_mpi350_desc(sc); if (error) goto fail; } /* Load factory config */ if (an_cmd(sc, AN_CMD_READCFG, 0)) { device_printf(sc->an_dev, "failed to load config data\n"); goto fail; } /* Read the current configuration */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_config)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } /* Read the card capabilities */ sc->an_caps.an_type = AN_RID_CAPABILITIES; sc->an_caps.an_len = sizeof(struct an_ltv_caps); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_caps)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } /* Read ssid list */ sc->an_ssidlist.an_type = AN_RID_SSIDLIST; sc->an_ssidlist.an_len = sizeof(struct an_ltv_ssidlist_new); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_ssidlist)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } /* Read AP list */ sc->an_aplist.an_type = AN_RID_APLIST; sc->an_aplist.an_len = sizeof(struct an_ltv_aplist); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_aplist)) { device_printf(sc->an_dev, "read record failed\n"); goto fail; } #ifdef ANCACHE /* Read the RSSI <-> dBm map */ sc->an_have_rssimap = 0; if (sc->an_caps.an_softcaps & 8) { sc->an_rssimap.an_type = AN_RID_RSSI_MAP; sc->an_rssimap.an_len = sizeof(struct an_ltv_rssi_map); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_rssimap)) { device_printf(sc->an_dev, "unable to get RSSI <-> dBM map\n"); } else { device_printf(sc->an_dev, "got RSSI <-> dBM map\n"); sc->an_have_rssimap = 1; } } else { device_printf(sc->an_dev, "no RSSI <-> dBM map\n"); } #endif AN_UNLOCK(sc); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = an_ioctl; ifp->if_start = an_start; ifp->if_init = an_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); bzero(sc->an_config.an_nodename, sizeof(sc->an_config.an_nodename)); bcopy(AN_DEFAULT_NODENAME, sc->an_config.an_nodename, sizeof(AN_DEFAULT_NODENAME) - 1); bzero(sc->an_ssidlist.an_entry[0].an_ssid, sizeof(sc->an_ssidlist.an_entry[0].an_ssid)); bcopy(AN_DEFAULT_NETNAME, sc->an_ssidlist.an_entry[0].an_ssid, sizeof(AN_DEFAULT_NETNAME) - 1); sc->an_ssidlist.an_entry[0].an_len = strlen(AN_DEFAULT_NETNAME); sc->an_config.an_opmode = AN_OPMODE_INFRASTRUCTURE_STATION; sc->an_tx_rate = 0; bzero((char *)&sc->an_stats, sizeof(sc->an_stats)); nrate = 8; ifmedia_init(&sc->an_ifmedia, 0, an_media_change, an_media_status); if_printf(ifp, "supported rates: "); #define ADD(s, o) ifmedia_add(&sc->an_ifmedia, \ IFM_MAKEWORD(IFM_IEEE80211, (s), (o), 0), 0, NULL) ADD(IFM_AUTO, 0); ADD(IFM_AUTO, IFM_IEEE80211_ADHOC); for (i = 0; i < nrate; i++) { r = sc->an_caps.an_rates[i]; mword = ieee80211_rate2media(NULL, r, IEEE80211_MODE_AUTO); if (mword == 0) continue; printf("%s%d%sMbps", (i != 0 ? " " : ""), (r & IEEE80211_RATE_VAL) / 2, ((r & 0x1) != 0 ? ".5" : "")); ADD(mword, 0); ADD(mword, IFM_IEEE80211_ADHOC); } printf("\n"); ifmedia_set(&sc->an_ifmedia, IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, 0, 0)); #undef ADD /* * Call MI attach routine. 
*/ ether_ifattach(ifp, sc->an_caps.an_oemaddr); callout_init_mtx(&sc->an_stat_ch, &sc->an_mtx, 0); return(0); fail: AN_UNLOCK(sc); mtx_destroy(&sc->an_mtx); if (ifp != NULL) if_free(ifp); return(error); } int an_detach(device_t dev) { struct an_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->an_ifp; if (sc->an_gone) { device_printf(dev,"already unloaded\n"); return(0); } AN_LOCK(sc); an_stop(sc); sc->an_gone = 1; ifmedia_removeall(&sc->an_ifmedia); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; AN_UNLOCK(sc); ether_ifdetach(ifp); bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); callout_drain(&sc->an_stat_ch); if_free(ifp); an_release_resources(dev); mtx_destroy(&sc->an_mtx); return (0); } static void an_rxeof(struct an_softc *sc) { struct ifnet *ifp; struct ether_header *eh; struct ieee80211_frame *ih; struct an_rxframe rx_frame; struct an_rxframe_802_3 rx_frame_802_3; struct mbuf *m; int len, id, error = 0, i, count = 0; int ieee80211_header_len; u_char *bpf_buf; u_short fc1; struct an_card_rx_desc an_rx_desc; u_int8_t *buf; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; if (!sc->mpi350) { id = CSR_READ_2(sc, AN_RX_FID); if (sc->an_monitor && (ifp->if_flags & IFF_PROMISC)) { /* read raw 802.11 packet */ bpf_buf = sc->buf_802_11; /* read header */ if (an_read_data(sc, id, 0x0, (caddr_t)&rx_frame, sizeof(rx_frame))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* * skip beacon by default since this increases the * system load a lot */ if (!(sc->an_monitor & AN_MONITOR_INCLUDE_BEACON) && (rx_frame.an_frame_ctl & IEEE80211_FC0_SUBTYPE_BEACON)) { return; } if (sc->an_monitor & AN_MONITOR_AIRONET_HEADER) { len = rx_frame.an_rx_payload_len + sizeof(rx_frame); /* Check for insane frame length */ if (len > sizeof(sc->buf_802_11)) { if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } bcopy((char *)&rx_frame, bpf_buf, sizeof(rx_frame)); error = an_read_data(sc, id, sizeof(rx_frame), (caddr_t)bpf_buf+sizeof(rx_frame), rx_frame.an_rx_payload_len); } else { fc1=rx_frame.an_frame_ctl >> 8; ieee80211_header_len = sizeof(struct ieee80211_frame); if ((fc1 & IEEE80211_FC1_DIR_TODS) && (fc1 & IEEE80211_FC1_DIR_FROMDS)) { ieee80211_header_len += ETHER_ADDR_LEN; } len = rx_frame.an_rx_payload_len + ieee80211_header_len; /* Check for insane frame length */ if (len > sizeof(sc->buf_802_11)) { if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } ih = (struct ieee80211_frame *)bpf_buf; bcopy((char *)&rx_frame.an_frame_ctl, (char *)ih, ieee80211_header_len); error = an_read_data(sc, id, sizeof(rx_frame) + rx_frame.an_gaplen, (caddr_t)ih +ieee80211_header_len, rx_frame.an_rx_payload_len); } /* dump raw 802.11 packet to bpf and skip ip stack */ BPF_TAP(ifp, bpf_buf, len); } else { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.rcvif = ifp; /* Read Ethernet encapsulated packet */ #ifdef ANCACHE /* Read NIC frame header */ if (an_read_data(sc, id, 0, (caddr_t)&rx_frame, sizeof(rx_frame))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } #endif /* Read in the 802_3 frame header */ if (an_read_data(sc, id, 0x34, (caddr_t)&rx_frame_802_3, sizeof(rx_frame_802_3))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if (rx_frame_802_3.an_rx_802_3_status != 0) { m_freem(m); 
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* Check for insane frame length */ len = rx_frame_802_3.an_rx_802_3_payload_len; if (len > sizeof(sc->buf_802_11)) { m_freem(m); if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.len = m->m_len = rx_frame_802_3.an_rx_802_3_payload_len + 12; eh = mtod(m, struct ether_header *); bcopy((char *)&rx_frame_802_3.an_rx_dst_addr, (char *)&eh->ether_dhost, ETHER_ADDR_LEN); bcopy((char *)&rx_frame_802_3.an_rx_src_addr, (char *)&eh->ether_shost, ETHER_ADDR_LEN); /* in mbuf header type is just before payload */ error = an_read_data(sc, id, 0x44, (caddr_t)&(eh->ether_type), rx_frame_802_3.an_rx_802_3_payload_len); if (error) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* Receive packet. */ #ifdef ANCACHE an_cache_store(sc, eh, m, rx_frame.an_rx_signal_strength, rx_frame.an_rsvd0); #endif AN_UNLOCK(sc); (*ifp->if_input)(ifp, m); AN_LOCK(sc); } } else { /* MPI-350 */ for (count = 0; count < AN_MAX_RX_DESC; count++){ for (i = 0; i < sizeof(an_rx_desc) / 4; i++) ((u_int32_t *)(void *)&an_rx_desc)[i] = CSR_MEM_AUX_READ_4(sc, AN_RX_DESC_OFFSET + (count * sizeof(an_rx_desc)) + (i * 4)); if (an_rx_desc.an_done && !an_rx_desc.an_valid) { buf = sc->an_rx_buffer[count].an_dma_vaddr; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.rcvif = ifp; /* Read Ethernet encapsulated packet */ /* * No ANCACHE support since we just get back * an Ethernet packet no 802.11 info */ #if 0 #ifdef ANCACHE /* Read NIC frame header */ bcopy(buf, (caddr_t)&rx_frame, sizeof(rx_frame)); #endif #endif /* Check for insane frame length */ len = an_rx_desc.an_len + 12; if (len > MCLBYTES) { m_freem(m); if_printf(ifp, "oversized packet " "received (%d, %d)\n", len, MCLBYTES); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } m->m_pkthdr.len = m->m_len = an_rx_desc.an_len + 12; eh = mtod(m, struct ether_header *); bcopy(buf, (char *)eh, m->m_pkthdr.len); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* Receive packet. 
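	 * (On the "+ 12" arithmetic above: the firmware's 802.3 payload
	 * length excludes the 6-byte destination and 6-byte source MAC
	 * addresses, so 12 bytes are added back when sizing the mbuf.)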
*/ #if 0 #ifdef ANCACHE an_cache_store(sc, eh, m, rx_frame.an_rx_signal_strength, rx_frame.an_rsvd0); #endif #endif AN_UNLOCK(sc); (*ifp->if_input)(ifp, m); AN_LOCK(sc); an_rx_desc.an_valid = 1; an_rx_desc.an_len = AN_RX_BUFFER_SIZE; an_rx_desc.an_done = 0; an_rx_desc.an_phys = sc->an_rx_buffer[count].an_dma_paddr; for (i = 0; i < sizeof(an_rx_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_RX_DESC_OFFSET + (count * sizeof(an_rx_desc)) + (i * 4), ((u_int32_t *)(void *)&an_rx_desc)[i]); } else { if_printf(ifp, "Didn't get valid RX packet " "%x %x %d\n", an_rx_desc.an_done, an_rx_desc.an_valid, an_rx_desc.an_len); } } } } static void an_txeof(struct an_softc *sc, int status) { struct ifnet *ifp; int id, i; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; sc->an_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (!sc->mpi350) { id = CSR_READ_2(sc, AN_TX_CMP_FID(sc->mpi350)); if (status & AN_EV_TX_EXC) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); for (i = 0; i < AN_TX_RING_CNT; i++) { if (id == sc->an_rdata.an_tx_ring[i]) { sc->an_rdata.an_tx_ring[i] = 0; break; } } AN_INC(sc->an_rdata.an_tx_cons, AN_TX_RING_CNT); } else { /* MPI 350 */ id = CSR_READ_2(sc, AN_TX_CMP_FID(sc->mpi350)); if (!sc->an_rdata.an_tx_empty){ if (status & AN_EV_TX_EXC) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); AN_INC(sc->an_rdata.an_tx_cons, AN_MAX_TX_DESC); if (sc->an_rdata.an_tx_prod == sc->an_rdata.an_tx_cons) sc->an_rdata.an_tx_empty = 1; } } return; } /* * We abuse the stats updater to check the current NIC status. This * is important because we don't want to allow transmissions until * the NIC has synchronized to the current cell (either as the master * in an ad-hoc group, or as a station connected to an access point). * * Note that this function will be called via callout(9) with a lock held. */ static void an_stats_update(void *xsc) { struct an_softc *sc; struct ifnet *ifp; sc = xsc; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; if (sc->an_timer > 0 && --sc->an_timer == 0) an_watchdog(sc); sc->an_status.an_type = AN_RID_STATUS; sc->an_status.an_len = sizeof(struct an_ltv_status); if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_status)) return; if (sc->an_status.an_opmode & AN_STATUS_OPMODE_IN_SYNC) sc->an_associated = 1; else sc->an_associated = 0; /* Don't do this while we're transmitting */ if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { callout_reset(&sc->an_stat_ch, hz, an_stats_update, sc); return; } sc->an_stats.an_len = sizeof(struct an_ltv_stats); sc->an_stats.an_type = AN_RID_32BITS_CUM; if (an_read_record(sc, (struct an_ltv_gen *)&sc->an_stats.an_len)) return; callout_reset(&sc->an_stat_ch, hz, an_stats_update, sc); return; } void an_intr(void *xsc) { struct an_softc *sc; struct ifnet *ifp; u_int16_t status; sc = (struct an_softc*)xsc; AN_LOCK(sc); if (sc->an_gone) { AN_UNLOCK(sc); return; } ifp = sc->an_ifp; /* Disable interrupts. 
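	 * (The handler below uses the usual latch-and-ack pattern: mask
	 * AN_INT_EN, snapshot AN_EVENT_STAT, ack and service each event
	 * bit in turn, then unmask on the way out.)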
*/ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0); status = CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), ~AN_INTRS(sc->mpi350)); if (status & AN_EV_MIC) { CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_MIC); } if (status & AN_EV_LINKSTAT) { if (CSR_READ_2(sc, AN_LINKSTAT(sc->mpi350)) == AN_LINKSTAT_ASSOCIATED) sc->an_associated = 1; else sc->an_associated = 0; CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_LINKSTAT); } if (status & AN_EV_RX) { an_rxeof(sc); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_RX); } if (sc->mpi350 && status & AN_EV_TX_CPY) { an_txeof(sc, status); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_TX_CPY); } if (status & AN_EV_TX) { an_txeof(sc, status); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_TX); } if (status & AN_EV_TX_EXC) { an_txeof(sc, status); CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_TX_EXC); } if (status & AN_EV_ALLOC) CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_ALLOC); /* Re-enable interrupts. */ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350)); if ((ifp->if_flags & IFF_UP) && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) an_start_locked(ifp); AN_UNLOCK(sc); return; } static int an_cmd_struct(struct an_softc *sc, struct an_command *cmd, struct an_reply *reply) { int i; AN_LOCK_ASSERT(sc); for (i = 0; i != AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) { DELAY(1000); } else break; } if( i == AN_TIMEOUT) { printf("BUSY\n"); return(ETIMEDOUT); } CSR_WRITE_2(sc, AN_PARAM0(sc->mpi350), cmd->an_parm0); CSR_WRITE_2(sc, AN_PARAM1(sc->mpi350), cmd->an_parm1); CSR_WRITE_2(sc, AN_PARAM2(sc->mpi350), cmd->an_parm2); CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), cmd->an_cmd); for (i = 0; i < AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_CMD) break; DELAY(1000); } reply->an_resp0 = CSR_READ_2(sc, AN_RESP0(sc->mpi350)); reply->an_resp1 = CSR_READ_2(sc, AN_RESP1(sc->mpi350)); reply->an_resp2 = CSR_READ_2(sc, AN_RESP2(sc->mpi350)); reply->an_status = CSR_READ_2(sc, AN_STATUS(sc->mpi350)); if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CLR_STUCK_BUSY); /* Ack the command */ CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CMD); if (i == AN_TIMEOUT) return(ETIMEDOUT); return(0); } static int an_cmd(struct an_softc *sc, int cmd, int val) { int i, s = 0; AN_LOCK_ASSERT(sc); CSR_WRITE_2(sc, AN_PARAM0(sc->mpi350), val); CSR_WRITE_2(sc, AN_PARAM1(sc->mpi350), 0); CSR_WRITE_2(sc, AN_PARAM2(sc->mpi350), 0); CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), cmd); for (i = 0; i < AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_CMD) break; else { if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) == cmd) CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), cmd); } } for (i = 0; i < AN_TIMEOUT; i++) { CSR_READ_2(sc, AN_RESP0(sc->mpi350)); CSR_READ_2(sc, AN_RESP1(sc->mpi350)); CSR_READ_2(sc, AN_RESP2(sc->mpi350)); s = CSR_READ_2(sc, AN_STATUS(sc->mpi350)); if ((s & AN_STAT_CMD_CODE) == (cmd & AN_STAT_CMD_CODE)) break; } /* Ack the command */ CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CMD); if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CLR_STUCK_BUSY); if (i == AN_TIMEOUT) return(ETIMEDOUT); return(0); } /* * This reset sequence may look a little strange, but this is the * most reliable method I've found to really kick the NIC in the * head and force it to reboot correctly. 
*/ static void an_reset(struct an_softc *sc) { if (sc->an_gone) return; AN_LOCK_ASSERT(sc); an_cmd(sc, AN_CMD_ENABLE, 0); an_cmd(sc, AN_CMD_FW_RESTART, 0); an_cmd(sc, AN_CMD_NOOP2, 0); if (an_cmd(sc, AN_CMD_FORCE_SYNCLOSS, 0) == ETIMEDOUT) device_printf(sc->an_dev, "reset failed\n"); an_cmd(sc, AN_CMD_DISABLE, 0); return; } /* * Read an LTV record from the NIC. */ static int an_read_record(struct an_softc *sc, struct an_ltv_gen *ltv) { struct an_ltv_gen *an_ltv; struct an_card_rid_desc an_rid_desc; struct an_command cmd; struct an_reply reply; struct ifnet *ifp; u_int16_t *ptr; u_int8_t *ptr2; int i, len; AN_LOCK_ASSERT(sc); if (ltv->an_len < 4 || ltv->an_type == 0) return(EINVAL); ifp = sc->an_ifp; if (!sc->mpi350){ /* Tell the NIC to enter record read mode. */ if (an_cmd(sc, AN_CMD_ACCESS|AN_ACCESS_READ, ltv->an_type)) { if_printf(ifp, "RID access failed\n"); return(EIO); } /* Seek to the record. */ if (an_seek(sc, ltv->an_type, 0, AN_BAP1)) { if_printf(ifp, "seek to record failed\n"); return(EIO); } /* * Read the length and record type and make sure they * match what we expect (this verifies that we have enough * room to hold all of the returned data). * Length includes type but not length. */ len = CSR_READ_2(sc, AN_DATA1); if (len > (ltv->an_len - 2)) { if_printf(ifp, "record length mismatch -- expected %d, " "got %d for Rid %x\n", ltv->an_len - 2, len, ltv->an_type); len = ltv->an_len - 2; } else { ltv->an_len = len + 2; } /* Now read the data. */ len -= 2; /* skip the type */ ptr = <v->an_val; for (i = len; i > 1; i -= 2) *ptr++ = CSR_READ_2(sc, AN_DATA1); if (i) { ptr2 = (u_int8_t *)ptr; *ptr2 = CSR_READ_1(sc, AN_DATA1); } } else { /* MPI-350 */ if (!sc->an_rid_buffer.an_dma_vaddr) return(EIO); an_rid_desc.an_valid = 1; an_rid_desc.an_len = AN_RID_BUFFER_SIZE; an_rid_desc.an_rid = 0; an_rid_desc.an_phys = sc->an_rid_buffer.an_dma_paddr; bzero(sc->an_rid_buffer.an_dma_vaddr, AN_RID_BUFFER_SIZE); bzero(&cmd, sizeof(cmd)); bzero(&reply, sizeof(reply)); cmd.an_cmd = AN_CMD_ACCESS|AN_ACCESS_READ; cmd.an_parm0 = ltv->an_type; for (i = 0; i < sizeof(an_rid_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_HOST_DESC_OFFSET + i * 4, ((u_int32_t *)(void *)&an_rid_desc)[i]); if (an_cmd_struct(sc, &cmd, &reply) || reply.an_status & AN_CMD_QUAL_MASK) { if_printf(ifp, "failed to read RID %x %x %x %x %x, %d\n", ltv->an_type, reply.an_status, reply.an_resp0, reply.an_resp1, reply.an_resp2, i); return(EIO); } an_ltv = (struct an_ltv_gen *)sc->an_rid_buffer.an_dma_vaddr; if (an_ltv->an_len + 2 < an_rid_desc.an_len) { an_rid_desc.an_len = an_ltv->an_len; } len = an_rid_desc.an_len; if (len > (ltv->an_len - 2)) { if_printf(ifp, "record length mismatch -- expected %d, " "got %d for Rid %x\n", ltv->an_len - 2, len, ltv->an_type); len = ltv->an_len - 2; } else { ltv->an_len = len + 2; } bcopy(&an_ltv->an_type, <v->an_val, len); } if (an_dump) an_dump_record(sc, ltv, "Read"); return(0); } /* * Same as read, except we inject data instead of reading it. */ static int an_write_record(struct an_softc *sc, struct an_ltv_gen *ltv) { struct an_card_rid_desc an_rid_desc; struct an_command cmd; struct an_reply reply; u_int16_t *ptr; u_int8_t *ptr2; int i, len; AN_LOCK_ASSERT(sc); if (an_dump) an_dump_record(sc, ltv, "Write"); if (!sc->mpi350){ if (an_cmd(sc, AN_CMD_ACCESS|AN_ACCESS_READ, ltv->an_type)) return(EIO); if (an_seek(sc, ltv->an_type, 0, AN_BAP1)) return(EIO); /* * Length includes type but not length. 
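	 * For example, an LTV whose an_len is 6 puts a length word of 4
	 * on the wire: the 2-byte type plus 2 bytes of value; the length
	 * word itself is never counted.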
*/ len = ltv->an_len - 2; CSR_WRITE_2(sc, AN_DATA1, len); len -= 2; /* skip the type */ ptr = <v->an_val; for (i = len; i > 1; i -= 2) CSR_WRITE_2(sc, AN_DATA1, *ptr++); if (i) { ptr2 = (u_int8_t *)ptr; CSR_WRITE_1(sc, AN_DATA0, *ptr2); } if (an_cmd(sc, AN_CMD_ACCESS|AN_ACCESS_WRITE, ltv->an_type)) return(EIO); } else { /* MPI-350 */ for (i = 0; i != AN_TIMEOUT; i++) { if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) { DELAY(10); } else break; } if (i == AN_TIMEOUT) { printf("BUSY\n"); } an_rid_desc.an_valid = 1; an_rid_desc.an_len = ltv->an_len - 2; an_rid_desc.an_rid = ltv->an_type; an_rid_desc.an_phys = sc->an_rid_buffer.an_dma_paddr; bcopy(<v->an_type, sc->an_rid_buffer.an_dma_vaddr, an_rid_desc.an_len); bzero(&cmd,sizeof(cmd)); bzero(&reply,sizeof(reply)); cmd.an_cmd = AN_CMD_ACCESS|AN_ACCESS_WRITE; cmd.an_parm0 = ltv->an_type; for (i = 0; i < sizeof(an_rid_desc) / 4; i++) CSR_MEM_AUX_WRITE_4(sc, AN_HOST_DESC_OFFSET + i * 4, ((u_int32_t *)(void *)&an_rid_desc)[i]); DELAY(100000); if ((i = an_cmd_struct(sc, &cmd, &reply))) { if_printf(sc->an_ifp, "failed to write RID 1 %x %x %x %x %x, %d\n", ltv->an_type, reply.an_status, reply.an_resp0, reply.an_resp1, reply.an_resp2, i); return(EIO); } if (reply.an_status & AN_CMD_QUAL_MASK) { if_printf(sc->an_ifp, "failed to write RID 2 %x %x %x %x %x, %d\n", ltv->an_type, reply.an_status, reply.an_resp0, reply.an_resp1, reply.an_resp2, i); return(EIO); } DELAY(100000); } return(0); } static void an_dump_record(struct an_softc *sc, struct an_ltv_gen *ltv, char *string) { u_int8_t *ptr2; int len; int i; int count = 0; char buf[17], temp; len = ltv->an_len - 4; if_printf(sc->an_ifp, "RID %4x, Length %4d, Mode %s\n", ltv->an_type, ltv->an_len - 4, string); if (an_dump == 1 || (an_dump == ltv->an_type)) { if_printf(sc->an_ifp, "\t"); bzero(buf,sizeof(buf)); ptr2 = (u_int8_t *)<v->an_val; for (i = len; i > 0; i--) { printf("%02x ", *ptr2); temp = *ptr2++; if (isprint(temp)) buf[count] = temp; else buf[count] = '.'; if (++count == 16) { count = 0; printf("%s\n",buf); if_printf(sc->an_ifp, "\t"); bzero(buf,sizeof(buf)); } } for (; count != 16; count++) { printf(" "); } printf(" %s\n",buf); } } static int an_seek(struct an_softc *sc, int id, int off, int chan) { int i; int selreg, offreg; switch (chan) { case AN_BAP0: selreg = AN_SEL0; offreg = AN_OFF0; break; case AN_BAP1: selreg = AN_SEL1; offreg = AN_OFF1; break; default: if_printf(sc->an_ifp, "invalid data path: %x\n", chan); return(EIO); } CSR_WRITE_2(sc, selreg, id); CSR_WRITE_2(sc, offreg, off); for (i = 0; i < AN_TIMEOUT; i++) { if (!(CSR_READ_2(sc, offreg) & (AN_OFF_BUSY|AN_OFF_ERR))) break; } if (i == AN_TIMEOUT) return(ETIMEDOUT); return(0); } static int an_read_data(struct an_softc *sc, int id, int off, caddr_t buf, int len) { int i; u_int16_t *ptr; u_int8_t *ptr2; if (off != -1) { if (an_seek(sc, id, off, AN_BAP1)) return(EIO); } ptr = (u_int16_t *)buf; for (i = len; i > 1; i -= 2) *ptr++ = CSR_READ_2(sc, AN_DATA1); if (i) { ptr2 = (u_int8_t *)ptr; *ptr2 = CSR_READ_1(sc, AN_DATA1); } return(0); } static int an_write_data(struct an_softc *sc, int id, int off, caddr_t buf, int len) { int i; u_int16_t *ptr; u_int8_t *ptr2; if (off != -1) { if (an_seek(sc, id, off, AN_BAP0)) return(EIO); } ptr = (u_int16_t *)buf; for (i = len; i > 1; i -= 2) CSR_WRITE_2(sc, AN_DATA0, *ptr++); if (i) { ptr2 = (u_int8_t *)ptr; CSR_WRITE_1(sc, AN_DATA0, *ptr2); } return(0); } /* * Allocate a region of memory inside the NIC and zero * it out. 
 */
static int
an_alloc_nicmem(struct an_softc *sc, int len, int *id)
{
	int			i;

	if (an_cmd(sc, AN_CMD_ALLOC_MEM, len)) {
		if_printf(sc->an_ifp, "failed to allocate %d bytes on NIC\n",
		    len);
		return(ENOMEM);
	}

	for (i = 0; i < AN_TIMEOUT; i++) {
		if (CSR_READ_2(sc, AN_EVENT_STAT(sc->mpi350)) & AN_EV_ALLOC)
			break;
	}

	if (i == AN_TIMEOUT)
		return(ETIMEDOUT);

	CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_ALLOC);
	*id = CSR_READ_2(sc, AN_ALLOC_FID);

	if (an_seek(sc, *id, 0, AN_BAP0))
		return(EIO);

	for (i = 0; i < len / 2; i++)
		CSR_WRITE_2(sc, AN_DATA0, 0);

	return(0);
}

static void
an_setdef(struct an_softc *sc, struct an_req *areq)
{
	struct ifnet		*ifp;
	struct an_ltv_genconfig	*cfg;
	struct an_ltv_ssidlist_new	*ssid;
	struct an_ltv_aplist	*ap;
	struct an_ltv_gen	*sp;

	ifp = sc->an_ifp;

	AN_LOCK_ASSERT(sc);
	switch (areq->an_type) {
	case AN_RID_GENCONFIG:
		cfg = (struct an_ltv_genconfig *)areq;

		bcopy((char *)&cfg->an_macaddr, IF_LLADDR(sc->an_ifp),
		    ETHER_ADDR_LEN);

		bcopy((char *)cfg, (char *)&sc->an_config,
		    sizeof(struct an_ltv_genconfig));
		break;
	case AN_RID_SSIDLIST:
		ssid = (struct an_ltv_ssidlist_new *)areq;

		bcopy((char *)ssid, (char *)&sc->an_ssidlist,
		    sizeof(struct an_ltv_ssidlist_new));
		break;
	case AN_RID_APLIST:
		ap = (struct an_ltv_aplist *)areq;

		bcopy((char *)ap, (char *)&sc->an_aplist,
		    sizeof(struct an_ltv_aplist));
		break;
	case AN_RID_TX_SPEED:
		sp = (struct an_ltv_gen *)areq;
		sc->an_tx_rate = sp->an_val;

		/* Read the current configuration */
		sc->an_config.an_type = AN_RID_GENCONFIG;
		sc->an_config.an_len = sizeof(struct an_ltv_genconfig);
		an_read_record(sc, (struct an_ltv_gen *)&sc->an_config);
		cfg = &sc->an_config;

		/* clear other rates and set the only one we want */
		bzero(cfg->an_rates, sizeof(cfg->an_rates));
		cfg->an_rates[0] = sc->an_tx_rate;

		/* Save the new rate */
		sc->an_config.an_type = AN_RID_GENCONFIG;
		sc->an_config.an_len = sizeof(struct an_ltv_genconfig);
		break;
	case AN_RID_WEP_TEMP:
		/* Cache the temp keys */
		bcopy(areq,
		    &sc->an_temp_keys[((struct an_ltv_key *)areq)->kindex],
		    sizeof(struct an_ltv_key));
		/* FALLTHROUGH */
	case AN_RID_WEP_PERM:
	case AN_RID_LEAPUSERNAME:
	case AN_RID_LEAPPASSWORD:
		an_init_locked(sc);

		/* Disable the MAC. */
		an_cmd(sc, AN_CMD_DISABLE, 0);

		/* Write the key */
		an_write_record(sc, (struct an_ltv_gen *)areq);

		/* Turn the MAC back on. */
		an_cmd(sc, AN_CMD_ENABLE, 0);

		break;
	case AN_RID_MONITOR_MODE:
		cfg = (struct an_ltv_genconfig *)areq;
		bpfdetach(ifp);
		if (ng_ether_detach_p != NULL)
			(*ng_ether_detach_p) (ifp);
		sc->an_monitor = cfg->an_len;

		if (sc->an_monitor & AN_MONITOR) {
			if (sc->an_monitor & AN_MONITOR_AIRONET_HEADER) {
				bpfattach(ifp, DLT_AIRONET_HEADER,
				    sizeof(struct ether_header));
			} else {
				bpfattach(ifp, DLT_IEEE802_11,
				    sizeof(struct ether_header));
			}
		} else {
			bpfattach(ifp, DLT_EN10MB,
			    sizeof(struct ether_header));
			if (ng_ether_attach_p != NULL)
				(*ng_ether_attach_p) (ifp);
		}
		break;
	default:
		if_printf(ifp, "unknown RID: %x\n", areq->an_type);
		return;
	}

	/* Reinitialize the card. */
	if (ifp->if_flags)
		an_init_locked(sc);

	return;
}

/*
 * Derived from the Linux driver to enable promiscuous mode.
 */
static void
an_promisc(struct an_softc *sc, int promisc)
{
	AN_LOCK_ASSERT(sc);
	if (sc->an_was_monitor) {
		an_reset(sc);
		if (sc->mpi350)
			an_init_mpi350_desc(sc);
	}
	if (sc->an_monitor || sc->an_was_monitor)
		an_init_locked(sc);

	sc->an_was_monitor = sc->an_monitor;
	an_cmd(sc, AN_CMD_SET_MODE, promisc ?
0xffff : 0); return; } static int an_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { int error = 0; int len; int i, max; struct an_softc *sc; struct ifreq *ifr; struct thread *td = curthread; struct ieee80211req *ireq; struct ieee80211_channel ch; u_int8_t tmpstr[IEEE80211_NWID_LEN*2]; u_int8_t *tmpptr; struct an_ltv_genconfig *config; struct an_ltv_key *key; struct an_ltv_status *status; struct an_ltv_ssidlist_new *ssids; int mode; struct aironet_ioctl l_ioctl; sc = ifp->if_softc; ifr = (struct ifreq *)data; ireq = (struct ieee80211req *)data; config = (struct an_ltv_genconfig *)&sc->areq; key = (struct an_ltv_key *)&sc->areq; status = (struct an_ltv_status *)&sc->areq; ssids = (struct an_ltv_ssidlist_new *)&sc->areq; if (sc->an_gone) { error = ENODEV; goto out; } switch (command) { case SIOCSIFFLAGS: AN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->an_if_flags & IFF_PROMISC)) { an_promisc(sc, 1); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->an_if_flags & IFF_PROMISC) { an_promisc(sc, 0); } else an_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) an_stop(sc); } sc->an_if_flags = ifp->if_flags; AN_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->an_ifmedia, command); break; case SIOCADDMULTI: case SIOCDELMULTI: /* The Aironet has no multicast filter. */ error = 0; break; case SIOCGAIRONET: error = copyin(ifr->ifr_data, &sc->areq, sizeof(sc->areq)); if (error != 0) break; AN_LOCK(sc); #ifdef ANCACHE if (sc->areq.an_type == AN_RID_ZERO_CACHE) { error = priv_check(td, PRIV_DRIVER); if (error) break; sc->an_sigitems = sc->an_nextitem = 0; break; } else if (sc->areq.an_type == AN_RID_READ_CACHE) { char *pt = (char *)&sc->areq.an_val; bcopy((char *)&sc->an_sigitems, (char *)pt, sizeof(int)); pt += sizeof(int); sc->areq.an_len = sizeof(int) / 2; bcopy((char *)&sc->an_sigcache, (char *)pt, sizeof(struct an_sigcache) * sc->an_sigitems); sc->areq.an_len += ((sizeof(struct an_sigcache) * sc->an_sigitems) / 2) + 1; } else #endif if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { AN_UNLOCK(sc); error = EINVAL; break; } AN_UNLOCK(sc); error = copyout(&sc->areq, ifr->ifr_data, sizeof(sc->areq)); break; case SIOCSAIRONET: if ((error = priv_check(td, PRIV_DRIVER))) goto out; AN_LOCK(sc); error = copyin(ifr->ifr_data, &sc->areq, sizeof(sc->areq)); if (error != 0) break; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case SIOCGPRIVATE_0: /* used by Cisco client utility */ if ((error = priv_check(td, PRIV_DRIVER))) goto out; error = copyin(ifr->ifr_data, &l_ioctl, sizeof(l_ioctl)); if (error) goto out; mode = l_ioctl.command; AN_LOCK(sc); if (mode >= AIROGCAP && mode <= AIROGSTATSD32) { error = readrids(ifp, &l_ioctl); } else if (mode >= AIROPCAP && mode <= AIROPLEAPUSR) { error = writerids(ifp, &l_ioctl); } else if (mode >= AIROFLSHRST && mode <= AIRORESTART) { error = flashcard(ifp, &l_ioctl); } else { error =-1; } AN_UNLOCK(sc); if (!error) { /* copy out the updated command info */ error = copyout(&l_ioctl, ifr->ifr_data, sizeof(l_ioctl)); } break; case SIOCGPRIVATE_1: /* used by Cisco client utility */ if ((error = priv_check(td, PRIV_DRIVER))) goto out; error = copyin(ifr->ifr_data, &l_ioctl, sizeof(l_ioctl)); if (error) goto out; l_ioctl.command = 0; error = AIROMAGIC; (void) copyout(&error, l_ioctl.data, sizeof(error)); error = 0; break; case SIOCG80211: sc->areq.an_len = sizeof(sc->areq); /* was 
that a good idea, DJA?  We are taking a short-cut here. */
		switch (ireq->i_type) {
		case IEEE80211_IOC_SSID:
			AN_LOCK(sc);
			if (ireq->i_val == -1) {
				sc->areq.an_type = AN_RID_STATUS;
				if (an_read_record(sc,
				    (struct an_ltv_gen *)&sc->areq)) {
					error = EINVAL;
					AN_UNLOCK(sc);
					break;
				}
				len = status->an_ssidlen;
				tmpptr = status->an_ssid;
			} else if (ireq->i_val >= 0) {
				sc->areq.an_type = AN_RID_SSIDLIST;
				if (an_read_record(sc,
				    (struct an_ltv_gen *)&sc->areq)) {
					error = EINVAL;
					AN_UNLOCK(sc);
					break;
				}
				max = (sc->areq.an_len - 4)
				    / sizeof(struct an_ltv_ssid_entry);
				if ( max > MAX_SSIDS ) {
					printf("Too many SSIDs, only using "
					    "%d of %d\n",
					    MAX_SSIDS, max);
					max = MAX_SSIDS;
				}
				if (ireq->i_val > max) {
					error = EINVAL;
					AN_UNLOCK(sc);
					break;
				} else {
					len = ssids->an_entry[ireq->i_val].an_len;
					tmpptr = ssids->an_entry[ireq->i_val].an_ssid;
				}
			} else {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			if (len > IEEE80211_NWID_LEN) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			AN_UNLOCK(sc);
			ireq->i_len = len;
			bzero(tmpstr, IEEE80211_NWID_LEN);
			bcopy(tmpptr, tmpstr, len);
			error = copyout(tmpstr, ireq->i_data,
			    IEEE80211_NWID_LEN);
			break;
		case IEEE80211_IOC_NUMSSIDS:
			AN_LOCK(sc);
			sc->areq.an_len = sizeof(sc->areq);
			sc->areq.an_type = AN_RID_SSIDLIST;
			if (an_read_record(sc,
			    (struct an_ltv_gen *)&sc->areq)) {
				AN_UNLOCK(sc);
				error = EINVAL;
				break;
			}
			max = (sc->areq.an_len - 4)
			    / sizeof(struct an_ltv_ssid_entry);
			AN_UNLOCK(sc);
			if ( max > MAX_SSIDS ) {
				printf("Too many SSIDs, only using "
				    "%d of %d\n",
				    MAX_SSIDS, max);
				max = MAX_SSIDS;
			}
			ireq->i_val = max;
			break;
		case IEEE80211_IOC_WEP:
			AN_LOCK(sc);
			sc->areq.an_type = AN_RID_ACTUALCFG;
			if (an_read_record(sc,
			    (struct an_ltv_gen *)&sc->areq)) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			AN_UNLOCK(sc);
			if (config->an_authtype & AN_AUTHTYPE_PRIVACY_IN_USE) {
				if (config->an_authtype &
				    AN_AUTHTYPE_ALLOW_UNENCRYPTED)
					ireq->i_val = IEEE80211_WEP_MIXED;
				else
					ireq->i_val = IEEE80211_WEP_ON;
			} else {
				ireq->i_val = IEEE80211_WEP_OFF;
			}
			break;
		case IEEE80211_IOC_WEPKEY:
			/*
			 * XXX: I'm not entirely convinced this is
			 * correct, but it's what is implemented in
			 * ancontrol so it will have to do until we get
			 * access to actual Cisco code.
			 */
			if (ireq->i_val < 0 || ireq->i_val > 8) {
				error = EINVAL;
				break;
			}
			len = 0;
			if (ireq->i_val < 5) {
				AN_LOCK(sc);
				sc->areq.an_type = AN_RID_WEP_TEMP;
				for (i = 0; i < 5; i++) {
					if (an_read_record(sc,
					    (struct an_ltv_gen *)&sc->areq)) {
						error = EINVAL;
						break;
					}
					if (key->kindex == 0xffff)
						break;
					if (key->kindex == ireq->i_val)
						len = key->klen;
					/* Required to get next entry */
					sc->areq.an_type = AN_RID_WEP_PERM;
				}
				AN_UNLOCK(sc);
				if (error != 0) {
					break;
				}
			}
			/*
			 * We aren't allowed to read the value of the
			 * key from the card, so we just output zeros,
			 * as we would if we could read the card but
			 * the user were denied access.
			 */
			bzero(tmpstr, len);
			ireq->i_len = len;
			error = copyout(tmpstr, ireq->i_data, len);
			break;
		case IEEE80211_IOC_NUMWEPKEYS:
			ireq->i_val = 9; /* include home key */
			break;
		case IEEE80211_IOC_WEPTXKEY:
			/*
			 * For some strange reason, you have to read all
			 * keys before you can read the txkey.
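			 * (Each AN_RID_WEP_TEMP/AN_RID_WEP_PERM read
			 * returns the next key record; the list ends with
			 * kindex 0xffff, and that terminating record's
			 * mac[0] carries the transmit key index, which is
			 * what gets picked up below.)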
*/ AN_LOCK(sc); sc->areq.an_type = AN_RID_WEP_TEMP; for (i = 0; i < 5; i++) { if (an_read_record(sc, (struct an_ltv_gen *) &sc->areq)) { error = EINVAL; break; } if (key->kindex == 0xffff) { break; } /* Required to get next entry */ sc->areq.an_type = AN_RID_WEP_PERM; } if (error != 0) { AN_UNLOCK(sc); break; } sc->areq.an_type = AN_RID_WEP_PERM; key->kindex = 0xffff; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } ireq->i_val = key->mac[0]; /* * Check for home mode. Map home mode into * 5th key since that is how it is stored on * the card */ sc->areq.an_len = sizeof(struct an_ltv_genconfig); sc->areq.an_type = AN_RID_GENCONFIG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } if (config->an_home_product & AN_HOME_NETWORK) ireq->i_val = 4; AN_UNLOCK(sc); break; case IEEE80211_IOC_AUTHMODE: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); if ((config->an_authtype & AN_AUTHTYPE_MASK) == AN_AUTHTYPE_NONE) { ireq->i_val = IEEE80211_AUTH_NONE; } else if ((config->an_authtype & AN_AUTHTYPE_MASK) == AN_AUTHTYPE_OPEN) { ireq->i_val = IEEE80211_AUTH_OPEN; } else if ((config->an_authtype & AN_AUTHTYPE_MASK) == AN_AUTHTYPE_SHAREDKEY) { ireq->i_val = IEEE80211_AUTH_SHARED; } else error = EINVAL; break; case IEEE80211_IOC_STATIONNAME: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_len = sizeof(config->an_nodename); tmpptr = config->an_nodename; bzero(tmpstr, IEEE80211_NWID_LEN); bcopy(tmpptr, tmpstr, ireq->i_len); error = copyout(tmpstr, ireq->i_data, IEEE80211_NWID_LEN); break; case IEEE80211_IOC_CHANNEL: AN_LOCK(sc); sc->areq.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_val = status->an_cur_channel; break; case IEEE80211_IOC_CURCHAN: AN_LOCK(sc); sc->areq.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); bzero(&ch, sizeof(ch)); ch.ic_freq = ieee80211_ieee2mhz(status->an_cur_channel, IEEE80211_CHAN_B); ch.ic_flags = IEEE80211_CHAN_B; ch.ic_ieee = status->an_cur_channel; error = copyout(&ch, ireq->i_data, sizeof(ch)); break; case IEEE80211_IOC_POWERSAVE: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); if (config->an_psave_mode == AN_PSAVE_NONE) { ireq->i_val = IEEE80211_POWERSAVE_OFF; } else if (config->an_psave_mode == AN_PSAVE_CAM) { ireq->i_val = IEEE80211_POWERSAVE_CAM; } else if (config->an_psave_mode == AN_PSAVE_PSP) { ireq->i_val = IEEE80211_POWERSAVE_PSP; } else if (config->an_psave_mode == AN_PSAVE_PSP_CAM) { ireq->i_val = IEEE80211_POWERSAVE_PSP_CAM; } else error = EINVAL; break; case IEEE80211_IOC_POWERSAVESLEEP: AN_LOCK(sc); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } AN_UNLOCK(sc); ireq->i_val = config->an_listen_interval; break; } break; case SIOCS80211: if ((error = priv_check(td, PRIV_NET80211_MANAGE))) goto out; AN_LOCK(sc); sc->areq.an_len = sizeof(sc->areq); /* * We need a config structure for everything but the WEP * key management and SSIDs so we get it now so 
as to avoid
		 * duplicating this code every time.
		 */
		if (ireq->i_type != IEEE80211_IOC_SSID &&
		    ireq->i_type != IEEE80211_IOC_WEPKEY &&
		    ireq->i_type != IEEE80211_IOC_WEPTXKEY) {
			sc->areq.an_type = AN_RID_GENCONFIG;
			if (an_read_record(sc,
			    (struct an_ltv_gen *)&sc->areq)) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
		}
		switch (ireq->i_type) {
		case IEEE80211_IOC_SSID:
			sc->areq.an_len = sizeof(sc->areq);
			sc->areq.an_type = AN_RID_SSIDLIST;
			if (an_read_record(sc,
			    (struct an_ltv_gen *)&sc->areq)) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			if (ireq->i_len > IEEE80211_NWID_LEN) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			max = (sc->areq.an_len - 4)
			    / sizeof(struct an_ltv_ssid_entry);
			if ( max > MAX_SSIDS ) {
				printf("Too many SSIDs, only using "
				    "%d of %d\n",
				    MAX_SSIDS, max);
				max = MAX_SSIDS;
			}
			if (ireq->i_val > max) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			} else {
				error = copyin(ireq->i_data,
				    ssids->an_entry[ireq->i_val].an_ssid,
				    ireq->i_len);
				ssids->an_entry[ireq->i_val].an_len
				    = ireq->i_len;
				sc->areq.an_len = sizeof(sc->areq);
				sc->areq.an_type = AN_RID_SSIDLIST;
				an_setdef(sc, &sc->areq);
				AN_UNLOCK(sc);
				break;
			}
			break;
		case IEEE80211_IOC_WEP:
			switch (ireq->i_val) {
			case IEEE80211_WEP_OFF:
				config->an_authtype &=
				    ~(AN_AUTHTYPE_PRIVACY_IN_USE |
				    AN_AUTHTYPE_ALLOW_UNENCRYPTED);
				break;
			case IEEE80211_WEP_ON:
				config->an_authtype |=
				    AN_AUTHTYPE_PRIVACY_IN_USE;
				config->an_authtype &=
				    ~AN_AUTHTYPE_ALLOW_UNENCRYPTED;
				break;
			case IEEE80211_WEP_MIXED:
				config->an_authtype |=
				    AN_AUTHTYPE_PRIVACY_IN_USE |
				    AN_AUTHTYPE_ALLOW_UNENCRYPTED;
				break;
			default:
				error = EINVAL;
				break;
			}
			if (error != EINVAL)
				an_setdef(sc, &sc->areq);
			AN_UNLOCK(sc);
			break;
		case IEEE80211_IOC_WEPKEY:
			if (ireq->i_val < 0 || ireq->i_val > 8 ||
			    ireq->i_len > 13) {
				error = EINVAL;
				AN_UNLOCK(sc);
				break;
			}
			error = copyin(ireq->i_data, tmpstr, 13);
			if (error != 0) {
				AN_UNLOCK(sc);
				break;
			}
			/*
			 * Map the 9th key into the home mode
			 * since that is how it is stored on
			 * the card
			 */
			bzero(&sc->areq, sizeof(struct an_ltv_key));
			sc->areq.an_len = sizeof(struct an_ltv_key);
			key->mac[0] = 1;	/* The others are 0.
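			 * (Slot mapping: i_val 0-3 select the volatile
			 * AN_RID_WEP_TEMP keys, 4-8 the persistent
			 * AN_RID_WEP_PERM keys 0-4, with slot 8, kindex 4,
			 * being the home-mode key.)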
*/ if (ireq->i_val < 4) { sc->areq.an_type = AN_RID_WEP_TEMP; key->kindex = ireq->i_val; } else { sc->areq.an_type = AN_RID_WEP_PERM; key->kindex = ireq->i_val - 4; } key->klen = ireq->i_len; bcopy(tmpstr, key->key, key->klen); an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_WEPTXKEY: if (ireq->i_val < 0 || ireq->i_val > 4) { error = EINVAL; AN_UNLOCK(sc); break; } /* * Map the 5th key into the home mode * since that is how it is stored on * the card */ sc->areq.an_len = sizeof(struct an_ltv_genconfig); sc->areq.an_type = AN_RID_ACTUALCFG; if (an_read_record(sc, (struct an_ltv_gen *)&sc->areq)) { error = EINVAL; AN_UNLOCK(sc); break; } if (ireq->i_val == 4) { config->an_home_product |= AN_HOME_NETWORK; ireq->i_val = 0; } else { config->an_home_product &= ~AN_HOME_NETWORK; } sc->an_config.an_home_product = config->an_home_product; /* update configuration */ an_init_locked(sc); bzero(&sc->areq, sizeof(struct an_ltv_key)); sc->areq.an_len = sizeof(struct an_ltv_key); sc->areq.an_type = AN_RID_WEP_PERM; key->kindex = 0xffff; key->mac[0] = ireq->i_val; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_AUTHMODE: switch (ireq->i_val) { case IEEE80211_AUTH_NONE: config->an_authtype = AN_AUTHTYPE_NONE | (config->an_authtype & ~AN_AUTHTYPE_MASK); break; case IEEE80211_AUTH_OPEN: config->an_authtype = AN_AUTHTYPE_OPEN | (config->an_authtype & ~AN_AUTHTYPE_MASK); break; case IEEE80211_AUTH_SHARED: config->an_authtype = AN_AUTHTYPE_SHAREDKEY | (config->an_authtype & ~AN_AUTHTYPE_MASK); break; default: error = EINVAL; } if (error != EINVAL) { an_setdef(sc, &sc->areq); } AN_UNLOCK(sc); break; case IEEE80211_IOC_STATIONNAME: if (ireq->i_len > 16) { error = EINVAL; AN_UNLOCK(sc); break; } bzero(config->an_nodename, 16); error = copyin(ireq->i_data, config->an_nodename, ireq->i_len); an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_CHANNEL: /* * The actual range is 1-14, but if you set it * to 0 you get the default so we let that work * too. 
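			 * (Channels 1-14 span the 2.4 GHz 802.11b band;
			 * channel 14 is legal only in Japan.)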
*/ if (ireq->i_val < 0 || ireq->i_val >14) { error = EINVAL; AN_UNLOCK(sc); break; } config->an_ds_channel = ireq->i_val; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_POWERSAVE: switch (ireq->i_val) { case IEEE80211_POWERSAVE_OFF: config->an_psave_mode = AN_PSAVE_NONE; break; case IEEE80211_POWERSAVE_CAM: config->an_psave_mode = AN_PSAVE_CAM; break; case IEEE80211_POWERSAVE_PSP: config->an_psave_mode = AN_PSAVE_PSP; break; case IEEE80211_POWERSAVE_PSP_CAM: config->an_psave_mode = AN_PSAVE_PSP_CAM; break; default: error = EINVAL; break; } an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; case IEEE80211_IOC_POWERSAVESLEEP: config->an_listen_interval = ireq->i_val; an_setdef(sc, &sc->areq); AN_UNLOCK(sc); break; default: AN_UNLOCK(sc); break; } /* if (!error) { AN_LOCK(sc); an_setdef(sc, &sc->areq); AN_UNLOCK(sc); } */ break; default: error = ether_ioctl(ifp, command, data); break; } out: return(error != 0); } static int an_init_tx_ring(struct an_softc *sc) { int i; int id; if (sc->an_gone) return (0); if (!sc->mpi350) { for (i = 0; i < AN_TX_RING_CNT; i++) { if (an_alloc_nicmem(sc, 1518 + 0x44, &id)) return(ENOMEM); sc->an_rdata.an_tx_fids[i] = id; sc->an_rdata.an_tx_ring[i] = 0; } } sc->an_rdata.an_tx_prod = 0; sc->an_rdata.an_tx_cons = 0; sc->an_rdata.an_tx_empty = 1; return(0); } static void an_init(void *xsc) { struct an_softc *sc = xsc; AN_LOCK(sc); an_init_locked(sc); AN_UNLOCK(sc); } static void an_init_locked(struct an_softc *sc) { struct ifnet *ifp; AN_LOCK_ASSERT(sc); ifp = sc->an_ifp; if (sc->an_gone) return; if (ifp->if_drv_flags & IFF_DRV_RUNNING) an_stop(sc); sc->an_associated = 0; /* Allocate the TX buffers */ if (an_init_tx_ring(sc)) { an_reset(sc); if (sc->mpi350) an_init_mpi350_desc(sc); if (an_init_tx_ring(sc)) { if_printf(ifp, "tx buffer allocation failed\n"); return; } } /* Set our MAC address. 
*/ bcopy((char *)IF_LLADDR(sc->an_ifp), (char *)&sc->an_config.an_macaddr, ETHER_ADDR_LEN); if (ifp->if_flags & IFF_BROADCAST) sc->an_config.an_rxmode = AN_RXMODE_BC_ADDR; else sc->an_config.an_rxmode = AN_RXMODE_ADDR; if (ifp->if_flags & IFF_MULTICAST) sc->an_config.an_rxmode = AN_RXMODE_BC_MC_ADDR; if (ifp->if_flags & IFF_PROMISC) { if (sc->an_monitor & AN_MONITOR) { if (sc->an_monitor & AN_MONITOR_ANY_BSS) { sc->an_config.an_rxmode |= AN_RXMODE_80211_MONITOR_ANYBSS | AN_RXMODE_NO_8023_HEADER; } else { sc->an_config.an_rxmode |= AN_RXMODE_80211_MONITOR_CURBSS | AN_RXMODE_NO_8023_HEADER; } } } #ifdef ANCACHE if (sc->an_have_rssimap) sc->an_config.an_rxmode |= AN_RXMODE_NORMALIZED_RSSI; #endif /* Set the ssid list */ sc->an_ssidlist.an_type = AN_RID_SSIDLIST; sc->an_ssidlist.an_len = sizeof(struct an_ltv_ssidlist_new); if (an_write_record(sc, (struct an_ltv_gen *)&sc->an_ssidlist)) { if_printf(ifp, "failed to set ssid list\n"); return; } /* Set the AP list */ sc->an_aplist.an_type = AN_RID_APLIST; sc->an_aplist.an_len = sizeof(struct an_ltv_aplist); if (an_write_record(sc, (struct an_ltv_gen *)&sc->an_aplist)) { if_printf(ifp, "failed to set AP list\n"); return; } /* Set the configuration in the NIC */ sc->an_config.an_len = sizeof(struct an_ltv_genconfig); sc->an_config.an_type = AN_RID_GENCONFIG; if (an_write_record(sc, (struct an_ltv_gen *)&sc->an_config)) { if_printf(ifp, "failed to set configuration\n"); return; } /* Enable the MAC */ if (an_cmd(sc, AN_CMD_ENABLE, 0)) { if_printf(ifp, "failed to enable MAC\n"); return; } if (ifp->if_flags & IFF_PROMISC) an_cmd(sc, AN_CMD_SET_MODE, 0xffff); /* enable interrupts */ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350)); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->an_stat_ch, hz, an_stats_update, sc); return; } static void an_start(struct ifnet *ifp) { struct an_softc *sc; sc = ifp->if_softc; AN_LOCK(sc); an_start_locked(ifp); AN_UNLOCK(sc); } static void an_start_locked(struct ifnet *ifp) { struct an_softc *sc; struct mbuf *m0 = NULL; struct an_txframe_802_3 tx_frame_802_3; struct ether_header *eh; int id, idx, i; unsigned char txcontrol; struct an_card_tx_desc an_tx_desc; u_int8_t *buf; sc = ifp->if_softc; AN_LOCK_ASSERT(sc); if (sc->an_gone) return; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (!sc->an_associated) return; /* We can't send in monitor mode so toss any attempts. 
*/ if (sc->an_monitor && (ifp->if_flags & IFF_PROMISC)) { for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; m_freem(m0); } return; } idx = sc->an_rdata.an_tx_prod; if (!sc->mpi350) { bzero((char *)&tx_frame_802_3, sizeof(tx_frame_802_3)); while (sc->an_rdata.an_tx_ring[idx] == 0) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; id = sc->an_rdata.an_tx_fids[idx]; eh = mtod(m0, struct ether_header *); bcopy((char *)&eh->ether_dhost, (char *)&tx_frame_802_3.an_tx_dst_addr, ETHER_ADDR_LEN); bcopy((char *)&eh->ether_shost, (char *)&tx_frame_802_3.an_tx_src_addr, ETHER_ADDR_LEN); /* minus src/dest mac & type */ tx_frame_802_3.an_tx_802_3_payload_len = m0->m_pkthdr.len - 12; m_copydata(m0, sizeof(struct ether_header) - 2 , tx_frame_802_3.an_tx_802_3_payload_len, (caddr_t)&sc->an_txbuf); txcontrol = AN_TXCTL_8023 | AN_TXCTL_HW(sc->mpi350); /* write the txcontrol only */ an_write_data(sc, id, 0x08, (caddr_t)&txcontrol, sizeof(txcontrol)); /* 802_3 header */ an_write_data(sc, id, 0x34, (caddr_t)&tx_frame_802_3, sizeof(struct an_txframe_802_3)); /* in mbuf header type is just before payload */ an_write_data(sc, id, 0x44, (caddr_t)&sc->an_txbuf, tx_frame_802_3.an_tx_802_3_payload_len); /* * If there's a BPF listner, bounce a copy of * this frame to him. */ BPF_MTAP(ifp, m0); m_freem(m0); m0 = NULL; sc->an_rdata.an_tx_ring[idx] = id; if (an_cmd(sc, AN_CMD_TX, id)) if_printf(ifp, "xmit failed\n"); AN_INC(idx, AN_TX_RING_CNT); /* * Set a timeout in case the chip goes out to lunch. */ sc->an_timer = 5; } } else { /* MPI-350 */ /* Disable interrupts. */ CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0); while (sc->an_rdata.an_tx_empty || idx != sc->an_rdata.an_tx_cons) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) { break; } buf = sc->an_tx_buffer[idx].an_dma_vaddr; eh = mtod(m0, struct ether_header *); /* DJA optimize this to limit bcopy */ bcopy((char *)&eh->ether_dhost, (char *)&tx_frame_802_3.an_tx_dst_addr, ETHER_ADDR_LEN); bcopy((char *)&eh->ether_shost, (char *)&tx_frame_802_3.an_tx_src_addr, ETHER_ADDR_LEN); /* minus src/dest mac & type */ tx_frame_802_3.an_tx_802_3_payload_len = m0->m_pkthdr.len - 12; m_copydata(m0, sizeof(struct ether_header) - 2 , tx_frame_802_3.an_tx_802_3_payload_len, (caddr_t)&sc->an_txbuf); txcontrol = AN_TXCTL_8023 | AN_TXCTL_HW(sc->mpi350); /* write the txcontrol only */ bcopy((caddr_t)&txcontrol, &buf[0x08], sizeof(txcontrol)); /* 802_3 header */ bcopy((caddr_t)&tx_frame_802_3, &buf[0x34], sizeof(struct an_txframe_802_3)); /* in mbuf header type is just before payload */ bcopy((caddr_t)&sc->an_txbuf, &buf[0x44], tx_frame_802_3.an_tx_802_3_payload_len); bzero(&an_tx_desc, sizeof(an_tx_desc)); an_tx_desc.an_offset = 0; an_tx_desc.an_eoc = 1; an_tx_desc.an_valid = 1; an_tx_desc.an_len = 0x44 + tx_frame_802_3.an_tx_802_3_payload_len; an_tx_desc.an_phys = sc->an_tx_buffer[idx].an_dma_paddr; for (i = sizeof(an_tx_desc) / 4 - 1; i >= 0; i--) { CSR_MEM_AUX_WRITE_4(sc, AN_TX_DESC_OFFSET /* zero for now */ + (0 * sizeof(an_tx_desc)) + (i * 4), ((u_int32_t *)(void *)&an_tx_desc)[i]); } /* * If there's a BPF listner, bounce a copy of * this frame to him. */ BPF_MTAP(ifp, m0); m_freem(m0); m0 = NULL; AN_INC(idx, AN_MAX_TX_DESC); sc->an_rdata.an_tx_empty = 0; CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_ALLOC); /* * Set a timeout in case the chip goes out to lunch. */ sc->an_timer = 5; } /* Re-enable interrupts. 
*/
		CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), AN_INTRS(sc->mpi350));
	}

	if (m0 != NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	sc->an_rdata.an_tx_prod = idx;

	return;
}

void
an_stop(struct an_softc *sc)
{
	struct ifnet		*ifp;
	int			i;

	AN_LOCK_ASSERT(sc);

	if (sc->an_gone)
		return;

	ifp = sc->an_ifp;

	an_cmd(sc, AN_CMD_FORCE_SYNCLOSS, 0);
	CSR_WRITE_2(sc, AN_INT_EN(sc->mpi350), 0);
	an_cmd(sc, AN_CMD_DISABLE, 0);

	for (i = 0; i < AN_TX_RING_CNT; i++)
		an_cmd(sc, AN_CMD_DEALLOC_MEM, sc->an_rdata.an_tx_fids[i]);

	callout_stop(&sc->an_stat_ch);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	if (sc->an_flash_buffer) {
		free(sc->an_flash_buffer, M_DEVBUF);
		sc->an_flash_buffer = NULL;
	}
}

static void
an_watchdog(struct an_softc *sc)
{
	struct ifnet		*ifp;

	AN_LOCK_ASSERT(sc);

	if (sc->an_gone)
		return;

	ifp = sc->an_ifp;

	if_printf(ifp, "device timeout\n");

	an_reset(sc);
	if (sc->mpi350)
		an_init_mpi350_desc(sc);
	an_init_locked(sc);

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}

int
an_shutdown(device_t dev)
{
	struct an_softc		*sc;

	sc = device_get_softc(dev);

	AN_LOCK(sc);
	an_stop(sc);
	sc->an_gone = 1;
	AN_UNLOCK(sc);

	return (0);
}

void
an_resume(device_t dev)
{
	struct an_softc		*sc;
	struct ifnet		*ifp;
	int			i;

	sc = device_get_softc(dev);

	AN_LOCK(sc);
	ifp = sc->an_ifp;
	sc->an_gone = 0;
	an_reset(sc);
	if (sc->mpi350)
		an_init_mpi350_desc(sc);
	an_init_locked(sc);

	/* Recover the temporary keys */
	for (i = 0; i < 4; i++) {
		sc->areq.an_type = AN_RID_WEP_TEMP;
		sc->areq.an_len = sizeof(struct an_ltv_key);
		bcopy(&sc->an_temp_keys[i],
		    &sc->areq, sizeof(struct an_ltv_key));
		an_setdef(sc, &sc->areq);
	}

	if (ifp->if_flags & IFF_UP)
		an_start_locked(ifp);
	AN_UNLOCK(sc);

	return;
}

#ifdef ANCACHE
/* Aironet signal strength cache code.
 * store signal/noise/quality on per MAC src basis in
 * a small fixed cache.  The cache wraps if > MAX slots
 * used.  The cache may be zeroed out to start over.
 * Two simple filters exist to reduce computation:
 * 1. ip only (literally 0x800, ETHERTYPE_IP) which may be used
 * to ignore some packets.  It defaults to ip only.
 * it could be used to focus on broadcast, non-IP 802.11 beacons.
 * 2. multicast/broadcast only.  This may be used to
 * ignore unicast packets and only cache signal strength
 * for multicast/broadcast packets (beacons); e.g., Mobile-IP
 * beacons and not unicast traffic.
 *
 * The cache stores (MAC src(index), IP src (major clue), signal,
 * quality, noise)
 *
 * No apologies for storing IP src here.  It's easy and saves much
 * trouble elsewhere.  The cache is assumed to be INET dependent,
 * although it need not be.
 *
 * Note: the Aironet only has a single byte of signal strength value
 * in the rx frame header, and it's not scaled to anything sensible.
 * This is kind of lame, but it's all we've got.
 */
#ifdef documentation
int an_sigitems;	/* number of cached entries */
struct an_sigcache an_sigcache[MAXANCACHE];	/* array of cache entries */
int an_nextitem;	/* index/# of entries */
#endif

/* control variables for cache filtering.  Basic idea is
 * to reduce cost (e.g., to cache only Mobile-IP agent beacons
 * which are broadcast or multicast).  Still you might
 * want to measure signal strength with unicast ping packets
 * on a pt. to pt. antenna setup.
 */

/* set true if you want to limit cache items to broadcast/mcast
 * only packets (not unicast).  Useful for mobile-ip beacons which
 * are broadcast/multicast at network layer.  Default is all packets
 * so ping/unicast will work, say, with a pt. to pt. antenna setup.
 */
static int an_cache_mcastonly = 0;
SYSCTL_INT(_hw_an, OID_AUTO, an_cache_mcastonly, CTLFLAG_RW,
	&an_cache_mcastonly, 0, "");

/* set true if you want to limit cache items to IP packets only */
static int an_cache_iponly = 1;
SYSCTL_INT(_hw_an, OID_AUTO, an_cache_iponly, CTLFLAG_RW,
	&an_cache_iponly, 0, "");

/*
 * an_cache_store, per rx packet store signal
 * strength in MAC (src) indexed cache.
 */
static void
an_cache_store(struct an_softc *sc, struct ether_header *eh, struct mbuf *m,
    u_int8_t rx_rssi, u_int8_t rx_quality)
{
-	struct ip *ip = 0;
+	struct ip *ip = NULL;
	int i;
	static int cache_slot = 0; 	/* use this cache entry */
	static int wrapindex = 0;	/* next "free" cache entry */
	int type_ipv4 = 0;

	/* filters:
	 * 1. ip only
	 * 2. configurable filter to throw out unicast packets,
	 * keep multicast only.
	 */

	if ((ntohs(eh->ether_type) == ETHERTYPE_IP)) {
		type_ipv4 = 1;
	}

	/* filter for ip packets only */
	if ( an_cache_iponly && !type_ipv4) {
		return;
	}

	/* filter for broadcast/multicast only */
	if (an_cache_mcastonly && ((eh->ether_dhost[0] & 1) == 0)) {
		return;
	}

#ifdef SIGDEBUG
	if_printf(sc->an_ifp, "q value %x (MSB=0x%x, LSB=0x%x) \n",
	    rx_rssi & 0xffff, rx_rssi >> 8, rx_rssi & 0xff);
#endif

	/* find the ip header.  we want to store the ip_src
	 * address.
	 */
	if (type_ipv4) {
		ip = mtod(m, struct ip *);
	}

	/* do a linear search for a matching MAC address
	 * in the cache table
	 * . MAC address is 6 bytes,
	 * . var an_nextitem holds total number of entries already cached
	 */
	for (i = 0; i < sc->an_nextitem; i++) {
		if (! bcmp(eh->ether_shost, sc->an_sigcache[i].macsrc, 6)) {
			/* Match!
			 * so we already have this entry,
			 * update the data
			 */
			break;
		}
	}

	/* did we find a matching mac address?
	 * if yes, then overwrite a previously existing cache entry
	 */
	if (i < sc->an_nextitem ) {
		cache_slot = i;
	}
	/* else we have a new address entry, so add it;
	 * if the table is full, we need to replace the LRU entry
	 */
	else {
		/* check for space in cache table
		 * note: an_nextitem also holds number of entries
		 * added in the cache table
		 */
		if ( sc->an_nextitem < MAXANCACHE ) {
			cache_slot = sc->an_nextitem;
			sc->an_nextitem++;
			sc->an_sigitems = sc->an_nextitem;
		}
		/* no space found, so simply wrap with the wrap index
		 * and "zap" the next entry
		 */
		else {
			if (wrapindex == MAXANCACHE) {
				wrapindex = 0;
			}
			cache_slot = wrapindex++;
		}
	}

	/* invariant: cache_slot now points at some slot
	 * in cache.
	 */
	if (cache_slot < 0 || cache_slot >= MAXANCACHE) {
		log(LOG_ERR, "an_cache_store, bad index: %d of "
		    "[0..%d], gross cache error\n",
		    cache_slot, MAXANCACHE);
		return;
	}

	/* store items in cache
	 * .ip source address
	 * .mac src
	 * .signal, etc.
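	 *
	 * (A hedged sketch of how userland might drain this cache through
	 * the SIOCGAIRONET handler above; hypothetical usage, not shipped
	 * code, and the socket "s" is assumed to be open already:)
	 */
#if 0
	/* Hypothetical userland illustration of AN_RID_READ_CACHE. */
	struct an_req	areq;
	struct ifreq	ifr;

	bzero(&areq, sizeof(areq));
	areq.an_type = AN_RID_READ_CACHE;
	strlcpy(ifr.ifr_name, "an0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (caddr_t)&areq;
	if (ioctl(s, SIOCGAIRONET, &ifr) == 0) {
		/* an_val begins with the entry count, then the entries. */
	}
#endif
	/* Now store the items: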
*/ if (type_ipv4) { sc->an_sigcache[cache_slot].ipsrc = ip->ip_src.s_addr; } bcopy( eh->ether_shost, sc->an_sigcache[cache_slot].macsrc, 6); switch (an_cache_mode) { case DBM: if (sc->an_have_rssimap) { sc->an_sigcache[cache_slot].signal = - sc->an_rssimap.an_entries[rx_rssi].an_rss_dbm; sc->an_sigcache[cache_slot].quality = - sc->an_rssimap.an_entries[rx_quality].an_rss_dbm; } else { sc->an_sigcache[cache_slot].signal = rx_rssi - 100; sc->an_sigcache[cache_slot].quality = rx_quality - 100; } break; case PERCENT: if (sc->an_have_rssimap) { sc->an_sigcache[cache_slot].signal = sc->an_rssimap.an_entries[rx_rssi].an_rss_pct; sc->an_sigcache[cache_slot].quality = sc->an_rssimap.an_entries[rx_quality].an_rss_pct; } else { if (rx_rssi > 100) rx_rssi = 100; if (rx_quality > 100) rx_quality = 100; sc->an_sigcache[cache_slot].signal = rx_rssi; sc->an_sigcache[cache_slot].quality = rx_quality; } break; case RAW: sc->an_sigcache[cache_slot].signal = rx_rssi; sc->an_sigcache[cache_slot].quality = rx_quality; break; } sc->an_sigcache[cache_slot].noise = 0; return; } #endif static int an_media_change(struct ifnet *ifp) { struct an_softc *sc = ifp->if_softc; struct an_ltv_genconfig *cfg; int otype = sc->an_config.an_opmode; int orate = sc->an_tx_rate; AN_LOCK(sc); sc->an_tx_rate = ieee80211_media2rate( IFM_SUBTYPE(sc->an_ifmedia.ifm_cur->ifm_media)); if (sc->an_tx_rate < 0) sc->an_tx_rate = 0; if (orate != sc->an_tx_rate) { /* Read the current configuration */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); an_read_record(sc, (struct an_ltv_gen *)&sc->an_config); cfg = &sc->an_config; /* clear other rates and set the only one we want */ bzero(cfg->an_rates, sizeof(cfg->an_rates)); cfg->an_rates[0] = sc->an_tx_rate; /* Save the new rate */ sc->an_config.an_type = AN_RID_GENCONFIG; sc->an_config.an_len = sizeof(struct an_ltv_genconfig); } if ((sc->an_ifmedia.ifm_cur->ifm_media & IFM_IEEE80211_ADHOC) != 0) sc->an_config.an_opmode &= ~AN_OPMODE_INFRASTRUCTURE_STATION; else sc->an_config.an_opmode |= AN_OPMODE_INFRASTRUCTURE_STATION; if (otype != sc->an_config.an_opmode || orate != sc->an_tx_rate) an_init_locked(sc); AN_UNLOCK(sc); return(0); } static void an_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct an_ltv_status status; struct an_softc *sc = ifp->if_softc; imr->ifm_active = IFM_IEEE80211; AN_LOCK(sc); status.an_len = sizeof(status); status.an_type = AN_RID_STATUS; if (an_read_record(sc, (struct an_ltv_gen *)&status)) { /* If the status read fails, just lie. 
*/ imr->ifm_active = sc->an_ifmedia.ifm_cur->ifm_media; imr->ifm_status = IFM_AVALID|IFM_ACTIVE; } if (sc->an_tx_rate == 0) { imr->ifm_active = IFM_IEEE80211|IFM_AUTO; } if (sc->an_config.an_opmode == AN_OPMODE_IBSS_ADHOC) imr->ifm_active |= IFM_IEEE80211_ADHOC; imr->ifm_active |= ieee80211_rate2media(NULL, status.an_current_tx_rate, IEEE80211_MODE_AUTO); imr->ifm_status = IFM_AVALID; if (status.an_opmode & AN_STATUS_OPMODE_ASSOCIATED) imr->ifm_status |= IFM_ACTIVE; AN_UNLOCK(sc); } /********************** Cisco utility support routines *************/ /* * ReadRids & WriteRids derived from Cisco driver additions to Ben Reed's * Linux driver */ static int readrids(struct ifnet *ifp, struct aironet_ioctl *l_ioctl) { unsigned short rid; struct an_softc *sc; int error; switch (l_ioctl->command) { case AIROGCAP: rid = AN_RID_CAPABILITIES; break; case AIROGCFG: rid = AN_RID_GENCONFIG; break; case AIROGSLIST: rid = AN_RID_SSIDLIST; break; case AIROGVLIST: rid = AN_RID_APLIST; break; case AIROGDRVNAM: rid = AN_RID_DRVNAME; break; case AIROGEHTENC: rid = AN_RID_ENCAPPROTO; break; case AIROGWEPKTMP: rid = AN_RID_WEP_TEMP; break; case AIROGWEPKNV: rid = AN_RID_WEP_PERM; break; case AIROGSTAT: rid = AN_RID_STATUS; break; case AIROGSTATSD32: rid = AN_RID_32BITS_DELTA; break; case AIROGSTATSC32: rid = AN_RID_32BITS_CUM; break; default: rid = 999; break; } if (rid == 999) /* Is bad command */ return -EINVAL; sc = ifp->if_softc; sc->areq.an_len = AN_MAX_DATALEN; sc->areq.an_type = rid; an_read_record(sc, (struct an_ltv_gen *)&sc->areq); l_ioctl->len = sc->areq.an_len - 4; /* just data */ AN_UNLOCK(sc); /* the data contains the length at first */ if (copyout(&(sc->areq.an_len), l_ioctl->data, sizeof(sc->areq.an_len))) { error = -EFAULT; goto lock_exit; } /* Just copy the data back */ if (copyout(&(sc->areq.an_val), l_ioctl->data + 2, l_ioctl->len)) { error = -EFAULT; goto lock_exit; } error = 0; lock_exit: AN_LOCK(sc); return (error); } static int writerids(struct ifnet *ifp, struct aironet_ioctl *l_ioctl) { struct an_softc *sc; int rid, command, error; sc = ifp->if_softc; AN_LOCK_ASSERT(sc); rid = 0; command = l_ioctl->command; switch (command) { case AIROPSIDS: rid = AN_RID_SSIDLIST; break; case AIROPCAP: rid = AN_RID_CAPABILITIES; break; case AIROPAPLIST: rid = AN_RID_APLIST; break; case AIROPCFG: rid = AN_RID_GENCONFIG; break; case AIROPMACON: an_cmd(sc, AN_CMD_ENABLE, 0); return 0; break; case AIROPMACOFF: an_cmd(sc, AN_CMD_DISABLE, 0); return 0; break; case AIROPSTCLR: /* * This command merely clears the counts does not actually * store any data only reads rid. But as it changes the cards * state, I put it in the writerid routines. 
*/ rid = AN_RID_32BITS_DELTACLR; sc = ifp->if_softc; sc->areq.an_len = AN_MAX_DATALEN; sc->areq.an_type = rid; an_read_record(sc, (struct an_ltv_gen *)&sc->areq); l_ioctl->len = sc->areq.an_len - 4; /* just data */ AN_UNLOCK(sc); /* the data contains the length at first */ error = copyout(&(sc->areq.an_len), l_ioctl->data, sizeof(sc->areq.an_len)); if (error) { AN_LOCK(sc); return -EFAULT; } /* Just copy the data */ error = copyout(&(sc->areq.an_val), l_ioctl->data + 2, l_ioctl->len); AN_LOCK(sc); if (error) return -EFAULT; return 0; break; case AIROPWEPKEY: rid = AN_RID_WEP_TEMP; break; case AIROPWEPKEYNV: rid = AN_RID_WEP_PERM; break; case AIROPLEAPUSR: rid = AN_RID_LEAPUSERNAME; break; case AIROPLEAPPWD: rid = AN_RID_LEAPPASSWORD; break; default: return -EOPNOTSUPP; } if (rid) { if (l_ioctl->len > sizeof(sc->areq.an_val) + 4) return -EINVAL; sc->areq.an_len = l_ioctl->len + 4; /* add type & length */ sc->areq.an_type = rid; /* Just copy the data back */ AN_UNLOCK(sc); error = copyin((l_ioctl->data) + 2, &sc->areq.an_val, l_ioctl->len); AN_LOCK(sc); if (error) return -EFAULT; an_cmd(sc, AN_CMD_DISABLE, 0); an_write_record(sc, (struct an_ltv_gen *)&sc->areq); an_cmd(sc, AN_CMD_ENABLE, 0); return 0; } return -EOPNOTSUPP; } /* * General Flash utilities derived from Cisco driver additions to Ben Reed's * Linux driver */ #define FLASH_DELAY(_sc, x) msleep(ifp, &(_sc)->an_mtx, PZERO, \ "flash", ((x) / hz) + 1); #define FLASH_COMMAND 0x7e7e #define FLASH_SIZE 32 * 1024 static int unstickbusy(struct ifnet *ifp) { struct an_softc *sc = ifp->if_softc; if (CSR_READ_2(sc, AN_COMMAND(sc->mpi350)) & AN_CMD_BUSY) { CSR_WRITE_2(sc, AN_EVENT_ACK(sc->mpi350), AN_EV_CLR_STUCK_BUSY); return 1; } return 0; } /* * Wait for busy completion from card wait for delay uSec's Return true for * success meaning command reg is clear */ static int WaitBusy(struct ifnet *ifp, int uSec) { int statword = 0xffff; int delay = 0; struct an_softc *sc = ifp->if_softc; while ((statword & AN_CMD_BUSY) && delay <= (1000 * 100)) { FLASH_DELAY(sc, 10); delay += 10; statword = CSR_READ_2(sc, AN_COMMAND(sc->mpi350)); if ((AN_CMD_BUSY & statword) && (delay % 200)) { unstickbusy(ifp); } } return 0 == (AN_CMD_BUSY & statword); } /* * STEP 1) Disable MAC and do soft reset on card. 
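 * (Sequence roadmap, per flashcard() below: AIROFLSHRST performs this
 * reset, AIROFLSHSTFL allocates the 32k buffer and enters flash mode,
 * AIROFLSHGCHR/AIROFLSHPCHR run the byte handshake, AIROFLPUTBUF
 * downloads the image, and AIRORESTART brings the card back up.)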
*/ static int cmdreset(struct ifnet *ifp) { int status; struct an_softc *sc = ifp->if_softc; AN_LOCK(sc); an_stop(sc); an_cmd(sc, AN_CMD_DISABLE, 0); if (!(status = WaitBusy(ifp, AN_TIMEOUT))) { if_printf(ifp, "Waitbusy hang before RESET =%d\n", status); AN_UNLOCK(sc); return -EBUSY; } CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), AN_CMD_FW_RESTART); FLASH_DELAY(sc, 1000); /* WAS 600 12/7/00 */ if (!(status = WaitBusy(ifp, 100))) { if_printf(ifp, "Waitbusy hang AFTER RESET =%d\n", status); AN_UNLOCK(sc); return -EBUSY; } AN_UNLOCK(sc); return 0; } /* * STEP 2) Put the card in legendary flash mode */ static int setflashmode(struct ifnet *ifp) { int status; struct an_softc *sc = ifp->if_softc; CSR_WRITE_2(sc, AN_SW0(sc->mpi350), FLASH_COMMAND); CSR_WRITE_2(sc, AN_SW1(sc->mpi350), FLASH_COMMAND); CSR_WRITE_2(sc, AN_SW0(sc->mpi350), FLASH_COMMAND); CSR_WRITE_2(sc, AN_COMMAND(sc->mpi350), FLASH_COMMAND); /* * mdelay(500); // 500ms delay */ FLASH_DELAY(sc, 500); if (!(status = WaitBusy(ifp, AN_TIMEOUT))) { printf("Waitbusy hang after setflash mode\n"); return -EIO; } return 0; } /* * STEP 3) Get a character from the card matching matchbyte */ static int flashgchar(struct ifnet *ifp, int matchbyte, int dwelltime) { int rchar; unsigned char rbyte = 0; int success = -1; struct an_softc *sc = ifp->if_softc; do { rchar = CSR_READ_2(sc, AN_SW1(sc->mpi350)); if (dwelltime && !(0x8000 & rchar)) { dwelltime -= 10; FLASH_DELAY(sc, 10); continue; } rbyte = 0xff & rchar; if ((rbyte == matchbyte) && (0x8000 & rchar)) { CSR_WRITE_2(sc, AN_SW1(sc->mpi350), 0); success = 1; break; } if (rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar) break; CSR_WRITE_2(sc, AN_SW1(sc->mpi350), 0); } while (dwelltime > 0); return success; } /* * Put a character to SWS0; wait dwelltime x 50us for the echo. */ static int flashpchar(struct ifnet *ifp, int byte, int dwelltime) { int echo; int pollbusy, waittime; struct an_softc *sc = ifp->if_softc; byte |= 0x8000; if (dwelltime == 0) dwelltime = 200; waittime = dwelltime; /* * Wait for busy bit d15 to go false, indicating the buffer is empty */ do { pollbusy = CSR_READ_2(sc, AN_SW0(sc->mpi350)); if (pollbusy & 0x8000) { FLASH_DELAY(sc, 50); waittime -= 50; continue; } else break; } while (waittime >= 0); /* timeout for busy clear wait */ if (waittime <= 0) { if_printf(ifp, "flash putchar busywait timeout!\n"); return -1; } /* * Port is clear now; write the byte and wait for it to echo back */ do { CSR_WRITE_2(sc, AN_SW0(sc->mpi350), byte); FLASH_DELAY(sc, 50); dwelltime -= 50; echo = CSR_READ_2(sc, AN_SW1(sc->mpi350)); } while (dwelltime >= 0 && echo != byte); CSR_WRITE_2(sc, AN_SW1(sc->mpi350), 0); return echo == byte; } /* * Transfer 32k of firmware data from the user buffer to our buffer and send * it to the card */ static int flashputbuf(struct ifnet *ifp) { unsigned short *bufp; int nwords; struct an_softc *sc = ifp->if_softc; /* Write stuff */ bufp = sc->an_flash_buffer; if (!sc->mpi350) { CSR_WRITE_2(sc, AN_AUX_PAGE, 0x100); CSR_WRITE_2(sc, AN_AUX_OFFSET, 0); for (nwords = 0; nwords != FLASH_SIZE / 2; nwords++) { CSR_WRITE_2(sc, AN_AUX_DATA, bufp[nwords] & 0xffff); } } else { for (nwords = 0; nwords != FLASH_SIZE / 4; nwords++) { CSR_MEM_AUX_WRITE_4(sc, 0x8000, ((u_int32_t *)bufp)[nwords] & 0xffff); } } CSR_WRITE_2(sc, AN_SW0(sc->mpi350), 0x8000); return 0; } /* * After flashing, restart the card. 
*/ static int flashrestart(struct ifnet *ifp) { int status = 0; struct an_softc *sc = ifp->if_softc; FLASH_DELAY(sc, 1024); /* Added 12/7/00 */ an_init_locked(sc); FLASH_DELAY(sc, 1024); /* Added 12/7/00 */ return status; } /* * Entry point for the flash ioctl. */ static int flashcard(struct ifnet *ifp, struct aironet_ioctl *l_ioctl) { int z = 0, status; struct an_softc *sc; sc = ifp->if_softc; if (sc->mpi350) { if_printf(ifp, "flashing not supported on MPI 350 yet\n"); return(-1); } status = l_ioctl->command; switch (l_ioctl->command) { case AIROFLSHRST: return cmdreset(ifp); break; case AIROFLSHSTFL: if (sc->an_flash_buffer) { free(sc->an_flash_buffer, M_DEVBUF); sc->an_flash_buffer = NULL; } sc->an_flash_buffer = malloc(FLASH_SIZE, M_DEVBUF, M_WAITOK); if (sc->an_flash_buffer) return setflashmode(ifp); else return ENOBUFS; break; case AIROFLSHGCHR: /* Get char from aux */ if (l_ioctl->len > sizeof(sc->areq)) { return -EINVAL; } AN_UNLOCK(sc); status = copyin(l_ioctl->data, &sc->areq, l_ioctl->len); AN_LOCK(sc); if (status) return status; z = *(int *)&sc->areq; if ((status = flashgchar(ifp, z, 8000)) == 1) return 0; else return -1; case AIROFLSHPCHR: /* Send char to card. */ if (l_ioctl->len > sizeof(sc->areq)) { return -EINVAL; } AN_UNLOCK(sc); status = copyin(l_ioctl->data, &sc->areq, l_ioctl->len); AN_LOCK(sc); if (status) return status; z = *(int *)&sc->areq; if ((status = flashpchar(ifp, z, 8000)) == -1) return -EIO; else return 0; break; case AIROFLPUTBUF: /* Send 32k to card */ if (l_ioctl->len > FLASH_SIZE) { if_printf(ifp, "Buffer too big, %x %x\n", l_ioctl->len, FLASH_SIZE); return -EINVAL; } AN_UNLOCK(sc); status = copyin(l_ioctl->data, sc->an_flash_buffer, l_ioctl->len); AN_LOCK(sc); if (status) return status; if ((status = flashputbuf(ifp)) != 0) return -EIO; else return 0; break; case AIRORESTART: if ((status = flashrestart(ifp)) != 0) { if_printf(ifp, "FLASHRESTART returned %d\n", status); return -EIO; } else return 0; break; default: return -EINVAL; } return -EINVAL; } Index: head/sys/dev/arcmsr/arcmsr.c =================================================================== --- head/sys/dev/arcmsr/arcmsr.c (revision 313981) +++ head/sys/dev/arcmsr/arcmsr.c (revision 313982) @@ -1,4562 +1,4562 @@ /* ******************************************************************************** ** OS : FreeBSD ** FILE NAME : arcmsr.c ** BY : Erich Chen, Ching Huang ** Description: SCSI RAID Device Driver for ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) ** SATA/SAS RAID HOST Adapter ******************************************************************************** ******************************************************************************** ** ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. 
** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** ** History ** ** REV# DATE NAME DESCRIPTION ** 1.00.00.00 03/31/2004 Erich Chen First release ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support ** clean unused function ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling, ** firmware version check ** and firmware update notify for hardware bug fix ** handling if none zero high part physical address ** of srb resource ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy ** add iop message xfer ** with scsi pass-through command ** add new device id of sas raid adapters ** code fit for SPARC64 & PPC ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report ** and cause g_vfs_done() read write error ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x ** bus_dmamem_alloc() with BUS_DMA_ZERO ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed, ** prevent cam_periph_error removing all LUN devices of one Target id ** for any one LUN device failed ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails comparion at DV1 step" ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout ** 02/14/2011 Ching Huang Modified pktRequestCount ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start ** 1.20.00.23 11/08/2011 Ching Huang Added report device transfer speed ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter ** 1.20.00.26 12/14/2012 Ching Huang Added support ARC1214,1224,1264,1284 ** 1.20.00.27 05/06/2013 Ching Huang Fixed out standing cmd full on ARC-12x4 ** 1.20.00.28 09/13/2013 Ching Huang Removed recursive mutex in arcmsr_abort_dr_ccbs ** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883 ** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203 
****************************************************************************************** */ #include __FBSDID("$FreeBSD$"); #if 0 #define ARCMSR_DEBUG1 1 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version >= 500005 #include #include #include #include #include #else #include #include #include #endif #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 #define CAM_NEW_TRAN_CODE 1 #endif #if __FreeBSD_version > 500000 #define arcmsr_callout_init(a) callout_init(a, /*mpsafe*/1); #else #define arcmsr_callout_init(a) callout_init(a); #endif #define ARCMSR_DRIVER_VERSION "arcmsr version 1.30.00.00 2015-11-30" #include /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb); static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb); static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb); static int arcmsr_probe(device_t dev); static int arcmsr_attach(device_t dev); static int arcmsr_detach(device_t dev); static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); static void arcmsr_iop_parking(struct AdapterControlBlock *acb); static int arcmsr_shutdown(device_t dev); static void arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); static void arcmsr_free_resource(struct AdapterControlBlock *acb); static void arcmsr_bus_reset(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer); static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb); static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); static void arcmsr_iop_reset(struct AdapterControlBlock *acb); static void arcmsr_report_sense_info(struct CommandControlBlock *srb); static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg); static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb); static int arcmsr_resume(device_t dev); static int arcmsr_suspend(device_t dev); static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb); static void arcmsr_polling_devmap(void *arg); static void arcmsr_srb_timeout(void *arg); static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb); #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb); #endif /* ************************************************************************** 
************************************************************************** */ static void UDELAY(u_int32_t us) { DELAY(us); } /* ************************************************************************** ************************************************************************** */ static bus_dmamap_callback_t arcmsr_map_free_srb; static bus_dmamap_callback_t arcmsr_execute_srb; /* ************************************************************************** ************************************************************************** */ static d_open_t arcmsr_open; static d_close_t arcmsr_close; static d_ioctl_t arcmsr_ioctl; static device_method_t arcmsr_methods[]={ DEVMETHOD(device_probe, arcmsr_probe), DEVMETHOD(device_attach, arcmsr_attach), DEVMETHOD(device_detach, arcmsr_detach), DEVMETHOD(device_shutdown, arcmsr_shutdown), DEVMETHOD(device_suspend, arcmsr_suspend), DEVMETHOD(device_resume, arcmsr_resume), #if __FreeBSD_version >= 803000 DEVMETHOD_END #else { 0, 0 } #endif }; static driver_t arcmsr_driver={ "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) }; static devclass_t arcmsr_devclass; DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); MODULE_DEPEND(arcmsr, pci, 1, 1, 1); MODULE_DEPEND(arcmsr, cam, 1, 1, 1); #ifndef BUS_DMA_COHERENT #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ #endif #if __FreeBSD_version >= 501000 static struct cdevsw arcmsr_cdevsw={ #if __FreeBSD_version >= 503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif .d_open = arcmsr_open, /* open */ .d_close = arcmsr_close, /* close */ .d_ioctl = arcmsr_ioctl, /* ioctl */ .d_name = "arcmsr", /* name */ }; #else #define ARCMSR_CDEV_MAJOR 180 static struct cdevsw arcmsr_cdevsw = { arcmsr_open, /* open */ arcmsr_close, /* close */ noread, /* read */ nowrite, /* write */ arcmsr_ioctl, /* ioctl */ nopoll, /* poll */ nommap, /* mmap */ nostrategy, /* strategy */ "arcmsr", /* name */ ARCMSR_CDEV_MAJOR, /* major */ nodump, /* dump */ nopsize, /* psize */ 0 /* flags */ }; #endif /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return (0); } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return 0; } /* 
************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #else static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); } /* ********************************************************************** ********************************************************************** */ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb) { u_int32_t intmask_org = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* disable all outbound interrupt */ intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */ } break; case ACB_ADAPTER_TYPE_C: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_D: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); } break; } return (intmask_org); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org) { u_int32_t mask; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */ mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_C: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = 
~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_D: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ARCMSR_HBDMU_ALL_INT_ENABLE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask); CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); acb->outbound_int_enable = mask; } break; } } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; do { for(Index=0; Index < 100; Index++) { if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ 
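/* ** Handshake sketch for the type A cache flush below, derived from the wait ** routines above: each arcmsr_hba_wait_msgint_ready() call polls the ** outbound message0 interrupt for up to ~20 seconds (20 retries x 100 x 10ms), ** so retry_count = 30 bounds the whole flush wait at roughly 10 minutes. */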
CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hba_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE); do { if(arcmsr_hbb_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); do { if(arcmsr_hbc_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb) { int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hbd_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_flush_hba_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_flush_hbb_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_flush_hbc_cache(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_flush_hbd_cache(acb); } break; } } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_suspend(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); /* flush controller */ arcmsr_iop_parking(acb); /* disable all outbound interrupt */ arcmsr_disable_allintr(acb); return(0); } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_resume(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); arcmsr_iop_init(acb); return(0); } /* ********************************************************************************* ********************************************************************************* */ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) { struct AdapterControlBlock *acb; u_int8_t target_id, target_lun; struct cam_sim *sim; sim = (struct cam_sim *) cb_arg; acb =(struct AdapterControlBlock *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: target_id = xpt_path_target_id(path); 
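/* ** Note: the departing device's target/lun comes from the CAM path and is ** bounds-checked below; notifications outside the adapter's addressable ** range (ARCMSR_MAX_TARGETID/ARCMSR_MAX_TARGETLUN) are silently ignored. */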
target_lun = xpt_path_lun_id(path); if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { break; } // printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun); break; default: break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_report_sense_info(struct CommandControlBlock *srb) { union ccb *pccb = srb->pccb; pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; if(pccb->csio.sense_len) { memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ pccb->ccb_h.status |= CAM_AUTOSNS_VALID; } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_abort_hbb_allcmd(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_abort_hbc_allcmd(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_abort_hbd_allcmd(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_srb_complete(struct CommandControlBlock *srb, 
int stand_flag) { struct AdapterControlBlock *acb = srb->acb; union ccb *pccb = srb->pccb; if(srb->srb_flags & SRB_FLAG_TIMER_START) callout_stop(&srb->ccb_callout); if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } if(stand_flag == 1) { atomic_subtract_int(&acb->srboutstandingcount, 1); if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( acb->srboutstandingcount < (acb->maxOutstanding -10))) { acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } if(srb->srb_state != ARCMSR_SRB_TIMEOUT) arcmsr_free_srb(srb); acb->pktReturnCount++; xpt_done(pccb); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error) { int target, lun; target = srb->pccb->ccb_h.target_id; lun = srb->pccb->ccb_h.target_lun; if(error == FALSE) { if(acb->devstate[target][lun] == ARECA_RAID_GONE) { acb->devstate[target][lun] = ARECA_RAID_GOOD; } srb->pccb->ccb_h.status |= CAM_REQ_CMP; arcmsr_srb_complete(srb, 1); } else { switch(srb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { if(acb->devstate[target][lun] == ARECA_RAID_GOOD) { printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun); } acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case SCSISTAT_CHECK_CONDITION: { acb->devstate[target][lun] = ARECA_RAID_GOOD; arcmsr_report_sense_info(srb); arcmsr_srb_complete(srb, 1); } break; default: printf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n" , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus); acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; /*unknown error or crc error, just retry*/ arcmsr_srb_complete(srb, 1); break; } } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error) { struct CommandControlBlock *srb; /* check if command done with no error*/ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/ break; case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: default: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ break; } if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_TIMEOUT) { arcmsr_free_srb(srb); printf("arcmsr%d: srb='%p' returned srb has timed out\n", acb->pci_unit, srb); return; } printf("arcmsr%d: returned srb has been completed\n" "srb='%p' srb_state=0x%x outstanding srb count=%d \n", acb->pci_unit, srb, srb->srb_state, 
acb->srboutstandingcount); return; } arcmsr_report_srb_state(acb, srb, error); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_srb_timeout(void *arg) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb; int target, lun; u_int8_t cmd; target = srb->pccb->ccb_h.target_id; lun = srb->pccb->ccb_h.target_lun; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); if(srb->srb_state == ARCMSR_SRB_START) { cmd = scsiio_cdb_ptr(&srb->pccb->csio)[0]; srb->srb_state = ARCMSR_SRB_TIMEOUT; srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n", acb->pci_unit, target, lun, cmd, srb); } ARCMSR_LOCK_RELEASE(&acb->isr_lock); #ifdef ARCMSR_DEBUG1 arcmsr_dump_data(acb); #endif } /* ********************************************************************** ********************************************************************** */ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i=0; u_int32_t flag_srb; u_int16_t error; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t outbound_intstatus; /*clear and abort all outbound posted Q*/ outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; /*clear all outbound posted Q*/ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if((flag_srb = phbbmu->done_qbuffer[i]) != 0) { phbbmu->done_qbuffer[i] = 0; error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } phbbmu->post_qbuffer[i] = 0; }/*drain reply FIFO*/ phbbmu->doneq_index = 0; phbbmu->postq_index = 0; } break; case ACB_ADAPTER_TYPE_C: { while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? 
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_D: { arcmsr_hbd_postqueue_isr(acb); } break; } } /* **************************************************************************** **************************************************************************** */ static void arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb; u_int32_t intmask_org; u_int32_t i=0; if(acb->srboutstandingcount>0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n" , acb->pci_unit, srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); } } /* enable all outbound interrupt */ arcmsr_enable_allintr(acb, intmask_org); } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) { struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb; u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u; u_int32_t address_lo, address_hi; union ccb *pccb = srb->pccb; struct ccb_scsiio *pcsio = &pccb->csio; u_int32_t arccdbsize = 0x30; memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); arcmsr_cdb->Bus = 0; arcmsr_cdb->TargetID = pccb->ccb_h.target_id; arcmsr_cdb->LUN = pccb->ccb_h.target_lun; arcmsr_cdb->Function = 1; arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len; bcopy(scsiio_cdb_ptr(pcsio), arcmsr_cdb->Cdb, pcsio->cdb_len); if(nseg != 0) { struct AdapterControlBlock *acb = srb->acb; bus_dmasync_op_t op; u_int32_t length, i, cdb_sgcount = 0; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; srb->srb_flags |= SRB_FLAG_WRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); for(i=0; i < nseg; i++) { /* Get the physical address of the current data pointer */ length = arcmsr_htole32(dm_segs[i].ds_len); address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); if(address_hi == 0) { struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; pdma_sg->address = address_lo; pdma_sg->length = length; psge += sizeof(struct SG32ENTRY); arccdbsize += sizeof(struct SG32ENTRY); } else { u_int32_t sg64s_size = 0, tmplength = length; while(1) { u_int64_t span4G, length0; struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; span4G = (u_int64_t)address_lo + tmplength; pdma_sg->addresshigh = address_hi; pdma_sg->address = address_lo; if(span4G > 0x100000000) { /*see if cross 4G boundary*/ length0 = 0x100000000-address_lo; pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR; address_hi = address_hi+1; address_lo = 0; tmplength = tmplength - (u_int32_t)length0; sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); cdb_sgcount++; } else { pdma_sg->length = tmplength | IS_SG64_ADDR; 
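/* ** The remaining piece no longer crosses a 4G boundary, so emit the final ** SG64 entry for this segment and leave the split loop; the if-branch above ** handled the part below the boundary and advanced address_hi/address_lo. */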
sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); break; } } arccdbsize += sg64s_size; } cdb_sgcount++; } arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount; arcmsr_cdb->DataLength = pcsio->dxfer_len; if( arccdbsize > 256) { arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; } } else { arcmsr_cdb->DataLength = 0; } srb->arc_cdb_size = arccdbsize; arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) { u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low; struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); atomic_add_int(&acb->srboutstandingcount, 1); srb->srb_state = ARCMSR_SRB_START; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); } else { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; int ending_index, index; index = phbbmu->postq_index; ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE); phbbmu->post_qbuffer[ending_index] = 0; if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE; } else { phbbmu->post_qbuffer[index] = cdb_phyaddr_low; } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->postq_index = index; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED); } break; case ACB_ADAPTER_TYPE_C: { u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32; arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size; ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1); cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; if(cdb_phyaddr_hi32) { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } else { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int16_t index_stripped; u_int16_t postq_index; struct InBound_SRB *pinbound_srb; ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock); postq_index = phbdmu->postq_index; pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF]; pinbound_srb->addressHigh = srb->cdb_phyaddr_high; pinbound_srb->addressLow = srb->cdb_phyaddr_low; pinbound_srb->length = srb->arc_cdb_size >> 2; arcmsr_cdb->Context = srb->cdb_phyaddr_low; if (postq_index & 0x4000) { index_stripped = postq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = postq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->postq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index); ARCMSR_LOCK_RELEASE(&acb->postDone_lock); } break; } } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer=NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer; } break; } return(qbuffer); } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer = NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer; } break; } return(qbuffer); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* let IOP know data has been read */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_C: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_D: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, 
inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_C: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_D: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY); } break; } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n" , acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; acb->acb_flags &= ~ACB_F_MSG_START_BGRB; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n" , acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit); } } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_stop_hba_bgrb(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_stop_hbb_bgrb(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_stop_hbc_bgrb(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_stop_hbd_bgrb(acb); } break; } } /* ************************************************************************ 
************************************************************************ */ static void arcmsr_poll(struct cam_sim *psim) { struct AdapterControlBlock *acb; int mutex; acb = (struct AdapterControlBlock *)cam_sim_softc(psim); mutex = mtx_owned(&acb->isr_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ************************************************************************** ************************************************************************** */ static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer) { u_int8_t *pQbuffer; - u_int8_t *buf1 = 0; - u_int32_t *iop_data, *buf2 = 0; + u_int8_t *buf1 = NULL; + u_int32_t *iop_data, *buf2 = NULL; u_int32_t iop_len, data_len; iop_data = (u_int32_t *)prbuffer->data; iop_len = (u_int32_t)prbuffer->data_len; if ( iop_len > 0 ) { buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO); buf2 = (u_int32_t *)buf1; if( buf1 == NULL) return (0); data_len = iop_len; while(data_len >= 4) { *buf2++ = *iop_data++; data_len -= 4; } if(data_len) *buf2 = *iop_data; buf2 = (u_int32_t *)buf1; } while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; *pQbuffer = *buf1; acb->rqbuf_lastindex++; /* if last, index number set it to 0 */ acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; buf1++; iop_len--; } if(buf2) free( (u_int8_t *)buf2, M_DEVBUF); /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return (1); } /* ************************************************************************** ************************************************************************** */ static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer) { u_int8_t *pQbuffer; u_int8_t *iop_data; u_int32_t iop_len; if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer)); } iop_data = (u_int8_t *)prbuffer->data; iop_len = (u_int32_t)prbuffer->data_len; while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; *pQbuffer = *iop_data; acb->rqbuf_lastindex++; /* if last, index number set it to 0 */ acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return (1); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) { struct QBUFFER *prbuffer; int my_empty_len; /*check this iop data if overflow my rqbuffer*/ ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); prbuffer = arcmsr_get_iop_rqbuffer(acb); my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) & (ARCMSR_MAX_QBUFFER-1); if(my_empty_len >= prbuffer->data_len) { if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } else { acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; - u_int8_t *buf1 = 0; - u_int32_t *iop_data, *buf2 = 0; + u_int8_t *buf1 = NULL; + u_int32_t *iop_data, *buf2 = NULL; u_int32_t allxfer_len = 0, data_len; if(acb->acb_flags & 
ACB_F_MESSAGE_WQBUFFER_READ) { buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO); buf2 = (u_int32_t *)buf1; if( buf1 == NULL) return; acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (u_int32_t *)pwbuffer->data; while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; *buf1 = *pQbuffer; acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; buf1++; allxfer_len++; } pwbuffer->data_len = allxfer_len; data_len = allxfer_len; buf1 = (u_int8_t *)buf2; while(data_len >= 4) { *iop_data++ = *buf2++; data_len -= 4; } if(data_len) *iop_data = *buf2; free( buf1, M_DEVBUF); arcmsr_iop_message_wrote(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *iop_data; int32_t allxfer_len=0; if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { arcmsr_Write_data_2iop_wqbuffer_D(acb); return; } if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (u_int8_t *)pwbuffer->data; while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; *iop_data = *pQbuffer; acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; iop_data++; allxfer_len++; } pwbuffer->data_len = allxfer_len; arcmsr_iop_message_wrote(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) { ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ; /* ***************************************************************** ** check if there are any mail packages from user space program ** in my post bag, now is the time to send them into Areca's firmware ***************************************************************** */ if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) { arcmsr_Write_data_2iop_wqbuffer(acb); } if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb) { /* if (ccb->ccb_h.status != CAM_REQ_CMP) printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x," "failure status=%x\n", ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->ccb_h.status); else printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n"); */ xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun) { struct cam_path *path; union ccb *ccb; if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } /* printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */ bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; 
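/* ** The LUN rescan is asynchronous: the xpt_action() call below queues the ** XPT_SCAN_LUN ccb, and arcmsr_rescanLun_cb() (installed just below) later ** frees both the path and the ccb, so nothing leaks whether or not the ** rescan succeeds. */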
ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); } static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun) { struct CommandControlBlock *srb; u_int32_t intmask_org; int i; /* disable all outbound interrupts */ intmask_org = arcmsr_disable_allintr(acb); for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if (srb->srb_state == ARCMSR_SRB_START) { if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_dr_handle(struct AdapterControlBlock *acb) { u_int32_t devicemap; u_int32_t target, lun; u_int32_t deviceMapCurrent[4]={0}; u_int8_t *pDevMap; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_B: devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_C: devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_D: devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; } if(acb->acb_flags & ACB_F_BUS_HANG_ON) { acb->acb_flags &= ~ACB_F_BUS_HANG_ON; } /* ** adapter posted CONFIG message ** copy the new map, note if there are differences with the current map */ pDevMap = (u_int8_t *)&deviceMapCurrent[0]; for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { if (*pDevMap != acb->device_map[target]) { u_int8_t difference, bit_check; difference = *pDevMap ^ acb->device_map[target]; for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) { bit_check = (1 << lun); /*check bit from 0....31*/ if(difference & bit_check) { if(acb->device_map[target] & bit_check) {/* unit departed */ printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun); arcmsr_abort_dr_ccbs(acb, target, lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GONE; } else {/* unit arrived */ printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GOOD; } } } /* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */ acb->device_map[target] = *pDevMap; } pDevMap++; } } /* ************************************************************************** 
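** The four chip-specific *_message_isr() handlers that follow share one
** shape: acknowledge the message interrupt in the adapter's own register,
** read msgcode_rwbuffer[0], and dispatch on the signature. A minimal
** sketch of that common pattern (the helper names here are illustrative,
** not functions in this driver):
**
**	ack_message_int(acb);	/* chip-specific acknowledge/clear write */
**	if (read_msgcode0(acb) == ARCMSR_SIGNATURE_GET_CONFIG)
**		arcmsr_dr_handle(acb);	/* firmware posted a device map */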
************************************************************************** */ static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* clear interrupts */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe we need to check here whether wrqbuffer_lock is held. ** DOORBELL: din! don! ** Check whether there is any mail from the firmware that needs handling. ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe we need to check here whether wrqbuffer_lock is held. ** DOORBELL: din! don! 
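** The outbound doorbell is treated as write-one-to-clear: latch the
** status, write the same bits back to the clear register, then dispatch,
** so an event that fires again while being serviced raises a fresh
** interrupt instead of being lost. Illustrative example: if both the
** DATA_WRITE_OK and DATA_READ_OK bits are latched at once, both queue
** handlers below run from a single interrupt.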
** Check whether there is any mail from the firmware that needs handling. ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe we need to check here whether wrqbuffer_lock is held. ** DOORBELL: din! don! ** Check whether there is any mail from the firmware that needs handling. ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) { if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbd_message_isr(acb); /* messenger of "driver to iop commands" */ } doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) { /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? 
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; u_int32_t flag_srb; int index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); index = phbbmu->doneq_index; while((flag_srb = phbbmu->done_qbuffer[index]) != 0) { phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb,throttling = 0; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); do { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); if (flag_srb == 0xFFFFFFFF) break; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); throttling++; if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); throttling = 0; } } while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR); } /* ********************************************************************** ** ********************************************************************** */ static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu) { uint16_t doneq_index, index_stripped; doneq_index = phbdmu->doneq_index; if (doneq_index & 0x4000) { index_stripped = doneq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = doneq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } return (phbdmu->doneq_index); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int32_t outbound_write_pointer; u_int32_t addressLow; uint16_t doneq_index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) & ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0) return; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) { doneq_index = arcmsr_get_doneq_index(phbdmu); addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; } CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR); CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_intStatus; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; if(!outbound_intStatus) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/ /* MU doorbell interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { arcmsr_hba_doorbell_isr(acb); } /* MU post queue interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { arcmsr_hba_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable; if(!outbound_doorbell) { /*it must be share irq*/ return; } WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell); WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); /* MU ioctl transfer doorbell interrupts*/ if(outbound_doorbell & 
ARCMSR_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } /* MU post queue interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbb_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); if(!host_interrupt_status) { /*it must be share irq*/ return; } do { /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbc_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbc_postqueue_isr(acb); } host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; u_int32_t intmask_org; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable; if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) { /*it must be share irq*/ return; } /* disable outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) { arcmsr_hbd_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hbd_postqueue_isr(acb); } /* enable all outbound interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE); // CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_handle_hba_isr(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_handle_hbb_isr(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_handle_hbc_isr(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_handle_hbd_isr(acb); break; default: printf("arcmsr%d: interrupt service," " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_intr_handler(void *arg) { struct AdapterControlBlock *acb = (struct 
AdapterControlBlock *)arg; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_polling_devmap(void *arg) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); } break; case ACB_ADAPTER_TYPE_C: CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); break; case ACB_ADAPTER_TYPE_D: CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; } if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) { callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ } } /* ******************************************************************************* ** ******************************************************************************* */ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { u_int32_t intmask_org; if(acb != NULL) { /* stop adapter background rebuild */ if(acb->acb_flags & ACB_F_MSG_START_BGRB) { intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_allintr(acb, intmask_org); } } } /* *********************************************************************** ** ************************************************************************ */ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; u_int32_t retvalue = EINVAL; pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg; if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { return retvalue; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); switch(ioctl_cmd) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; u_int32_t allxfer_len=0; while((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { /*copy READ QBUFFER to srb*/ pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpQbuffer++; allxfer_len++; } if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; /*check if data xfer length of this request will overflow my array qbuffer */ wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; 
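/*
** Free space in the circular wqbuffer is (firstindex - lastindex - 1)
** masked with (ARCMSR_MAX_QBUFFER - 1); the mask form of the modulus
** relies on ARCMSR_MAX_QBUFFER being a power of two, and the one slot
** held back keeps firstindex == lastindex unambiguous ("empty").
** Worked example (illustrative): with both indexes equal, the expression
** is (-1) & (ARCMSR_MAX_QBUFFER - 1) == ARCMSR_MAX_QBUFFER - 1, so an
** empty ring accepts one byte less than its full capacity.
*/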
if(wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; } else { my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1); if(my_empty_len >= user_len) { while(user_len > 0) { /*copy srb data to wqbuffer*/ pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpuserbuffer++; user_len--; } /*post first Qbuffer*/ if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } else { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; } } retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |ACB_F_MESSAGE_RQBUFFER_CLEARED |ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_HELLO: { u_int8_t *hello_string = "Hello! 
I am ARCMSR"; u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer; if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return ENOIOCTL; } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: { arcmsr_iop_parking(acb); retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { arcmsr_flush_adapter_cache(acb); retvalue = ARCMSR_MESSAGE_SUCCESS; } break; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return (retvalue); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb) { struct AdapterControlBlock *acb; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); srb->srb_state = ARCMSR_SRB_DONE; srb->srb_flags = 0; acb->srbworkingQ[acb->workingsrb_doneindex] = srb; acb->workingsrb_doneindex++; acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; ARCMSR_LOCK_RELEASE(&acb->srb_lock); } /* ************************************************************************** ************************************************************************** */ struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb = NULL; u_int32_t workingsrb_startindex, workingsrb_doneindex; ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); workingsrb_doneindex = acb->workingsrb_doneindex; workingsrb_startindex = acb->workingsrb_startindex; srb = acb->srbworkingQ[workingsrb_startindex]; workingsrb_startindex++; workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; if(workingsrb_doneindex != workingsrb_startindex) { acb->workingsrb_startindex = workingsrb_startindex; } else { srb = NULL; } ARCMSR_LOCK_RELEASE(&acb->srb_lock); return(srb); } /* ************************************************************************** ************************************************************************** */ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; int retvalue = 0, transfer_len = 0; char *buffer; uint8_t *ptr = scsiio_cdb_ptr(&pccb->csio); u_int32_t controlcode = (u_int32_t ) ptr[5] << 24 | (u_int32_t ) ptr[6] << 16 | (u_int32_t ) ptr[7] << 8 | (u_int32_t ) ptr[8]; /* 4 bytes: Areca io control code */ if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { buffer = pccb->csio.data_ptr; transfer_len = pccb->csio.dxfer_len; } else { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; switch(controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; int32_t allxfer_len = 0; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; ptmpQbuffer++; allxfer_len++; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, 
prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; ptmpuserbuffer++; user_len--; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } } else { /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 
0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! I am ARCMSR"; memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: arcmsr_flush_adapter_cache(acb); break; default: retvalue = ARCMSR_MESSAGE_FAIL; } message_out: return (retvalue); } /* ********************************************************************* ********************************************************************* */ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb; union ccb *pccb; int target, lun; pccb = srb->pccb; target = pccb->ccb_h.target_id; lun = pccb->ccb_h.target_lun; acb->pktRequestCount++; if(error != 0) { if(error != EFBIG) { printf("arcmsr%d: unexpected error %x" " returned from 'bus_dmamap_load' \n" , acb->pci_unit, error); } if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; } arcmsr_srb_complete(srb, 0); return; } if(nseg > ARCMSR_MAX_SG_ENTRIES) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; arcmsr_srb_complete(srb, 0); return; } if(acb->acb_flags & ACB_F_BUS_RESET) { printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; arcmsr_srb_complete(srb, 0); return; } if(acb->devstate[target][lun] == ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; cmd = scsiio_cdb_ptr(&pccb->csio)[0]; block_cmd = cmd & 0x0f; if(block_cmd == 0x08 || block_cmd == 0x0a) { printf("arcmsr%d:block 'read/write' command " "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n" , acb->pci_unit, cmd, target, lun); pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 0); return; } } if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if(nseg != 0) { bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } arcmsr_srb_complete(srb, 0); return; } if(acb->srboutstandingcount >= acb->maxOutstanding) { if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0) { xpt_freeze_simq(acb->psim, 1); acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; } pccb->ccb_h.status &= ~CAM_SIM_QUEUED; pccb->ccb_h.status |= CAM_REQUEUE_REQ; arcmsr_srb_complete(srb, 0); return; } pccb->ccb_h.status |= CAM_SIM_QUEUED; arcmsr_build_srb(srb, dm_segs, nseg); arcmsr_post_srb(acb, srb); if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) { arcmsr_callout_init(&srb->ccb_callout); callout_reset_sbt(&srb->ccb_callout, SBT_1MS * (pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)), 0, arcmsr_srb_timeout, srb, 0); srb->srb_flags |= SRB_FLAG_TIMER_START; } } /* ***************************************************************************************** ***************************************************************************************** */ static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb) { struct CommandControlBlock *srb; struct AdapterControlBlock *acb = 
(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; u_int32_t intmask_org; int i = 0; acb->num_aborts++; /* *************************************************************************** ** The upper layer performs the abort and takes this lock just prior to ** calling us. First determine whether we currently own this command: ** start by searching the device queue. If it is not found at all and the ** system only wanted us to abort the command, return success. *************************************************************************** */ if(acb->srboutstandingcount != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { if(srb->pccb == abortccb) { srb->srb_state = ARCMSR_SRB_ABORTED; printf("arcmsr%d: scsi id=%d lun=%jx abort srb '%p' " "outstanding command \n" , acb->pci_unit, abortccb->ccb_h.target_id , (uintmax_t)abortccb->ccb_h.target_lun, srb); arcmsr_polling_srbdone(acb, srb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); return (TRUE); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } return(FALSE); } /* **************************************************************************** **************************************************************************** */ static void arcmsr_bus_reset(struct AdapterControlBlock *acb) { int retry = 0; acb->num_resets++; acb->acb_flags |= ACB_F_BUS_RESET; while(acb->srboutstandingcount != 0 && retry < 400) { arcmsr_interrupt(acb); UDELAY(25000); retry++; } arcmsr_iop_reset(acb); acb->acb_flags &= ~ACB_F_BUS_RESET; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, union ccb *pccb) { if (pccb->ccb_h.target_lun) { pccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(pccb); return; } pccb->ccb_h.status |= CAM_REQ_CMP; switch (scsiio_cdb_ptr(&pccb->csio)[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer = pccb->csio.data_ptr; inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ inqdata[3] = 0; inqdata[4] = 31; /* length of additional data */ inqdata[5] = 0; inqdata[6] = 0; inqdata[7] = 0; strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ strncpy(&inqdata[32], "R001", 4); /* Product Revision */ memcpy(buffer, inqdata, sizeof(inqdata)); xpt_done(pccb); } break; case WRITE_BUFFER: case READ_BUFFER: { if (arcmsr_iop_message_xfer(acb, pccb)) { pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } xpt_done(pccb); } break; default: xpt_done(pccb); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb) { struct AdapterControlBlock *acb; acb = (struct AdapterControlBlock *) cam_sim_softc(psim); if(acb == NULL) { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); return; } switch (pccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct CommandControlBlock *srb; int target = pccb->ccb_h.target_id; int error; if 
(pccb->ccb_h.flags & CAM_CDB_PHYS) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); return; } if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); return; } if((srb = arcmsr_get_freesrb(acb)) == NULL) { pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pccb->ccb_h.arcmsr_ccbsrb_ptr = srb; pccb->ccb_h.arcmsr_ccbacb_ptr = acb; srb->pccb = pccb; error = bus_dmamap_load_ccb(acb->dm_segs_dmat , srb->dm_segs_dmamap , pccb , arcmsr_execute_srb, srb, /*flags*/0); if(error == EINPROGRESS) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ARCMSR_MAX_TARGETID; /* 0-16 */ cpi->max_lun = ARCMSR_MAX_TARGETLUN; /* 0-7 */ cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */ cpi->bus_id = cam_sim_bus(psim); strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); #ifdef CAM_NEW_TRAN_CODE if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G) cpi->base_transfer_speed = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cpi->base_transfer_speed = 600000; else cpi->base_transfer_speed = 300000; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } cpi->protocol = PROTO_SCSI; #endif cpi->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: { union ccb *pabort_ccb; pabort_ccb = pccb->cab.abort_ccb; switch (pabort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_CONT_TARGET_IO: if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; xpt_done(pabort_ccb); pccb->ccb_h.status |= CAM_REQ_CMP; } else { xpt_print_path(pabort_ccb->ccb_h.path); printf("Not found\n"); pccb->ccb_h.status |= CAM_PATH_INVALID; } break; case XPT_SCSI_IO: pccb->ccb_h.status |= CAM_UA_ABORT; break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; break; } xpt_done(pccb); break; } case XPT_RESET_BUS: case XPT_RESET_DEV: { u_int32_t i; arcmsr_bus_reset(acb); for (i=0; i < 500; i++) { DELAY(1000); } pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_TERM_IO: { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } cts = &pccb->cts; #ifdef CAM_NEW_TRAN_CODE { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_sas *sas; scsi = &cts->proto_specific.scsi; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; scsi->valid = CTS_SCSI_VALID_TQ; cts->protocol = PROTO_SCSI; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cts->protocol_version = SCSI_REV_SPC2; cts->transport_version = 0; cts->transport = XPORT_SAS; sas = &cts->xport_specific.sas; sas->valid = CTS_SAS_VALID_SPEED; if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G) sas->bitrate = 
1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) sas->bitrate = 600000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G) sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport_version = 2; cts->transport = XPORT_SPI; spi = &cts->xport_specific.spi; spi->flags = CTS_SPI_FLAGS_DISC_ENB; if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) spi->sync_period = 1; else spi->sync_period = 2; spi->sync_offset = 32; spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; spi->valid = CTS_SPI_VALID_DISC | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; } } #else { cts->flags = (CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cts->sync_period = 1; else cts->sync_period = 2; cts->sync_offset = 32; cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } #endif pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } case XPT_CALC_GEOMETRY: if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } #if __FreeBSD_version >= 500000 cam_calc_geometry(&pccb->ccg, 1); #else { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &pccb->ccg; if (ccg->block_size == 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } if(((1024L * 1024L)/ccg->block_size) < 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size); if(size_mb > 1024 ) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; pccb->ccb_h.status |= CAM_REQ_CMP; } #endif xpt_done(pccb); break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; acb->acb_flags |= ACB_F_MSG_START_BGRB; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 
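	/*
	** Unlike HBA/HBD, which only post the message register, HBC needs a
	** two-step kick: write the message code to inbound_msgaddr0, then
	** ring inbound_doorbell so the IOP notices the pending command. The
	** wait below polls for the firmware's acknowledge of that message.
	*/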
if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_start_hbd_bgrb(acb); break; } } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) == 0xFFFFFFFF) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } /* check if command done with no error*/ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'" "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'" "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; int index; u_int16_t error; polling_ccb_retry: poll_count++; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { index = phbbmu->doneq_index; if((flag_srb = phbbmu->done_qbuffer[index]) == 0) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'" "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'" "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int32_t outbound_write_pointer; u_int16_t error, doneq_index; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } doneq_index = arcmsr_get_doneq_index(phbdmu); flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; /* check if command done with no error*/ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_polling_hba_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_polling_hbb_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_polling_hbc_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_polling_hbd_srbdone(acb, poll_srb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** 
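** arcmsr_get_hbb_config() below mirrors arcmsr_get_hba_config(): post a
** GET_CONFIG message, wait for the firmware acknowledge, then pull the
** reply out of msgcode_rwbuffer one byte at a time. A hedged sketch of
** that copy idiom (the helper name is illustrative, not part of this
** driver; HBB reads through BAR index 1):
**
**	static void
**	copy_iop_bytes(struct AdapterControlBlock *acb, int bar,
**	    size_t off, char *dst, int len)
**	{
**		int i;
**
**		for (i = 0; i < len; i++)
**			dst[i] = bus_space_read_1(acb->btag[bar],
**			    acb->bhandle[bar], off + i);
**	}
**
** e.g. copy_iop_bytes(acb, 1, iop_firm_model, acb->firm_model, 8) could
** replace the first while loop in the function below.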
********************************************************************** */ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait " "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/
acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* 
********************************************************************** ********************************************************************** */ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_get_hba_config(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_get_hbb_config(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_get_hbc_config(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_get_hbd_config(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) { int timeout=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); } break; case ACB_ADAPTER_TYPE_C: { while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_D: { while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
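/*
** Note on the "dummy" register reads that follow: these are
** read-to-flush operations. PCI/PCIe memory writes are posted
** and may sit in host bridge buffers, so reading any register
** back from the same device forces the doorbell writes above
** to reach the controller before the driver proceeds.
*/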
CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */ CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */ } break; case ACB_ADAPTER_TYPE_D: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) { unsigned long srb_phyaddr; u_int32_t srb_phyaddr_hi32; u_int32_t srb_phyaddr_lo32; /* ******************************************************************** ** here we need to tell iop 331 our freesrb.HighPart ** if freesrb.HighPart is not zero ******************************************************************** */ srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr; srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ case ACB_ADAPTER_TYPE_B: { u_int32_t post_queue_phyaddr; struct HBB_MessageUnit *phbbmu; phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->postq_index = 0; phbbmu->doneq_index = 0; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); return FALSE; } post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBB_MessageUnit, post_qbuffer); CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); return FALSE; } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit); return FALSE; } } break; case ACB_ADAPTER_TYPE_C: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
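/*
** As in the type A case above, only the upper 32 bits of the
** SRB pool physical address need to be published to the IOP:
** msgcode_rwbuffer[0] carries the "set config" signature and
** msgcode_rwbuffer[1] the high dword, letting the firmware form
** full 64-bit command frame addresses for pools above 4GB.
*/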
CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; case ACB_ADAPTER_TYPE_D: { u_int32_t post_queue_phyaddr, done_queue_phyaddr; struct HBD_MessageUnit0 *phbdmu; phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->postq_index = 0; phbdmu->doneq_index = 0x40FF; post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, post_qbuffer); done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, done_qbuffer); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } break; } return (TRUE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); return; } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_iop_init(struct AdapterControlBlock *acb) { u_int32_t intmask_org; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); arcmsr_get_firmware_spec(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); acb->acb_flags |= ACB_F_IOP_INITED; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct AdapterControlBlock *acb = arg; struct CommandControlBlock *srb_tmp; u_int32_t i; unsigned long srb_phyaddr = (unsigned long)segs->ds_addr; acb->srb_phyaddr.phyaddr = srb_phyaddr; srb_tmp = (struct CommandControlBlock *)acb->uncacheptr; for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) { 
acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; printf("arcmsr%d:" " srb dmamap bus_dmamap_create error\n", acb->pci_unit); return; } if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D)) { srb_tmp->cdb_phyaddr_low = srb_phyaddr; srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16); } else srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5; srb_tmp->acb = acb; acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp; srb_phyaddr = srb_phyaddr + SRB_SIZE; srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE); } acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_free_resource(struct AdapterControlBlock *acb) { /* remove the control device */ if(acb->ioctl_dev != NULL) { destroy_dev(acb->ioctl_dev); } bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_init(struct AdapterControlBlock *acb) { ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock"); ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock"); ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock"); ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock"); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb) { ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); ARCMSR_LOCK_DESTROY(&acb->postDone_lock); ARCMSR_LOCK_DESTROY(&acb->srb_lock); ARCMSR_LOCK_DESTROY(&acb->isr_lock); } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_initialize(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); u_int16_t pci_command; int i, j,max_coherent_size; u_int32_t vendor_dev_id; vendor_dev_id = pci_get_devid(dev); acb->vendor_device_id = vendor_dev_id; acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch (vendor_dev_id) { case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: { acb->adapter_type = ACB_ADAPTER_TYPE_C; if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883) acb->adapter_bus_speed = ACB_BUS_SPEED_12G; else acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; case PCIDevVenIDARC1214: { acb->adapter_type = ACB_ADAPTER_TYPE_D; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0)); } break; case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1203: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1110: case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: 
case PCIDevVenIDARC1170: case PCIDevVenIDARC1210: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: { acb->adapter_type = ACB_ADAPTER_TYPE_A; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; default: { printf("arcmsr%d:" " unknown RAID adapter type \n", device_get_unit(dev)); return ENOMEM; } } #if __FreeBSD_version >= 700000 if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev), #else if(bus_dma_tag_create( /*PCI parent*/ NULL, #endif /*alignment*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/ BUS_SPACE_UNRESTRICTED, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->parent_dmat) != 0) { printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, #ifdef PAE /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, #else /*lowaddr*/ BUS_SPACE_MAXADDR, #endif /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &acb->isr_lock, #endif &acb->dm_segs_dmat) != 0) { bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* DMA tag for our srb structures....
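The 0x20 alignment requested below matches the 32-byte command frame granularity assumed elsewhere in this driver: the completion paths mask reply addresses with 0xFFFFFFE0 and the type A/B post path passes cdb_phyaddr_low shifted right by 5, both of which require the SRB pool to begin on a 32-byte boundary.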
Allocate the freesrb memory */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 0x20, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ max_coherent_size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->srb_dmat) != 0) { bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENXIO; } /* Allocation for our srbs */ if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); return ENXIO; } /* And permanently map them */ if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); return ENXIO; } pci_command = pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= PCIM_CMD_BUSMASTEREN; pci_command |= PCIM_CMD_PERRESPEN; pci_command |= PCIM_CMD_MWRICEN; /* Enable Busmaster */ pci_write_config(dev, PCIR_COMMAND, pci_command, 2); switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu; struct CommandControlBlock *freesrb; u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; vm_offset_t mem_base[]={0,0}; u_long size; if (vendor_dev_id == PCIDevVenIDARC1203) size = sizeof(struct HBB_DOORBELL_1203); else size = sizeof(struct HBB_DOORBELL); for(i=0; i < 2; i++) { if(i == 0) { acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i], RF_ACTIVE); } else { acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i], RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); return ENXIO; } mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); if(mem_base[i] == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i); return ENXIO; } 
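/*
** Record the bus tag/handle pair for this BAR; index 0 is the
** doorbell window (PCIR_BAR(0)) and index 1 the message
** rwbuffer window (PCIR_BAR(2)), matching the btag[]/bhandle[]
** indices the HBB register accessors use elsewhere in this file.
*/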
acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]); acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]); } freesrb = (struct CommandControlBlock *)acb->uncacheptr; acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE); phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0]; phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1]; if (vendor_dev_id == PCIDevVenIDARC1203) { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask); } else { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask); } } break; case ACB_ADAPTER_TYPE_C: { u_int32_t rid0 = PCIR_BAR(1); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu; u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE); phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0; } break; } if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { arcmsr_free_resource(acb); printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); return ENXIO; } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; /* ******************************************************************** ** init raid volume state ******************************************************************** */ for(i=0; i 
< ARCMSR_MAX_TARGETID; i++) { for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) { acb->devstate[i][j] = ARECA_RAID_GONE; } } arcmsr_iop_init(acb); return(0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_attach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); u_int32_t unit=device_get_unit(dev); struct ccb_setasync csa; struct cam_devq *devq; /* Device Queue to use for this SIM */ struct resource *irqres; int rid; if(acb == NULL) { printf("arcmsr%d: cannot allocate softc\n", unit); return (ENOMEM); } arcmsr_mutex_init(acb); acb->pci_dev = dev; acb->pci_unit = unit; if(arcmsr_initialize(dev)) { printf("arcmsr%d: initialize failure!\n", unit); arcmsr_mutex_destroy(acb); return ENXIO; } /* After setting up the adapter, map our interrupt */ rid = 0; irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL || #if __FreeBSD_version >= 700025 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { #else bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) { #endif arcmsr_free_resource(acb); arcmsr_mutex_destroy(acb); printf("arcmsr%d: unable to register interrupt handler!\n", unit); return ENXIO; } acb->irqres = irqres; /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus * start queue to reset to the idle loop. * * Create device queue of SIM(s) * (MAX_START_JOB - 1) : * max_sim_transactions */ devq = cam_simq_alloc(acb->maxOutstanding); if(devq == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_simq_alloc failure!\n", unit); return ENXIO; } #if __FreeBSD_version >= 700025 acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #else acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #endif if(acb->psim == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_simq_free(devq); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_sim_alloc failure!\n", unit); return ENXIO; } ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); #if __FreeBSD_version >= 700044 if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) { #else if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { #endif arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_sim_free(acb->psim, /*free_devq*/TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_bus_register failure!\n", unit); return ENXIO; } if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, /* free_simq */ TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_create_path failure!\n", unit); return ENXIO; } /* **************************************************** */ xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback = arcmsr_async; csa.callback_arg = acb->psim; xpt_action((union ccb *)&csa); ARCMSR_LOCK_RELEASE(&acb->isr_lock); 
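/*
** At this point the SIM is registered and the XPT_SASYNC_CB
** action above has hooked arcmsr_async() to AC_FOUND_DEVICE and
** AC_LOST_DEVICE events, so CAM will notify the driver of device
** arrival and departure on this bus.
*/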
/* Create the control device. */ acb->ioctl_dev = make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); #if __FreeBSD_version < 503000 acb->ioctl_dev->si_drv1 = acb; #endif #if __FreeBSD_version > 500005 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); #endif arcmsr_callout_init(&acb->devmap_callout); callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_probe(device_t dev) { u_int32_t id; u_int16_t sub_device_id; static char buf[256]; char x_type[]={"unknown"}; char *type; int raid6 = 1; if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { return (ENXIO); } sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch(id = pci_get_devid(dev)) { case PCIDevVenIDARC1110: case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: case PCIDevVenIDARC1210: raid6 = 0; /*FALLTHRU*/ case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: type = "SATA 3G"; break; case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: type = "SAS 3G"; break; case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: if (sub_device_id == ARECA_SUB_DEV_ID_1883) type = "SAS 12G"; else type = "SAS 6G"; break; case PCIDevVenIDARC1214: case PCIDevVenIDARC1203: type = "SATA 6G"; break; default: type = x_type; raid6 = 0; break; } if(type == x_type) return(ENXIO); sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n", type, raid6 ? 
"(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_shutdown(device_t dev) { u_int32_t i; u_int32_t intmask_org; struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); /* stop adapter background rebuild */ ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); /* abort all outstanding command */ acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; if(acb->srboutstandingcount != 0) { /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); } } } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; ARCMSR_LOCK_RELEASE(&acb->isr_lock); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_detach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); int i; callout_stop(&acb->devmap_callout); bus_teardown_intr(dev, acb->irqres, acb->ih); arcmsr_shutdown(dev); arcmsr_free_resource(acb); for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); } bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); xpt_free_path(acb->ppath); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, TRUE); ARCMSR_LOCK_RELEASE(&acb->isr_lock); arcmsr_mutex_destroy(acb); return (0); } #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb) { if((acb->pktRequestCount - acb->pktReturnCount) == 0) return; printf("Command Request Count =0x%x\n",acb->pktRequestCount); printf("Command Return Count =0x%x\n",acb->pktReturnCount); printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount)); printf("Queued Command Count =0x%x\n",acb->srboutstandingcount); } #endif Index: head/sys/dev/bce/if_bce.c =================================================================== --- head/sys/dev/bce/if_bce.c (revision 313981) +++ head/sys/dev/bce/if_bce.c (revision 313982) @@ -1,11611 +1,11611 @@ /*- * Copyright (c) 2006-2014 QLogic Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * The following controllers are supported by this driver: * BCM5706C A2, A3 * BCM5706S A2, A3 * BCM5708C B1, B2 * BCM5708S B1, B2 * BCM5709C A1, C0 * BCM5709S A1, C0 * BCM5716C C0 * BCM5716S C0 * * The following controllers are not supported by this driver: * BCM5706C A0, A1 (pre-production) * BCM5706S A0, A1 (pre-production) * BCM5708C A0, B0 (pre-production) * BCM5708S A0, B0 (pre-production) * BCM5709C A0 B0, B1, B2 (pre-production) * BCM5709S A0, B0, B1, B2 (pre-production) */ #include "opt_bce.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include "miibus_if.h" #include #include /****************************************************************************/ /* BCE Debug Options */ /****************************************************************************/ #ifdef BCE_DEBUG u32 bce_debug = BCE_WARN; /* 0 = Never */ /* 1 = 1 in 2,147,483,648 */ /* 256 = 1 in 8,388,608 */ /* 2048 = 1 in 1,048,576 */ /* 65536 = 1 in 32,768 */ /* 1048576 = 1 in 2,048 */ /* 268435456 = 1 in 8 */ /* 536870912 = 1 in 4 */ /* 1073741824 = 1 in 2 */ /* Controls how often the l2_fhdr frame error check will fail. */ int l2fhdr_error_sim_control = 0; /* Controls how often the unexpected attention check will fail. */ int unexpected_attention_sim_control = 0; /* Controls how often to simulate an mbuf allocation failure. */ int mbuf_alloc_failed_sim_control = 0; /* Controls how often to simulate a DMA mapping failure. */ int dma_map_addr_failed_sim_control = 0; /* Controls how often to simulate a bootcode failure. */ int bootcode_running_failure_sim_control = 0; #endif /****************************************************************************/ /* PCI Device ID Table */ /* */ /* Used by bce_probe() to identify the devices supported by this driver. */ /****************************************************************************/ #define BCE_DEVDESC_MAX 64 static const struct bce_type bce_devs[] = { /* BCM5706C Controllers and OEM boards. 
*/ { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, "HP NC370T Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, "HP NC370i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, "HP NC371i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5706 1000Base-T" }, /* BCM5706S controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, "HP NC370F Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5706 1000Base-SX" }, /* BCM5708C controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, "HP NC373T PCIe Multifunction Gig Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, "HP NC373i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, "HP NC374m PCIe Multifunction Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5708 1000Base-T" }, /* BCM5708S controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, "HP NC373m Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, "HP NC373i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, "HP NC373F PCIe Multifunc Giga Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5708 1000Base-SX" }, /* BCM5709C controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, "HP NC382i DP Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5709 1000Base-T" }, /* BCM5709S controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, "HP NC382i DP Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5709 1000Base-SX" }, /* BCM5716 controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5716 1000Base-T" }, { 0, 0, 0, 0, NULL } }; /****************************************************************************/ /* Supported Flash NVRAM device data. 
*/ /****************************************************************************/ static const struct flash_spec flash_table[] = { #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) #define NONBUFFERED_FLAGS (BCE_NV_WREN) /* Slow EEPROM */ {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - slow"}, /* Expansion entry 0001 */ {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0001"}, /* Saifun SA25F010 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, "Non-buffered flash (128kB)"}, /* Saifun SA25F020 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, "Non-buffered flash (256kB)"}, /* Expansion entry 0100 */ {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0100"}, /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, "Entry 0101: ST M45PE10 (128kB non-buffered)"}, /* Entry 0110: ST M45PE20 (non-buffered flash)*/ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, "Entry 0110: ST M45PE20 (256kB non-buffered)"}, /* Saifun SA25F005 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, "Non-buffered flash (64kB)"}, /* Fast EEPROM */ {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - fast"}, /* Expansion entry 1001 */ {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1001"}, /* Expansion entry 1010 */ {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1010"}, /* ATMEL AT45DB011B (buffered flash) */ {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, "Buffered flash (128kB)"}, /* Expansion entry 1100 */ {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1100"}, /* Expansion entry 1101 */ {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1101"}, /* Atmel Expansion entry 1110 */ {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, 0, "Entry 1110 (Atmel)"}, /* ATMEL AT45DB021B (buffered flash) */ {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, "Buffered flash (256kB)"}, }; /* * The BCM5709 controllers transparently handle the * differences between Atmel 264 byte pages and all * flash devices which use 256 byte pages, so no * logical-to-physical mapping is required in the * driver. */ static const struct flash_spec flash_5709 = { .flags = BCE_NV_BUFFERED, .page_bits = BCM5709_FLASH_PAGE_BITS, .page_size = BCM5709_FLASH_PAGE_SIZE, .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, .name = "5709/5716 buffered flash (256kB)", }; /****************************************************************************/ /* FreeBSD device entry points. */ /****************************************************************************/ static int bce_probe (device_t); static int bce_attach (device_t); static int bce_detach (device_t); static int bce_shutdown (device_t); /****************************************************************************/ /* BCE Debug Data Structure Dump Routines */ /****************************************************************************/ #ifdef BCE_DEBUG static u32 bce_reg_rd (struct bce_softc *, u32); static void bce_reg_wr (struct bce_softc *, u32, u32); static void bce_reg_wr16 (struct bce_softc *, u32, u16); static u32 bce_ctx_rd (struct bce_softc *, u32, u32); static void bce_dump_enet (struct bce_softc *, struct mbuf *); static void bce_dump_mbuf (struct bce_softc *, struct mbuf *); static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int); static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int); static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int); static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *); static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *); static void bce_dump_pgbd (struct bce_softc *, int, struct rx_bd *); static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *); static void bce_dump_ctx (struct bce_softc *, u16); static void bce_dump_ftqs (struct bce_softc *); static void bce_dump_tx_chain (struct bce_softc *, u16, int); static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int); static void bce_dump_pg_chain (struct bce_softc *, u16, int); static void bce_dump_status_block (struct bce_softc *); static void bce_dump_stats_block (struct bce_softc *); static void bce_dump_driver_state (struct bce_softc *); static void bce_dump_hw_state (struct bce_softc *); static void bce_dump_shmem_state (struct bce_softc *); static void bce_dump_mq_regs (struct bce_softc *); static void bce_dump_bc_state (struct bce_softc *); static void bce_dump_txp_state (struct bce_softc *, int); static void bce_dump_rxp_state (struct bce_softc *, int); static void bce_dump_tpat_state (struct bce_softc *, int); static void bce_dump_cp_state (struct bce_softc *, int); static void bce_dump_com_state (struct bce_softc *, int); static void bce_dump_rv2p_state (struct bce_softc *); static void bce_breakpoint (struct bce_softc *); #endif /*BCE_DEBUG */ /****************************************************************************/
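/*
** A note on the flash table above (a sketch, not the literal
** driver code): entries built with BUFFERED_FLAGS include
** BCE_NV_TRANSLATE, which asks the NVRAM access routines to
** remap logical byte offsets onto the Atmel 264-byte physical
** pages, roughly:
**
**   phys = ((offset / page_size) << page_bits) +
**       (offset % page_size);
**
** The 5709/5716 entry omits the flag because, as the comment
** above notes, those controllers perform the mapping in
** hardware.
*/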
/* BCE Register/Memory Access Routines */ /****************************************************************************/ static u32 bce_reg_rd_ind (struct bce_softc *, u32); static void bce_reg_wr_ind (struct bce_softc *, u32, u32); static void bce_shmem_wr (struct bce_softc *, u32, u32); static u32 bce_shmem_rd (struct bce_softc *, u32); static void bce_ctx_wr (struct bce_softc *, u32, u32, u32); static int bce_miibus_read_reg (device_t, int, int); static int bce_miibus_write_reg (device_t, int, int, int); static void bce_miibus_statchg (device_t); #ifdef BCE_DEBUG static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS); #ifdef BCE_NVRAM_WRITE_SUPPORT static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS); #endif #endif /****************************************************************************/ /* BCE NVRAM Access Routines */ /****************************************************************************/ static int bce_acquire_nvram_lock (struct bce_softc *); static int bce_release_nvram_lock (struct bce_softc *); static void bce_enable_nvram_access(struct bce_softc *); static void bce_disable_nvram_access(struct bce_softc *); static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32); static int bce_init_nvram (struct bce_softc *); static int bce_nvram_read (struct bce_softc *, u32, u8 *, int); static int bce_nvram_test (struct bce_softc *); #ifdef BCE_NVRAM_WRITE_SUPPORT static int bce_enable_nvram_write (struct bce_softc *); static void bce_disable_nvram_write(struct bce_softc *); static int bce_nvram_erase_page (struct bce_softc *, u32); static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32); static int bce_nvram_write (struct bce_softc *, u32, u8 *, int); #endif /****************************************************************************/ /* */ /****************************************************************************/ static void bce_get_rx_buffer_sizes(struct bce_softc *, int); static void bce_get_media (struct bce_softc *); static void bce_init_media (struct bce_softc *); static u32 bce_get_rphy_link (struct bce_softc *); static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int bce_dma_alloc (device_t); static void bce_dma_free (struct bce_softc *); static void bce_release_resources (struct bce_softc *); /****************************************************************************/ /* BCE Firmware Synchronization and Load */ /****************************************************************************/ static void bce_fw_cap_init (struct bce_softc *); static int bce_fw_sync (struct bce_softc *, u32); static void bce_load_rv2p_fw (struct bce_softc *, const u32 *, u32, u32); static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *); static void bce_start_cpu (struct bce_softc *, struct cpu_reg *); static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *); static void bce_start_rxp_cpu (struct bce_softc *); static void bce_init_rxp_cpu (struct bce_softc *); static void bce_init_txp_cpu (struct bce_softc *); static void bce_init_tpat_cpu (struct bce_softc *); static void bce_init_cp_cpu (struct bce_softc *); static void bce_init_com_cpu (struct bce_softc *); static void bce_init_cpus (struct bce_softc *); static void bce_print_adapter_info (struct bce_softc *); static void bce_probe_pci_caps (device_t, struct bce_softc *); static void bce_stop (struct bce_softc *); static int bce_reset (struct bce_softc *, u32); static int bce_chipinit (struct bce_softc *); static int bce_blockinit (struct bce_softc 
*); static int bce_init_tx_chain (struct bce_softc *); static void bce_free_tx_chain (struct bce_softc *); static int bce_get_rx_buf (struct bce_softc *, u16, u16, u32 *); static int bce_init_rx_chain (struct bce_softc *); static void bce_fill_rx_chain (struct bce_softc *); static void bce_free_rx_chain (struct bce_softc *); static int bce_get_pg_buf (struct bce_softc *, u16, u16); static int bce_init_pg_chain (struct bce_softc *); static void bce_fill_pg_chain (struct bce_softc *); static void bce_free_pg_chain (struct bce_softc *); static struct mbuf *bce_tso_setup (struct bce_softc *, struct mbuf **, u16 *); static int bce_tx_encap (struct bce_softc *, struct mbuf **); static void bce_start_locked (struct ifnet *); static void bce_start (struct ifnet *); static int bce_ioctl (struct ifnet *, u_long, caddr_t); static uint64_t bce_get_counter (struct ifnet *, ift_counter); static void bce_watchdog (struct bce_softc *); static int bce_ifmedia_upd (struct ifnet *); static int bce_ifmedia_upd_locked (struct ifnet *); static void bce_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void bce_ifmedia_sts_rphy (struct bce_softc *, struct ifmediareq *); static void bce_init_locked (struct bce_softc *); static void bce_init (void *); static void bce_mgmt_init_locked (struct bce_softc *sc); static int bce_init_ctx (struct bce_softc *); static void bce_get_mac_addr (struct bce_softc *); static void bce_set_mac_addr (struct bce_softc *); static void bce_phy_intr (struct bce_softc *); static inline u16 bce_get_hw_rx_cons (struct bce_softc *); static void bce_rx_intr (struct bce_softc *); static void bce_tx_intr (struct bce_softc *); static void bce_disable_intr (struct bce_softc *); static void bce_enable_intr (struct bce_softc *, int); static void bce_intr (void *); static void bce_set_rx_mode (struct bce_softc *); static void bce_stats_update (struct bce_softc *); static void bce_tick (void *); static void bce_pulse (void *); static void bce_add_sysctls (struct bce_softc *); /****************************************************************************/ /* FreeBSD device dispatch table. */ /****************************************************************************/ static device_method_t bce_methods[] = { /* Device interface (device_if.h) */ DEVMETHOD(device_probe, bce_probe), DEVMETHOD(device_attach, bce_attach), DEVMETHOD(device_detach, bce_detach), DEVMETHOD(device_shutdown, bce_shutdown), /* Supported by device interface but not used here. */ /* DEVMETHOD(device_identify, bce_identify), */ /* DEVMETHOD(device_suspend, bce_suspend), */ /* DEVMETHOD(device_resume, bce_resume), */ /* DEVMETHOD(device_quiesce, bce_quiesce), */ /* MII interface (miibus_if.h) */ DEVMETHOD(miibus_readreg, bce_miibus_read_reg), DEVMETHOD(miibus_writereg, bce_miibus_write_reg), DEVMETHOD(miibus_statchg, bce_miibus_statchg), /* Supported by MII interface but not used here. 
*/ /* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */ /* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */ DEVMETHOD_END }; static driver_t bce_driver = { "bce", bce_methods, sizeof(struct bce_softc) }; static devclass_t bce_devclass; MODULE_DEPEND(bce, pci, 1, 1, 1); MODULE_DEPEND(bce, ether, 1, 1, 1); MODULE_DEPEND(bce, miibus, 1, 1, 1); DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, NULL, NULL); DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL); /****************************************************************************/ /* Tunable device values */ /****************************************************************************/ static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD, 0, "bce driver parameters"); /* Allowable values are TRUE or FALSE */ static int bce_verbose = TRUE; SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0, "Verbose output enable/disable"); /* Allowable values are TRUE or FALSE */ static int bce_tso_enable = TRUE; SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, "TSO Enable/Disable"); /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ /* ToDo: Add MSI-X support. */ static int bce_msi_enable = 1; SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, "MSI-X|MSI|INTx selector"); /* Allowable values are 1, 2, 4, 8. */ static int bce_rx_pages = DEFAULT_RX_PAGES; SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0, "Receive buffer descriptor pages (1 page = 255 buffer descriptors)"); /* Allowable values are 1, 2, 4, 8. */ static int bce_tx_pages = DEFAULT_TX_PAGES; SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0, "Transmit buffer descriptor pages (1 page = 255 buffer descriptors)"); /* Allowable values are TRUE or FALSE. */ static int bce_hdr_split = TRUE; SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0, "Frame header/payload splitting Enable/Disable"); /* Allowable values are TRUE or FALSE. */ static int bce_strict_rx_mtu = FALSE; SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN, &bce_strict_rx_mtu, 0, "Enable/Disable strict RX frame size checking"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate 1 interrupt for every transmit completion. */ static int bce_tx_quick_cons_trip_int = 1; #else /* Generate 1 interrupt for every 20 transmit completions. */ static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN, &bce_tx_quick_cons_trip_int, 0, "Transmit BD trip point during interrupts"); /* Allowable values are 0 ... 100 */ /* Generate 1 interrupt for every transmit completion. */ #ifdef BCE_DEBUG static int bce_tx_quick_cons_trip = 1; #else /* Generate 1 interrupt for every 20 transmit completions. */ static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN, &bce_tx_quick_cons_trip, 0, "Transmit BD trip point"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an interrupt if 0us have elapsed since the last TX completion. */ static int bce_tx_ticks_int = 0; #else /* Generate an interrupt if 80us have elapsed since the last TX completion. */ static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN, &bce_tx_ticks_int, 0, "Transmit ticks count during interrupt"); /* Allowable values are 0 ... 
100 */ #ifdef BCE_DEBUG /* Generate an interrupt if 0us have elapsed since the last TX completion. */ static int bce_tx_ticks = 0; #else /* Generate an interrupt if 80us have elapsed since the last TX completion. */ static int bce_tx_ticks = DEFAULT_TX_TICKS; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN, &bce_tx_ticks, 0, "Transmit ticks count"); /* Allowable values are 1 ... 100 */ #ifdef BCE_DEBUG /* Generate 1 interrupt for every received frame. */ static int bce_rx_quick_cons_trip_int = 1; #else /* Generate 1 interrupt for every 6 received frames. */ static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN, &bce_rx_quick_cons_trip_int, 0, "Receive BD trip point during interrupts"); /* Allowable values are 1 ... 100 */ #ifdef BCE_DEBUG /* Generate 1 interrupt for every received frame. */ static int bce_rx_quick_cons_trip = 1; #else /* Generate 1 interrupt for every 6 received frames. */ static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN, &bce_rx_quick_cons_trip, 0, "Receive BD trip point"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an int. if 0us have elapsed since the last received frame. */ static int bce_rx_ticks_int = 0; #else /* Generate an int. if 18us have elapsed since the last received frame. */ static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN, &bce_rx_ticks_int, 0, "Receive ticks count during interrupt"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an int. if 0us have elapsed since the last received frame. */ static int bce_rx_ticks = 0; #else /* Generate an int. if 18us have elapsed since the last received frame. */ static int bce_rx_ticks = DEFAULT_RX_TICKS; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN, &bce_rx_ticks, 0, "Receive ticks count"); /****************************************************************************/ /* Device probe function. */ /* */ /* Compares the device to the driver's list of supported devices and */ /* reports back to the OS whether this is the right driver for the device. */ /* */ /* Returns: */ /* BUS_PROBE_DEFAULT on success, positive value on failure. */ /****************************************************************************/ static int bce_probe(device_t dev) { const struct bce_type *t; struct bce_softc *sc; char *descbuf; u16 vid = 0, did = 0, svid = 0, sdid = 0; t = bce_devs; sc = device_get_softc(dev); sc->bce_unit = device_get_unit(dev); sc->bce_dev = dev; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); DBPRINT(sc, BCE_EXTREME_LOAD, "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); /* Look through the list of known devices for a match. */ while(t->bce_name != NULL) { if ((vid == t->bce_vid) && (did == t->bce_did) && ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT); if (descbuf == NULL) return(ENOMEM); /* Print out the device identity. */
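/* Illustrative example (values hypothetical): for a table entry named "Broadcom NetXtreme II BCM5709 1000Base-T" on a part whose PCIR_REVID reads 0x20, the snprintf() below maps the high nibble to a revision letter (0x2 + 'A' = 'C') and the low nibble to a stepping digit, producing the description "Broadcom NetXtreme II BCM5709 1000Base-T (C0)". */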
snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)", t->bce_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); device_set_desc_copy(dev, descbuf); free(descbuf, M_TEMP); return(BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /****************************************************************************/ /* Adapter Information Print Function. */ /* */ /* Prints a summary of the adapter configuration (ASIC, bus, firmware, */ /* buffers, and feature flags) to the console at attach time. */ /* */ /* Returns: */ /* None. */ /****************************************************************************/ static void bce_print_adapter_info(struct bce_softc *sc) { int i = 0; DBENTER(BCE_VERBOSE_LOAD); if (bce_verbose || bootverbose) { BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid); printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); /* Bus info. */ if (sc->bce_flags & BCE_PCIE_FLAG) { printf("Bus (PCIe x%d, ", sc->link_width); switch (sc->link_speed) { case 1: printf("2.5Gbps); "); break; case 2: printf("5Gbps); "); break; default: printf("Unknown link speed); "); } } else { printf("Bus (PCI%s, %s, %dMHz); ", ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), sc->bus_speed_mhz); } /* Firmware version and device features. */ printf("B/C (%s); Bufs (RX:%d;TX:%d;PG:%d); Flags (", sc->bce_bc_ver, sc->rx_pages, sc->tx_pages, (bce_hdr_split == TRUE ? sc->pg_pages: 0)); if (bce_hdr_split == TRUE) { printf("SPLT"); i++; } if (sc->bce_flags & BCE_USING_MSI_FLAG) { if (i > 0) printf("|"); printf("MSI"); i++; } if (sc->bce_flags & BCE_USING_MSIX_FLAG) { if (i > 0) printf("|"); printf("MSI-X"); i++; } if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) { if (i > 0) printf("|"); printf("2.5G"); i++; } if (sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) { if (i > 0) printf("|"); printf("Remote PHY(%s)", sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG ? "FIBER" : "TP"); i++; } if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { if (i > 0) printf("|"); printf("MFW); MFW (%s)\n", sc->bce_mfw_ver); } else { printf(")\n"); } printf("Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n", sc->bce_rx_quick_cons_trip_int, sc->bce_rx_quick_cons_trip, sc->bce_rx_ticks_int, sc->bce_rx_ticks, sc->bce_tx_quick_cons_trip_int, sc->bce_tx_quick_cons_trip, sc->bce_tx_ticks_int, sc->bce_tx_ticks); } DBEXIT(BCE_VERBOSE_LOAD); } /****************************************************************************/ /* PCI Capabilities Probe Function. */ /* */ /* Walks the PCI capabilities list for the device to find what features are */ /* supported. */ /* */ /* Returns: */ /* None. */ /****************************************************************************/ static void bce_probe_pci_caps(device_t dev, struct bce_softc *sc) { u32 reg; DBENTER(BCE_VERBOSE_LOAD); /* Check if PCI-X capability is enabled. */ if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0) { if (reg != 0) sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; } /* Check if PCIe capability is enabled. */ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { if (reg != 0) { u16 link_status = pci_read_config(dev, reg + 0x12, 2); DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = " "0x%08X\n", link_status); sc->link_speed = link_status & 0xf; sc->link_width = (link_status >> 4) & 0x3f; sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; sc->bce_flags |= BCE_PCIE_FLAG; } } /* Check if MSI capability is enabled. 
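Note that pci_find_cap() returns 0 and stores the capability offset in reg when the capability is present; a nonzero offset is all the driver checks for here, since the actual MSI configuration is performed later through pci_alloc_msi().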
*/ if (pci_find_cap(dev, PCIY_MSI, &reg) == 0) { if (reg != 0) sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG; } /* Check if MSI-X capability is enabled. */ if (pci_find_cap(dev, PCIY_MSIX, &reg) == 0) { if (reg != 0) sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG; } DBEXIT(BCE_VERBOSE_LOAD); } /****************************************************************************/ /* Load and validate user tunable settings. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_set_tunables(struct bce_softc *sc) { /* Set sysctl values for RX page count. */ switch (bce_rx_pages) { case 1: /* fall-through */ case 2: /* fall-through */ case 4: /* fall-through */ case 8: sc->rx_pages = bce_rx_pages; break; default: sc->rx_pages = DEFAULT_RX_PAGES; BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_pages! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_pages, DEFAULT_RX_PAGES); } /* ToDo: Consider allowing user setting for pg_pages. */ sc->pg_pages = min((sc->rx_pages * 4), MAX_PG_PAGES); /* Set sysctl values for TX page count. */ switch (bce_tx_pages) { case 1: /* fall-through */ case 2: /* fall-through */ case 4: /* fall-through */ case 8: sc->tx_pages = bce_tx_pages; break; default: sc->tx_pages = DEFAULT_TX_PAGES; BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_pages! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_pages, DEFAULT_TX_PAGES); } /* * Validate the TX trip point (i.e. the number of * TX completions before a status block update is * generated and an interrupt is asserted). */ if (bce_tx_quick_cons_trip_int <= 100) { sc->bce_tx_quick_cons_trip_int = bce_tx_quick_cons_trip_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_quick_cons_trip_int! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_quick_cons_trip_int, DEFAULT_TX_QUICK_CONS_TRIP_INT); sc->bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT; } if (bce_tx_quick_cons_trip <= 100) { sc->bce_tx_quick_cons_trip = bce_tx_quick_cons_trip; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_quick_cons_trip! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_quick_cons_trip, DEFAULT_TX_QUICK_CONS_TRIP); sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; } /* * Validate the TX ticks count (i.e. the maximum amount * of time to wait after the last TX completion has * occurred before a status block update is generated * and an interrupt is asserted). */ if (bce_tx_ticks_int <= 100) { sc->bce_tx_ticks_int = bce_tx_ticks_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_ticks_int! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_ticks_int, DEFAULT_TX_TICKS_INT); sc->bce_tx_ticks_int = DEFAULT_TX_TICKS_INT; } if (bce_tx_ticks <= 100) { sc->bce_tx_ticks = bce_tx_ticks; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_ticks! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_ticks, DEFAULT_TX_TICKS); sc->bce_tx_ticks = DEFAULT_TX_TICKS; } /* * Validate the RX trip point (i.e. the number of * RX frames received before a status block update is * generated and an interrupt is asserted). */ if (bce_rx_quick_cons_trip_int <= 100) { sc->bce_rx_quick_cons_trip_int = bce_rx_quick_cons_trip_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_quick_cons_trip_int! 
Setting default of %d.\n", __FILE__, __LINE__, bce_rx_quick_cons_trip_int, DEFAULT_RX_QUICK_CONS_TRIP_INT); sc->bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT; } if (bce_rx_quick_cons_trip <= 100) { sc->bce_rx_quick_cons_trip = bce_rx_quick_cons_trip; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_quick_cons_trip! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_quick_cons_trip, DEFAULT_RX_QUICK_CONS_TRIP); sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; } /* * Validate the RX ticks count (i.e. the maximum amount * of time to wait after the last RX frame has been * received before a status block update is generated * and an interrupt is asserted). */ if (bce_rx_ticks_int <= 100) { sc->bce_rx_ticks_int = bce_rx_ticks_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_ticks_int! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_ticks_int, DEFAULT_RX_TICKS_INT); sc->bce_rx_ticks_int = DEFAULT_RX_TICKS_INT; } if (bce_rx_ticks <= 100) { sc->bce_rx_ticks = bce_rx_ticks; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_ticks! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_ticks, DEFAULT_RX_TICKS); sc->bce_rx_ticks = DEFAULT_RX_TICKS; } /* Disabling both RX ticks and RX trips will prevent interrupts. */ if ((bce_rx_quick_cons_trip == 0) && (bce_rx_ticks == 0)) { BCE_PRINTF("%s(%d): Cannot set both hw.bce.rx_ticks and " "hw.bce.rx_quick_cons_trip to 0. Setting default values.\n", __FILE__, __LINE__); sc->bce_rx_ticks = DEFAULT_RX_TICKS; sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; } /* Disabling both TX ticks and TX trips will prevent interrupts. */ if ((bce_tx_quick_cons_trip == 0) && (bce_tx_ticks == 0)) { BCE_PRINTF("%s(%d): Cannot set both hw.bce.tx_ticks and " "hw.bce.tx_quick_cons_trip to 0. Setting default values.\n", __FILE__, __LINE__); sc->bce_tx_ticks = DEFAULT_TX_TICKS; sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; } } /****************************************************************************/ /* Device attach function. */ /* */ /* Allocates device resources, performs secondary chip identification, */ /* resets and initializes the hardware, and initializes driver instance */ /* variables. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_attach(device_t dev) { struct bce_softc *sc; struct ifnet *ifp; u32 val; int count, error, rc = 0, rid; sc = device_get_softc(dev); sc->bce_dev = dev; DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); sc->bce_unit = device_get_unit(dev); /* Set initial device and PHY flags */ sc->bce_flags = 0; sc->bce_phy_flags = 0; bce_set_tunables(sc); pci_enable_busmaster(dev); /* Allocate PCI memory resources. */ rid = PCIR_BAR(0); sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bce_res_mem == NULL) { BCE_PRINTF("%s(%d): PCI memory allocation failed\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Get various resource handles. */ sc->bce_btag = rman_get_bustag(sc->bce_res_mem); sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem); bce_probe_pci_caps(dev, sc); rid = 1; count = 0; #if 0 /* Try allocating MSI-X interrupts. 
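This block is compiled out with #if 0: MSI-X support is still listed as a ToDo for this driver (see the hw.bce.msi_enable tunable above), so only the MSI and INTx allocation paths below are active.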
*/ if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) && (bce_msi_enable >= 2) && ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) != NULL)) { msi_needed = count = 1; if (((error = pci_alloc_msix(dev, &count)) != 0) || (count != msi_needed)) { BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d," "Received = %d, error = %d\n", __FILE__, __LINE__, msi_needed, count, error); count = 0; pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->bce_res_irq); sc->bce_res_irq = NULL; } else { DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n", __FUNCTION__); sc->bce_flags |= BCE_USING_MSIX_FLAG; } } #endif /* Try allocating a MSI interrupt. */ if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) && (bce_msi_enable >= 1) && (count == 0)) { count = 1; if ((error = pci_alloc_msi(dev, &count)) != 0) { BCE_PRINTF("%s(%d): MSI allocation failed! " "error = %d\n", __FILE__, __LINE__, error); count = 0; pci_release_msi(dev); } else { DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI " "interrupt.\n", __FUNCTION__); sc->bce_flags |= BCE_USING_MSI_FLAG; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG; rid = 1; } } /* Try allocating a legacy interrupt. */ if (count == 0) { DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n", __FUNCTION__); rid = 0; } sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (count != 0 ? 0 : RF_SHAREABLE)); /* Report any IRQ allocation errors. */ if (sc->bce_res_irq == NULL) { BCE_PRINTF("%s(%d): PCI map interrupt failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Initialize mutex for the current device instance. */ BCE_LOCK_INIT(sc, device_get_nameunit(dev)); /* * Configure byte swap and enable indirect register access. * Rely on CPU to do target byte swapping on big endian systems. * Access to registers outside of PCI configuration space is not * valid until this is done. */ pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); /* Save ASIC revision info. */ sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); /* Weed out any non-production controller revisions. */ switch(BCE_CHIP_ID(sc)) { case BCE_CHIP_ID_5706_A0: case BCE_CHIP_ID_5706_A1: case BCE_CHIP_ID_5708_A0: case BCE_CHIP_ID_5708_B0: case BCE_CHIP_ID_5709_A0: case BCE_CHIP_ID_5709_B0: case BCE_CHIP_ID_5709_B1: case BCE_CHIP_ID_5709_B2: BCE_PRINTF("%s(%d): Unsupported controller " "revision (%c%d)!\n", __FILE__, __LINE__, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); rc = ENODEV; goto bce_attach_fail; } /* * The embedded PCIe to PCI-X bridge (EPB) * in the 5708 cannot address memory above * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR; else sc->max_bus_addr = BUS_SPACE_MAXADDR; /* * Find the base address for shared memory access. * Newer versions of bootcode use a signature and offset * while older versions use a fixed address. */ val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG) /* Multi-port devices use different offsets in shared memory. */
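/* Illustrative layout (schematic): when the signature is valid, each PCI function has its own pointer slot in the shared memory header, so function 0 reads BCE_SHM_HDR_ADDR_0, function 1 reads BCE_SHM_HDR_ADDR_0 + 4, and so on; that is what the (pci_get_function() << 2) term below selects. */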
sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); else sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n", __FUNCTION__, sc->bce_shmem_base); /* Fetch the bootcode revision. */ val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); for (int i = 0, j = 0; i < 3; i++) { u8 num; num = (u8) (val >> (24 - (i * 8))); for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { if (num >= k || !skip0 || k == 1) { sc->bce_bc_ver[j++] = (num / k) + '0'; skip0 = 0; } } if (i != 2) sc->bce_bc_ver[j++] = '.'; } /* Check if any management firmware is enabled. */ val = bce_shmem_rd(sc, BCE_PORT_FEATURE); if (val & BCE_PORT_FEATURE_ASF_ENABLED) { sc->bce_flags |= BCE_MFW_ENABLE_FLAG; /* Allow time for firmware to enter the running state. */ for (int i = 0; i < 30; i++) { val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); if (val & BCE_CONDITION_MFW_RUN_MASK) break; DELAY(10000); } /* Check if management firmware is running. */ val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); val &= BCE_CONDITION_MFW_RUN_MASK; if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) && (val != BCE_CONDITION_MFW_RUN_NONE)) { u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); int i = 0; /* Read the management firmware version string. */ for (int j = 0; j < 3; j++) { val = bce_reg_rd_ind(sc, addr + j * 4); val = bswap32(val); memcpy(&sc->bce_mfw_ver[i], &val, 4); i += 4; } } else { /* May cause firmware synchronization timeouts. */ BCE_PRINTF("%s(%d): Management firmware enabled " "but not running!\n", __FILE__, __LINE__); strcpy(sc->bce_mfw_ver, "NOT RUNNING!"); /* ToDo: Any action the driver should take? */ } } /* Get PCI bus information (speed and type). */ val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { u32 clkreg; sc->bce_flags |= BCE_PCIX_FLAG; clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS); clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; switch (clkreg) { case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: sc->bus_speed_mhz = 133; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: sc->bus_speed_mhz = 100; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: sc->bus_speed_mhz = 66; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: sc->bus_speed_mhz = 50; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: sc->bus_speed_mhz = 33; break; } } else { if (val & BCE_PCICFG_MISC_STATUS_M66EN) sc->bus_speed_mhz = 66; else sc->bus_speed_mhz = 33; } if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) sc->bce_flags |= BCE_PCI_32BIT_FLAG; /* Find the media type for the adapter. */ bce_get_media(sc); /* Reset controller and announce to bootcode that driver is present. */ if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { BCE_PRINTF("%s(%d): Controller reset failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Initialize the controller. */ if (bce_chipinit(sc)) { BCE_PRINTF("%s(%d): Controller initialization failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Perform NVRAM test. */ if (bce_nvram_test(sc)) { BCE_PRINTF("%s(%d): NVRAM test failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Fetch the permanent Ethernet MAC address. */
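/* bce_get_mac_addr() recovers the permanent station address that the bootcode keeps in shared memory (a description of the existing mechanism, not new behavior). The statistics tick value set just below is a worked constant: 1000000 & 0xffff00 = 0xF4240 & 0xFFFF00 = 0xF4200 = 999936, i.e. roughly one second in microseconds, rounded down to the granularity the host coalescing block appears to accept. */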
bce_get_mac_addr(sc); /* Update statistics once every second. */ sc->bce_stats_ticks = 1000000 & 0xffff00; /* Store data needed by PHY driver for backplane applications */ sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG); /* Allocate DMA memory resources. */ if (bce_dma_alloc(dev)) { BCE_PRINTF("%s(%d): DMA resource allocation failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Allocate an ifnet structure. */ ifp = sc->bce_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { BCE_PRINTF("%s(%d): Interface allocation failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Initialize the ifnet interface. */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = bce_ioctl; ifp->if_start = bce_start; ifp->if_get_counter = bce_get_counter; ifp->if_init = bce_init; ifp->if_mtu = ETHERMTU; if (bce_tso_enable) { ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO; ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 | IFCAP_VLAN_HWTSO; } else { ifp->if_hwassist = BCE_IF_HWASSIST; ifp->if_capabilities = BCE_IF_CAPABILITIES; } #if __FreeBSD_version >= 800505 /* * Introducing IFCAP_LINKSTATE didn't bump __FreeBSD_version, * so this is an approximate value. */ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) ifp->if_capabilities |= IFCAP_LINKSTATE; #endif ifp->if_capenable = ifp->if_capabilities; /* * Assume standard mbuf sizes for buffer allocation. * This may change later if the MTU size is set to * something other than 1500. */ bce_get_rx_buffer_sizes(sc, (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN)); /* Recalculate our buffer allocation sizes. */ ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD_ALLOC; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) ifp->if_baudrate = IF_Mbps(2500ULL); else ifp->if_baudrate = IF_Mbps(1000); /* Handle any special PHY initialization for SerDes PHYs. */ bce_init_media(sc); if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { ifmedia_init(&sc->bce_ifmedia, IFM_IMASK, bce_ifmedia_upd, bce_ifmedia_sts); /* * We can't manually override the remote PHY's link state, and we * assume the PHY port configuration (Fiber or TP) is not changed * after device attach. This may not be correct though. */ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0) { if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) { ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_2500_SX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_2500_SX | IFM_FDX, 0, NULL); } ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); } else { ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); } ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO); sc->bce_ifmedia.ifm_media = sc->bce_ifmedia.ifm_cur->ifm_media; } else { /* Initialize the MII child bus by attaching the PHY. 
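mii_attach() probes for a PHY at sc->bce_phy_addr (MII_OFFSET_ANY accepts any PHY instance), restricts the advertised capabilities to BMSR_DEFCAPMASK, and MIIF_DOPAUSE additionally advertises flow-control pause support.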
*/ rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd, bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (rc != 0) { BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__, __LINE__); goto bce_attach_fail; } } /* Attach to the Ethernet interface list. */ ether_ifattach(ifp, sc->eaddr); #if __FreeBSD_version < 500000 callout_init(&sc->bce_tick_callout); callout_init(&sc->bce_pulse_callout); #else callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0); callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0); #endif /* Hookup IRQ last. */ rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bce_intr, sc, &sc->bce_intrhand); if (rc) { BCE_PRINTF("%s(%d): Failed to setup IRQ!\n", __FILE__, __LINE__); bce_detach(dev); goto bce_attach_exit; } /* * At this point we've acquired all the resources * we need to run so there's no turning back, we're * cleared for launch. */ /* Print some important debugging info. */ DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc)); /* Add the supported sysctls to the kernel. */ bce_add_sysctls(sc); BCE_LOCK(sc); /* * The chip reset earlier notified the bootcode that * a driver is present. We now need to start our pulse * routine so that the bootcode is reminded that we're * still running. */ bce_pulse(sc); bce_mgmt_init_locked(sc); BCE_UNLOCK(sc); /* Finally, print some useful adapter info */ bce_print_adapter_info(sc); DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n", __FUNCTION__, sc); goto bce_attach_exit; bce_attach_fail: bce_release_resources(sc); bce_attach_exit: DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); return(rc); } /****************************************************************************/ /* Device detach function. */ /* */ /* Stops the controller, resets the controller, and releases resources. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_detach(device_t dev) { struct bce_softc *sc = device_get_softc(dev); struct ifnet *ifp; u32 msg; DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); ifp = sc->bce_ifp; /* Stop and reset the controller. */ BCE_LOCK(sc); /* Stop the pulse so the bootcode can go to driver absent state. */ callout_stop(&sc->bce_pulse_callout); bce_stop(sc); if (sc->bce_flags & BCE_NO_WOL_FLAG) msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; else msg = BCE_DRV_MSG_CODE_UNLOAD; bce_reset(sc, msg); BCE_UNLOCK(sc); ether_ifdetach(ifp); /* If we have a child device on the MII bus remove it too. */ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) ifmedia_removeall(&sc->bce_ifmedia); else { bus_generic_detach(dev); device_delete_child(dev, sc->bce_miibus); } /* Release all remaining resources. */ bce_release_resources(sc); DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); return(0); } /****************************************************************************/ /* Device shutdown function. */ /* */ /* Stops and resets the controller. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. 
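As in bce_detach(), the reset message tells the bootcode whether wake-on-LAN remains possible: BCE_DRV_MSG_CODE_UNLOAD_LNK_DN is used to force the link down when WOL is unavailable.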
*/ /****************************************************************************/ static int bce_shutdown(device_t dev) { struct bce_softc *sc = device_get_softc(dev); u32 msg; DBENTER(BCE_VERBOSE); BCE_LOCK(sc); bce_stop(sc); if (sc->bce_flags & BCE_NO_WOL_FLAG) msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; else msg = BCE_DRV_MSG_CODE_UNLOAD; bce_reset(sc, msg); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE); return (0); } #ifdef BCE_DEBUG /****************************************************************************/ /* Register read. */ /* */ /* Returns: */ /* The value of the register. */ /****************************************************************************/ static u32 bce_reg_rd(struct bce_softc *sc, u32 offset) { u32 val = REG_RD(sc, offset); DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); return val; } /****************************************************************************/ /* Register write (16 bit). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val) { DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n", __FUNCTION__, offset, val); REG_WR16(sc, offset, val); } /****************************************************************************/ /* Register write. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val) { DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); REG_WR(sc, offset, val); } #endif /****************************************************************************/ /* Indirect register read. */ /* */ /* Reads NetXtreme II registers using an index/data register pair in PCI */ /* configuration space. Using this mechanism avoids issues with posted */ /* reads but is much slower than memory-mapped I/O. */ /* */ /* Returns: */ /* The value of the register. */ /****************************************************************************/ static u32 bce_reg_rd_ind(struct bce_softc *sc, u32 offset) { device_t dev; dev = sc->bce_dev; pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); #ifdef BCE_DEBUG { u32 val; val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); return val; } #else return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); #endif } /****************************************************************************/ /* Indirect register write. */ /* */ /* Writes NetXtreme II registers using an index/data register pair in PCI */ /* configuration space. Using this mechanism avoids issues with posted */ /* writes but is much slower than memory-mapped I/O. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val) { device_t dev; dev = sc->bce_dev; DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); } /****************************************************************************/ /* Shared memory write. */ /* */ /* Writes NetXtreme II shared memory region. */ /* */ /* Returns: */ /* Nothing. 
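Note: shared memory accesses reuse the indirect register window, i.e. this is a bce_reg_wr_ind() at address sc->bce_shmem_base + offset, so every access goes through the BCE_PCICFG_REG_WINDOW_ADDRESS/BCE_PCICFG_REG_WINDOW configuration-space pair.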
*/ /****************************************************************************/ static void bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val) { DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to " "0x%08X\n", __FUNCTION__, val, offset); bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); } /****************************************************************************/ /* Shared memory read. */ /* */ /* Reads NetXtreme II shared memory region. */ /* */ /* Returns: */ /* The 32 bit value read. */ /****************************************************************************/ static u32 bce_shmem_rd(struct bce_softc *sc, u32 offset) { u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from " "0x%08X\n", __FUNCTION__, val, offset); return val; } #ifdef BCE_DEBUG /****************************************************************************/ /* Context memory read. */ /* */ /* The NetXtreme II controller uses context memory to track connection */ /* information for L2 and higher network protocols. */ /* */ /* Returns: */ /* The requested 32 bit value of context memory. */ /****************************************************************************/ static u32 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset) { u32 idx, offset, retry_cnt = 5, val; DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID " "address: 0x%08X.\n", __FUNCTION__, cid_addr)); offset = ctx_offset + cid_addr; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ)); for (idx = 0; idx < retry_cnt; idx++) { val = REG_RD(sc, BCE_CTX_CTX_CTRL); if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0) break; DELAY(5); } if (val & BCE_CTX_CTX_CTRL_READ_REQ) BCE_PRINTF("%s(%d); Unable to read CTX memory: " "cid_addr = 0x%08X, offset = 0x%08X!\n", __FILE__, __LINE__, cid_addr, ctx_offset); val = REG_RD(sc, BCE_CTX_CTX_DATA); } else { REG_WR(sc, BCE_CTX_DATA_ADR, offset); val = REG_RD(sc, BCE_CTX_DATA); } DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val); return(val); } #endif /****************************************************************************/ /* Context memory write. */ /* */ /* The NetXtreme II controller uses context memory to track connection */ /* information for L2 and higher network protocols. */ /* */ /* Returns: */ /* Nothing. 
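On the 5709 the value is handed to the chip through BCE_CTX_CTX_DATA and a request is posted via BCE_CTX_CTX_CTRL, with completion polled on the WRITE_REQ bit; older controllers take the value directly through the BCE_CTX_DATA_ADR/BCE_CTX_DATA pair.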
*/ /****************************************************************************/ static void bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val) { u32 idx, offset = ctx_offset + cid_addr; u32 val, retry_cnt = 5; DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val); DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n", __FUNCTION__, cid_addr)); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); for (idx = 0; idx < retry_cnt; idx++) { val = REG_RD(sc, BCE_CTX_CTX_CTRL); if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) break; DELAY(5); } if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) BCE_PRINTF("%s(%d); Unable to write CTX memory: " "cid_addr = 0x%08X, offset = 0x%08X!\n", __FILE__, __LINE__, cid_addr, ctx_offset); } else { REG_WR(sc, BCE_CTX_DATA_ADR, offset); REG_WR(sc, BCE_CTX_DATA, ctx_val); } } /****************************************************************************/ /* PHY register read. */ /* */ /* Implements register reads on the MII bus. */ /* */ /* Returns: */ /* The value of the register. */ /****************************************************************************/ static int bce_miibus_read_reg(device_t dev, int phy, int reg) { struct bce_softc *sc; u32 val; int i; sc = device_get_softc(dev); /* * The 5709S PHY is an IEEE Clause 45 PHY * with special mappings to work with IEEE * Clause 22 register accesses. */ if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { if (reg >= MII_BMCR && reg <= MII_ANLPRNP) reg += 0x10; } if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val = REG_RD(sc, BCE_EMAC_MDIO_MODE); val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } val = BCE_MIPHY(phy) | BCE_MIREG(reg) | BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | BCE_EMAC_MDIO_COMM_START_BUSY; REG_WR(sc, BCE_EMAC_MDIO_COMM, val); for (i = 0; i < BCE_PHY_TIMEOUT; i++) { DELAY(10); val = REG_RD(sc, BCE_EMAC_MDIO_COMM); if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { DELAY(5); val = REG_RD(sc, BCE_EMAC_MDIO_COMM); val &= BCE_EMAC_MDIO_COMM_DATA; break; } } if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, " "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg); val = 0x0; } else { val = REG_RD(sc, BCE_EMAC_MDIO_COMM); } if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val = REG_RD(sc, BCE_EMAC_MDIO_MODE); val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } DB_PRINT_PHY_REG(reg, val); return (val & 0xffff); } /****************************************************************************/ /* PHY register write. */ /* */ /* Implements register writes on the MII bus. */ /* */ /* Returns: */ /* 0 on success. */ /****************************************************************************/ static int bce_miibus_write_reg(device_t dev, int phy, int reg, int val) { struct bce_softc *sc; u32 val1; int i; sc = device_get_softc(dev); DB_PRINT_PHY_REG(reg, val); /* * The 5709S PHY is an IEEE Clause 45 PHY * with special mappings to work with IEEE * Clause 22 register accesses. 
*/ if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { if (reg >= MII_BMCR && reg <= MII_ANLPRNP) reg += 0x10; } if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | BCE_EMAC_MDIO_COMM_COMMAND_WRITE | BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); for (i = 0; i < BCE_PHY_TIMEOUT; i++) { DELAY(10); val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { DELAY(5); break; } } if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) BCE_PRINTF("%s(%d): PHY write timeout!\n", __FILE__, __LINE__); if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } return 0; } /****************************************************************************/ /* MII bus status change. */ /* */ /* Called by the MII bus driver when the PHY establishes link to set the */ /* MAC interface registers. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_miibus_statchg(device_t dev) { struct bce_softc *sc; struct mii_data *mii; struct ifmediareq ifmr; int media_active, media_status, val; sc = device_get_softc(dev); DBENTER(BCE_VERBOSE_PHY); if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { bzero(&ifmr, sizeof(ifmr)); bce_ifmedia_sts_rphy(sc, &ifmr); media_active = ifmr.ifm_active; media_status = ifmr.ifm_status; } else { mii = device_get_softc(sc->bce_miibus); media_active = mii->mii_media_active; media_status = mii->mii_media_status; } /* Ignore invalid media status. */ if ((media_status & (IFM_ACTIVE | IFM_AVALID)) != (IFM_ACTIVE | IFM_AVALID)) goto bce_miibus_statchg_exit; val = REG_RD(sc, BCE_EMAC_MODE); val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK | BCE_EMAC_MODE_25G); /* Set MII or GMII interface based on the PHY speed. */ switch (IFM_SUBTYPE(media_active)) { case IFM_10_T: if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { DBPRINT(sc, BCE_INFO_PHY, "Enabling 10Mb interface.\n"); val |= BCE_EMAC_MODE_PORT_MII_10; break; } /* fall-through */ case IFM_100_TX: DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n"); val |= BCE_EMAC_MODE_PORT_MII; break; case IFM_2500_SX: DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n"); val |= BCE_EMAC_MODE_25G; /* fall-through */ case IFM_1000_T: case IFM_1000_SX: DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n"); val |= BCE_EMAC_MODE_PORT_GMII; break; default: DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling " "default GMII interface.\n"); val |= BCE_EMAC_MODE_PORT_GMII; } /* Set half or full duplex based on PHY settings. 
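(IFM_FDX in the resolved media word selects full duplex; in its absence the EMAC_MODE_HALF_DUPLEX bit is set below.)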
*/ if ((IFM_OPTIONS(media_active) & IFM_FDX) == 0) { DBPRINT(sc, BCE_INFO_PHY, "Setting Half-Duplex interface.\n"); val |= BCE_EMAC_MODE_HALF_DUPLEX; } else DBPRINT(sc, BCE_INFO_PHY, "Setting Full-Duplex interface.\n"); REG_WR(sc, BCE_EMAC_MODE, val); if ((IFM_OPTIONS(media_active) & IFM_ETH_RXPAUSE) != 0) { DBPRINT(sc, BCE_INFO_PHY, "%s(): Enabling RX flow control.\n", __FUNCTION__); BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN); sc->bce_flags |= BCE_USING_RX_FLOW_CONTROL; } else { DBPRINT(sc, BCE_INFO_PHY, "%s(): Disabling RX flow control.\n", __FUNCTION__); BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN); sc->bce_flags &= ~BCE_USING_RX_FLOW_CONTROL; } if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0) { DBPRINT(sc, BCE_INFO_PHY, "%s(): Enabling TX flow control.\n", __FUNCTION__); BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN); sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL; } else { DBPRINT(sc, BCE_INFO_PHY, "%s(): Disabling TX flow control.\n", __FUNCTION__); BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN); sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL; } /* ToDo: Update watermarks in bce_init_rx_context(). */ bce_miibus_statchg_exit: DBEXIT(BCE_VERBOSE_PHY); } /****************************************************************************/ /* Acquire NVRAM lock. */ /* */ /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ /* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is */ /* for use by the driver. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_acquire_nvram_lock(struct bce_softc *sc) { u32 val; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* Request access to the flash interface. */ REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { val = REG_RD(sc, BCE_NVM_SW_ARB); if (val & BCE_NVM_SW_ARB_ARB_ARB2) break; DELAY(5); } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); rc = EBUSY; } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } /****************************************************************************/ /* Release NVRAM lock. */ /* */ /* When the caller is finished accessing NVRAM the lock must be released. */ /* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is */ /* for use by the driver. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_release_nvram_lock(struct bce_softc *sc) { u32 val; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* * Relinquish the NVRAM interface. */ REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { val = REG_RD(sc, BCE_NVM_SW_ARB); if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) break; DELAY(5); } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n"); rc = EBUSY; } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Enable NVRAM write access. */ /* */ /* Before writing to NVRAM the caller must enable NVRAM writes. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. 
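For non-buffered (SPI-style) flash parts this also issues a write-enable command through BCE_NVM_COMMAND and polls the DONE bit before returning.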
*/ /****************************************************************************/ static int bce_enable_nvram_write(struct bce_softc *sc) { u32 val; int rc = 0; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_MISC_CFG); REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI); if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { int j; REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { DELAY(5); val = REG_RD(sc, BCE_NVM_COMMAND); if (val & BCE_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n"); rc = EBUSY; } } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } /****************************************************************************/ /* Disable NVRAM write access. */ /* */ /* When the caller is finished writing to NVRAM write access must be */ /* disabled. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_disable_nvram_write(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_MISC_CFG); REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN); DBEXIT(BCE_VERBOSE_NVRAM); } #endif /****************************************************************************/ /* Enable NVRAM access. */ /* */ /* Before accessing NVRAM for read or write operations the caller must */ /* enable NVRAM access. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_enable_nvram_access(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); /* Enable both bits, even on read. */ REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN); DBEXIT(BCE_VERBOSE_NVRAM); } /****************************************************************************/ /* Disable NVRAM access. */ /* */ /* When the caller is finished accessing NVRAM access must be disabled. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_disable_nvram_access(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); /* Disable both bits, even after read. */ REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN)); DBEXIT(BCE_VERBOSE_NVRAM); } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Erase NVRAM page before writing. */ /* */ /* Non-buffered flash parts require that a page be erased before it is */ /* written. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_nvram_erase_page(struct bce_softc *sc, u32 offset) { u32 cmd; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* Buffered flash doesn't require an erase. */ if (sc->bce_flash_info->flags & BCE_NV_BUFFERED) goto bce_nvram_erase_page_exit; /* Build an erase command. */ cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR | BCE_NVM_COMMAND_DOIT; /* * Clear the DONE bit separately, set the NVRAM address to erase, * and issue the erase command. */ REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); REG_WR(sc, BCE_NVM_COMMAND, cmd); /* Wait for completion. 
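The DONE bit in BCE_NVM_COMMAND is polled up to NVRAM_TIMEOUT_COUNT times with a 5 microsecond delay per attempt; an erase that has not completed by then is reported as EBUSY.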
*/ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { u32 val; DELAY(5); val = REG_RD(sc, BCE_NVM_COMMAND); if (val & BCE_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n"); rc = EBUSY; } bce_nvram_erase_page_exit: DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #endif /* BCE_NVRAM_WRITE_SUPPORT */ /****************************************************************************/ /* Read a dword (32 bits) from NVRAM. */ /* */ /* Read a 32 bit word from NVRAM. The caller is assumed to have already */ /* obtained the NVRAM lock and enabled the controller for NVRAM access. */ /* */ /* Returns: */ /* 0 on success and the 32 bit value read, positive value on failure. */ /****************************************************************************/ static int bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val, u32 cmd_flags) { u32 cmd; int i, rc = 0; DBENTER(BCE_EXTREME_NVRAM); /* Build the command word. */ cmd = BCE_NVM_COMMAND_DOIT | cmd_flags; /* Calculate the offset for buffered flash if translation is used. */ if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { offset = ((offset / sc->bce_flash_info->page_size) << sc->bce_flash_info->page_bits) + (offset % sc->bce_flash_info->page_size); } /* * Clear the DONE bit separately, set the address to read, * and issue the read. */ REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); REG_WR(sc, BCE_NVM_COMMAND, cmd); /* Wait for completion. */ for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { u32 val; DELAY(5); val = REG_RD(sc, BCE_NVM_COMMAND); if (val & BCE_NVM_COMMAND_DONE) { val = REG_RD(sc, BCE_NVM_READ); val = bce_be32toh(val); memcpy(ret_val, &val, 4); break; } } /* Check for errors. */ if (i >= NVRAM_TIMEOUT_COUNT) { BCE_PRINTF("%s(%d): Timeout error reading NVRAM at " "offset 0x%08X!\n", __FILE__, __LINE__, offset); rc = EBUSY; } DBEXIT(BCE_EXTREME_NVRAM); return(rc); } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Write a dword (32 bits) to NVRAM. */ /* */ /* Write a 32 bit word to NVRAM. The caller is assumed to have already */ /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */ /* enabled NVRAM write access. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val, u32 cmd_flags) { u32 cmd, val32; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* Build the command word. */ cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags; /* Calculate the offset for buffered flash if translation is used. */ if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { offset = ((offset / sc->bce_flash_info->page_size) << sc->bce_flash_info->page_bits) + (offset % sc->bce_flash_info->page_size); } /* * Clear the DONE bit separately, convert NVRAM data to big-endian, * set the NVRAM address to write, and issue the write command */ REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); memcpy(&val32, val, 4); val32 = htobe32(val32); REG_WR(sc, BCE_NVM_WRITE, val32); REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); REG_WR(sc, BCE_NVM_COMMAND, cmd); /* Wait for completion. 
*/ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { DELAY(5); if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) { BCE_PRINTF("%s(%d): Timeout error writing NVRAM at " "offset 0x%08X\n", __FILE__, __LINE__, offset); rc = EBUSY; } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #endif /* BCE_NVRAM_WRITE_SUPPORT */ /****************************************************************************/ /* Initialize NVRAM access. */ /* */ /* Identify the NVRAM device in use and prepare the NVRAM interface to */ /* access that device. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_init_nvram(struct bce_softc *sc) { u32 val; int j, entry_count, rc = 0; const struct flash_spec *flash; DBENTER(BCE_VERBOSE_NVRAM); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { sc->bce_flash_info = &flash_5709; goto bce_init_nvram_get_flash_size; } /* Determine the selected interface. */ val = REG_RD(sc, BCE_NVM_CFG1); entry_count = sizeof(flash_table) / sizeof(struct flash_spec); /* * Flash reconfiguration is required to support additional * NVRAM devices not directly supported in hardware. * Check if the flash interface was reconfigured * by the bootcode. */ if (val & 0x40000000) { /* Flash interface reconfigured by bootcode. */ DBPRINT(sc,BCE_INFO_LOAD, "bce_init_nvram(): Flash WAS reconfigured.\n"); for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { if ((val & FLASH_BACKUP_STRAP_MASK) == (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { sc->bce_flash_info = flash; break; } } } else { /* Flash interface not yet reconfigured. */ u32 mask; DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n", __FUNCTION__); if (val & (1 << 23)) mask = FLASH_BACKUP_STRAP_MASK; else mask = FLASH_STRAP_MASK; /* Look for the matching NVRAM device configuration data. */ for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { /* Check if the device matches any of the known devices. */ if ((val & mask) == (flash->strapping & mask)) { /* Found a device match. */ sc->bce_flash_info = flash; /* Request access to the flash interface. */ if ((rc = bce_acquire_nvram_lock(sc)) != 0) return rc; /* Reconfigure the flash interface. */ bce_enable_nvram_access(sc); REG_WR(sc, BCE_NVM_CFG1, flash->config1); REG_WR(sc, BCE_NVM_CFG2, flash->config2); REG_WR(sc, BCE_NVM_CFG3, flash->config3); REG_WR(sc, BCE_NVM_WRITE1, flash->write1); bce_disable_nvram_access(sc); bce_release_nvram_lock(sc); break; } } } /* Check if a matching device was found. */ if (j == entry_count) { sc->bce_flash_info = NULL; BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n", __FILE__, __LINE__); DBEXIT(BCE_VERBOSE_NVRAM); return (ENODEV); } bce_init_nvram_get_flash_size: /* Read the flash size from the shared memory interface. */ val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2); val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; if (val) sc->bce_flash_size = val; else sc->bce_flash_size = sc->bce_flash_info->total_size; DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n", __FUNCTION__, sc->bce_flash_info->name, sc->bce_flash_info->total_size); DBEXIT(BCE_VERBOSE_NVRAM); return rc; } /****************************************************************************/ /* Read an arbitrary range of data from NVRAM. */ /* */ /* Prepares the NVRAM interface for access and reads the requested data */ /* into the supplied buffer. 
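Reads are serviced in 32-bit units: a request that is not dword-aligned is widened to aligned dword reads bracketed by the FIRST/LAST command flags, and memcpy() trims the head and tail back to the caller's range. For example (illustrative), a 7-byte read at offset 6 becomes dword reads at offsets 4, 8 and 12.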
*/ /* */ /* Returns: */ /* 0 on success and the data read, positive value on failure. */ /****************************************************************************/ static int bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf, int buf_size) { int rc = 0; u32 cmd_flags, offset32, len32, extra; DBENTER(BCE_VERBOSE_NVRAM); if (buf_size == 0) goto bce_nvram_read_exit; /* Request access to the flash interface. */ if ((rc = bce_acquire_nvram_lock(sc)) != 0) goto bce_nvram_read_exit; /* Enable access to flash interface */ bce_enable_nvram_access(sc); len32 = buf_size; offset32 = offset; extra = 0; cmd_flags = 0; if (offset32 & 3) { u8 buf[4]; u32 pre_len; offset32 &= ~3; pre_len = 4 - (offset & 3); if (pre_len >= len32) { pre_len = len32; cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; } else { cmd_flags = BCE_NVM_COMMAND_FIRST; } rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); if (rc) return rc; memcpy(ret_buf, buf + (offset & 3), pre_len); offset32 += 4; ret_buf += pre_len; len32 -= pre_len; } if (len32 & 3) { extra = 4 - (len32 & 3); len32 = (len32 + 4) & ~3; } if (len32 == 4) { u8 buf[4]; if (cmd_flags) cmd_flags = BCE_NVM_COMMAND_LAST; else cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); memcpy(ret_buf, buf, 4 - extra); } else if (len32 > 0) { u8 buf[4]; /* Read the first word. */ if (cmd_flags) cmd_flags = 0; else cmd_flags = BCE_NVM_COMMAND_FIRST; rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); /* Advance to the next dword. */ offset32 += 4; ret_buf += 4; len32 -= 4; while (len32 > 4 && rc == 0) { rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); /* Advance to the next dword. */ offset32 += 4; ret_buf += 4; len32 -= 4; } if (rc) goto bce_nvram_read_locked_exit; cmd_flags = BCE_NVM_COMMAND_LAST; rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); memcpy(ret_buf, buf, 4 - extra); } bce_nvram_read_locked_exit: /* Disable access to flash interface and release the lock. */ bce_disable_nvram_access(sc); bce_release_nvram_lock(sc); bce_nvram_read_exit: DBEXIT(BCE_VERBOSE_NVRAM); return rc; } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Write an arbitrary range of data to NVRAM. */ /* */ /* Prepares the NVRAM interface for write access and writes the requested */ /* data from the supplied buffer. The caller is responsible for */ /* calculating any appropriate CRCs. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. 
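Writes use a read-modify-write of whole flash pages: unaligned head and tail bytes are first read back (see 'start'/'end' below), the affected page is erased on non-buffered parts, and the merged page image is written dword by dword. For example (illustrative), a 6-byte write at offset 0x102 is widened to the aligned range 0x100-0x108 before the page is rewritten.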
*/ /****************************************************************************/ static int bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, int buf_size) { u32 written, offset32, len32; u8 *buf, start[4], end[4]; int rc = 0; int align_start, align_end; DBENTER(BCE_VERBOSE_NVRAM); buf = data_buf; offset32 = offset; len32 = buf_size; align_start = align_end = 0; if ((align_start = (offset32 & 3))) { offset32 &= ~3; len32 += align_start; if ((rc = bce_nvram_read(sc, offset32, start, 4))) goto bce_nvram_write_exit; } if (len32 & 3) { if ((len32 > 4) || !align_start) { align_end = 4 - (len32 & 3); len32 += align_end; if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4))) { goto bce_nvram_write_exit; } } } if (align_start || align_end) { buf = malloc(len32, M_DEVBUF, M_NOWAIT); - if (buf == 0) { + if (buf == NULL) { rc = ENOMEM; goto bce_nvram_write_exit; } if (align_start) { memcpy(buf, start, 4); } if (align_end) { memcpy(buf + len32 - 4, end, 4); } memcpy(buf + align_start, data_buf, buf_size); } written = 0; while ((written < len32) && (rc == 0)) { u32 page_start, page_end, data_start, data_end; u32 addr, cmd_flags; int i; u8 flash_buffer[264]; /* Find the page_start addr */ page_start = offset32 + written; page_start -= (page_start % sc->bce_flash_info->page_size); /* Find the page_end addr */ page_end = page_start + sc->bce_flash_info->page_size; /* Find the data_start addr */ data_start = (written == 0) ? offset32 : page_start; /* Find the data_end addr */ data_end = (page_end > offset32 + len32) ? (offset32 + len32) : page_end; /* Request access to the flash interface. */ if ((rc = bce_acquire_nvram_lock(sc)) != 0) goto bce_nvram_write_exit; /* Enable access to flash interface */ bce_enable_nvram_access(sc); cmd_flags = BCE_NVM_COMMAND_FIRST; if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { int j; /* Read the whole page into the buffer * (non-buffer flash only) */ for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { if (j == (sc->bce_flash_info->page_size - 4)) { cmd_flags |= BCE_NVM_COMMAND_LAST; } rc = bce_nvram_read_dword(sc, page_start + j, &flash_buffer[j], cmd_flags); if (rc) goto bce_nvram_write_locked_exit; cmd_flags = 0; } } /* Enable writes to flash interface (unlock write-protect) */ if ((rc = bce_enable_nvram_write(sc)) != 0) goto bce_nvram_write_locked_exit; /* Erase the page */ if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) goto bce_nvram_write_locked_exit; /* Re-enable the write again for the actual write */ bce_enable_nvram_write(sc); /* Loop to write back the buffer data from page_start to * data_start */ i = 0; if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { for (addr = page_start; addr < data_start; addr += 4, i += 4) { rc = bce_nvram_write_dword(sc, addr, &flash_buffer[i], cmd_flags); if (rc != 0) goto bce_nvram_write_locked_exit; cmd_flags = 0; } } /* Loop to write the new data from data_start to data_end */ for (addr = data_start; addr < data_end; addr += 4, i++) { if ((addr == page_end - 4) || ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) && (addr == data_end - 4))) { cmd_flags |= BCE_NVM_COMMAND_LAST; } rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags); if (rc != 0) goto bce_nvram_write_locked_exit; cmd_flags = 0; buf += 4; } /* Loop to write back the buffer data from data_end * to page_end */ if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { for (addr = data_end; addr < page_end; addr += 4, i += 4) { if (addr == page_end-4) { cmd_flags = BCE_NVM_COMMAND_LAST; } rc = bce_nvram_write_dword(sc, addr, 
				    &flash_buffer[i], cmd_flags);
				if (rc != 0)
					goto bce_nvram_write_locked_exit;
				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bce_disable_nvram_write(sc);

		/* Disable access to flash interface */
		bce_disable_nvram_access(sc);
		bce_release_nvram_lock(sc);

		/* Increment written */
		written += data_end - data_start;
	}

	goto bce_nvram_write_exit;

bce_nvram_write_locked_exit:
	bce_disable_nvram_write(sc);
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

bce_nvram_write_exit:
	if (align_start || align_end)
		free(buf, M_DEVBUF);

	DBEXIT(BCE_VERBOSE_NVRAM);
	return (rc);
}
#endif /* BCE_NVRAM_WRITE_SUPPORT */

/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data.               */
/*                                                                          */
/* Reads the configuration data from NVRAM and verifies that the CRC is     */
/* correct.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_test(struct bce_softc *sc)
{
	u32 buf[BCE_NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
		BCE_PRINTF("%s(%d): Unable to read NVRAM!\n",
		    __FILE__, __LINE__);
		goto bce_nvram_test_exit;
	}

	/*
	 * Verify that offset 0 of the NVRAM contains
	 * a valid magic number.
	 */
	magic = bce_be32toh(buf[0]);
	if (magic != BCE_NVRAM_MAGIC) {
		rc = ENODEV;
		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
		goto bce_nvram_test_exit;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
		BCE_PRINTF("%s(%d): Unable to read manufacturing "
		    "information from NVRAM!\n", __FILE__, __LINE__);
		goto bce_nvram_test_exit;
	}

	csum = ether_crc32_le(data, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		rc = ENODEV;
		BCE_PRINTF("%s(%d): Invalid manufacturing information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
		goto bce_nvram_test_exit;
	}

	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		rc = ENODEV;
		BCE_PRINTF("%s(%d): Invalid feature configuration "
		    "information NVRAM CRC! Expected: 0x%08X, "
		    "Found: 0x%08X\n", __FILE__, __LINE__,
		    BCE_CRC32_RESIDUAL, csum);
	}

bce_nvram_test_exit:
	DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
	return rc;
}

/****************************************************************************/
/* Calculates the size of the buffers to allocate based on the MTU.         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
{
	DBENTER(BCE_VERBOSE_LOAD);

	/* Use a single allocation type when header splitting enabled. */
	if (bce_hdr_split == TRUE) {
		sc->rx_bd_mbuf_alloc_size = MHLEN;
		/* Make sure offset is 16 byte aligned for hardware. */
		sc->rx_bd_mbuf_align_pad =
		    roundup2(MSIZE - MHLEN, 16) - (MSIZE - MHLEN);
		sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
		    sc->rx_bd_mbuf_align_pad;
	} else {
		if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
		    ETHER_CRC_LEN) > MCLBYTES) {
			/* Setup for jumbo RX buffer allocations.
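			 *
			 * Worked example (editor's note; values are the
			 * stock FreeBSD constants): a frame needs mtu +
			 * ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN (4) +
			 * ETHER_CRC_LEN (4) bytes of buffer.  For mtu =
			 * 1500 that is 1522 <= MCLBYTES (2048), which takes
			 * the standard-cluster branch further below; a
			 * jumbo mtu = 9000 gives 9022 > 2048 and lands
			 * here, selecting MJUM9BYTES (9216-byte) clusters.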
*/ sc->rx_bd_mbuf_alloc_size = MJUM9BYTES; sc->rx_bd_mbuf_align_pad = roundup2(MJUM9BYTES, 16) - MJUM9BYTES; sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - sc->rx_bd_mbuf_align_pad; } else { /* Setup for standard RX buffer allocations. */ sc->rx_bd_mbuf_alloc_size = MCLBYTES; sc->rx_bd_mbuf_align_pad = roundup2(MCLBYTES, 16) - MCLBYTES; sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - sc->rx_bd_mbuf_align_pad; } } // DBPRINT(sc, BCE_INFO_LOAD, DBPRINT(sc, BCE_WARN, "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, " "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len, sc->rx_bd_mbuf_align_pad); DBEXIT(BCE_VERBOSE_LOAD); } /****************************************************************************/ /* Identifies the current media type of the controller and sets the PHY */ /* address. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_get_media(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_PHY); /* Assume PHY address for copper controllers. */ sc->bce_phy_addr = 1; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; u32 strap; /* * The BCM5709S is software configurable * for Copper or SerDes operation. */ if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " "for copper.\n"); goto bce_get_media_exit; } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " "for dual media.\n"); sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; goto bce_get_media_exit; } if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; else strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; if (pci_get_function(sc->bce_dev) == 0) { switch (strap) { case 0x4: case 0x5: case 0x6: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for SerDes.\n"); sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; break; default: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for Copper.\n"); break; } } else { switch (strap) { case 0x1: case 0x2: case 0x4: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for SerDes.\n"); sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; break; default: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for Copper.\n"); break; } } } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { sc->bce_flags |= BCE_NO_WOL_FLAG; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG; if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { /* 5708S/09S/16S use a separate PHY for SerDes. */ sc->bce_phy_addr = 2; val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); if (val & BCE_SHARED_HW_CFG_PHY_2_5G) { sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb " "capable adapter\n"); } } } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; bce_get_media_exit: DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY), "Using PHY address %d.\n", sc->bce_phy_addr); DBEXIT(BCE_VERBOSE_PHY); } /****************************************************************************/ /* Performs PHY initialization required before MII drivers access the */ /* device. */ /* */ /* Returns: */ /* Nothing. 
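 *
 * Editor's note (background, with a hedged sketch): these PHYs expose
 * a windowed MII register space, where a write to BRGPHY_BLOCK_ADDR
 * selects the register block that subsequent accesses hit.  The
 * general access pattern, using the driver's own miibus methods, is
 * roughly:
 *
 *	bce_miibus_write_reg(dev, phy, BRGPHY_BLOCK_ADDR, block);
 *	val = bce_miibus_read_reg(dev, phy, reg_within_block);
 *
 * where 'block' and 'reg_within_block' stand in for real brgphy(4)
 * register constants.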
*/
/****************************************************************************/
static void
bce_init_media(struct bce_softc *sc)
{
	if ((sc->bce_phy_flags & (BCE_PHY_IEEE_CLAUSE_45_FLAG |
	    BCE_PHY_REMOTE_CAP_FLAG)) == BCE_PHY_IEEE_CLAUSE_45_FLAG) {
		/*
		 * Configure 5709S/5716S PHYs to use traditional IEEE
		 * Clause 22 method. Otherwise we have no way to attach
		 * the PHY in mii(4) layer. PHY specific configuration
		 * is done in mii layer.
		 */

		/* Select auto-negotiation MMD of the PHY. */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);

		/* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
	}
}

/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);

	/* Free, unmap, and destroy the status block. */
	if (sc->status_block_paddr != 0) {
		bus_dmamap_unload(sc->status_tag, sc->status_map);
		sc->status_block_paddr = 0;
	}

	if (sc->status_block != NULL) {
		bus_dmamem_free(sc->status_tag, sc->status_block,
		    sc->status_map);
		sc->status_block = NULL;
	}

	if (sc->status_tag != NULL) {
		bus_dma_tag_destroy(sc->status_tag);
		sc->status_tag = NULL;
	}

	/* Free, unmap, and destroy the statistics block. */
	if (sc->stats_block_paddr != 0) {
		bus_dmamap_unload(sc->stats_tag, sc->stats_map);
		sc->stats_block_paddr = 0;
	}

	if (sc->stats_block != NULL) {
		bus_dmamem_free(sc->stats_tag, sc->stats_block,
		    sc->stats_map);
		sc->stats_block = NULL;
	}

	if (sc->stats_tag != NULL) {
		bus_dma_tag_destroy(sc->stats_tag);
		sc->stats_tag = NULL;
	}

	/* Free, unmap and destroy all context memory pages. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_paddr[i] != 0) {
				bus_dmamap_unload(sc->ctx_tag,
				    sc->ctx_map[i]);
				sc->ctx_paddr[i] = 0;
			}

			if (sc->ctx_block[i] != NULL) {
				bus_dmamem_free(sc->ctx_tag,
				    sc->ctx_block[i], sc->ctx_map[i]);
				sc->ctx_block[i] = NULL;
			}
		}

		/* Destroy the context memory tag. */
		if (sc->ctx_tag != NULL) {
			bus_dma_tag_destroy(sc->ctx_tag);
			sc->ctx_tag = NULL;
		}
	}

	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
	for (i = 0; i < sc->tx_pages; i++) {
		if (sc->tx_bd_chain_paddr[i] != 0) {
			bus_dmamap_unload(sc->tx_bd_chain_tag,
			    sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain_paddr[i] = 0;
		}

		if (sc->tx_bd_chain[i] != NULL) {
			bus_dmamem_free(sc->tx_bd_chain_tag,
			    sc->tx_bd_chain[i], sc->tx_bd_chain_map[i]);
			sc->tx_bd_chain[i] = NULL;
		}
	}

	/* Destroy the TX buffer descriptor tag. */
	if (sc->tx_bd_chain_tag != NULL) {
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
		sc->tx_bd_chain_tag = NULL;
	}

	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
	for (i = 0; i < sc->rx_pages; i++) {
		if (sc->rx_bd_chain_paddr[i] != 0) {
			bus_dmamap_unload(sc->rx_bd_chain_tag,
			    sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain_paddr[i] = 0;
		}

		if (sc->rx_bd_chain[i] != NULL) {
			bus_dmamem_free(sc->rx_bd_chain_tag,
			    sc->rx_bd_chain[i], sc->rx_bd_chain_map[i]);
			sc->rx_bd_chain[i] = NULL;
		}
	}

	/* Destroy the RX buffer descriptor tag.
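	 *
	 * Editor's note: every DMA object in this function is torn down
	 * with the same bus_dma(9) triple, in the reverse order of its
	 * creation (generic sketch):
	 *
	 *	bus_dmamap_unload(tag, map);        undoes bus_dmamap_load()
	 *	bus_dmamem_free(tag, vaddr, map);   undoes bus_dmamem_alloc()
	 *	bus_dma_tag_destroy(tag);           undoes bus_dma_tag_create()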
*/ if (sc->rx_bd_chain_tag != NULL) { bus_dma_tag_destroy(sc->rx_bd_chain_tag); sc->rx_bd_chain_tag = NULL; } /* Free, unmap and destroy all page buffer descriptor chain pages. */ if (bce_hdr_split == TRUE) { for (i = 0; i < sc->pg_pages; i++ ) { if (sc->pg_bd_chain_paddr[i] != 0) { bus_dmamap_unload( sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i]); sc->pg_bd_chain_paddr[i] = 0; } if (sc->pg_bd_chain[i] != NULL) { bus_dmamem_free( sc->pg_bd_chain_tag, sc->pg_bd_chain[i], sc->pg_bd_chain_map[i]); sc->pg_bd_chain[i] = NULL; } } /* Destroy the page buffer descriptor tag. */ if (sc->pg_bd_chain_tag != NULL) { bus_dma_tag_destroy(sc->pg_bd_chain_tag); sc->pg_bd_chain_tag = NULL; } } /* Unload and destroy the TX mbuf maps. */ for (i = 0; i < MAX_TX_BD_AVAIL; i++) { if (sc->tx_mbuf_map[i] != NULL) { bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]); bus_dmamap_destroy(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]); sc->tx_mbuf_map[i] = NULL; } } /* Destroy the TX mbuf tag. */ if (sc->tx_mbuf_tag != NULL) { bus_dma_tag_destroy(sc->tx_mbuf_tag); sc->tx_mbuf_tag = NULL; } /* Unload and destroy the RX mbuf maps. */ for (i = 0; i < MAX_RX_BD_AVAIL; i++) { if (sc->rx_mbuf_map[i] != NULL) { bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]); bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]); sc->rx_mbuf_map[i] = NULL; } } /* Destroy the RX mbuf tag. */ if (sc->rx_mbuf_tag != NULL) { bus_dma_tag_destroy(sc->rx_mbuf_tag); sc->rx_mbuf_tag = NULL; } /* Unload and destroy the page mbuf maps. */ if (bce_hdr_split == TRUE) { for (i = 0; i < MAX_PG_BD_AVAIL; i++) { if (sc->pg_mbuf_map[i] != NULL) { bus_dmamap_unload(sc->pg_mbuf_tag, sc->pg_mbuf_map[i]); bus_dmamap_destroy(sc->pg_mbuf_tag, sc->pg_mbuf_map[i]); sc->pg_mbuf_map[i] = NULL; } } /* Destroy the page mbuf tag. */ if (sc->pg_mbuf_tag != NULL) { bus_dma_tag_destroy(sc->pg_mbuf_tag); sc->pg_mbuf_tag = NULL; } } /* Destroy the parent tag */ if (sc->parent_tag != NULL) { bus_dma_tag_destroy(sc->parent_tag); sc->parent_tag = NULL; } DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Get DMA memory from the OS. */ /* */ /* Validates that the OS has provided DMA buffers in response to a */ /* bus_dmamap_load() call and saves the physical address of those buffers. */ /* When the callback is used the OS will return 0 for the mapping function */ /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ /* failures back to the caller. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *busaddr = arg; KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nseg)); /* Simulate a mapping failure. */ DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), error = ENOMEM); /* ToDo: How to increment debug sim_count variable here? */ /* Check for an error and signal the caller that an error occurred. */ if (error) { *busaddr = 0; } else { *busaddr = segs->ds_addr; } } /****************************************************************************/ /* Allocate any DMA memory needed by the driver. */ /* */ /* Allocates DMA memory needed for the various global structures needed by */ /* hardware. 
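 *
 * Editor's sketch of the allocation pattern repeated throughout this
 * function; bce_dma_map_addr() above stores the single segment
 * address, so a failed mapping shows up as a zero paddr:
 *
 *	error = bus_dmamap_load(tag, map, vaddr, size,
 *	    bce_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
 *	if (error || paddr == 0)
 *		treat it as ENOMEM, exactly as the code below does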
*/
/*                                                                          */
/* Memory alignment requirements:                                           */
/* +-----------------+----------+----------+----------+----------+         */
/* |                 |   5706   |   5708   |   5709   |   5716   |         */
/* +-----------------+----------+----------+----------+----------+         */
/* |Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |         */
/* |Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |         */
/* |RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |         */
/* |PG Buffers       |   none   |   none   |   none   |   none   |         */
/* |TX Buffers       |   none   |   none   |   none   |   none   |         */
/* |Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |         */
/* |Context Memory   |          |          |          |          |         */
/* +-----------------+----------+----------+----------+----------+         */
/*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_dma_alloc(device_t dev)
{
	struct bce_softc *sc;
	int i, error, rc = 0;
	bus_size_t max_size, max_seg_size;
	int max_segments;

	sc = device_get_softc(dev);

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY,
	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->parent_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
		    __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	/*
	 * Create a DMA tag for the status block, allocate and clear the
	 * memory, map the memory into DMA space, and fetch the physical
	 * address of the block.
	 */
	if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
	    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR,
	    NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 0,
	    NULL, NULL, &sc->status_tag)) {
		BCE_PRINTF("%s(%d): Could not allocate status block "
		    "DMA tag!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	if (bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->status_map)) {
		BCE_PRINTF("%s(%d): Could not allocate status block "
		    "DMA memory!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	error = bus_dmamap_load(sc->status_tag, sc->status_map,
	    sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr,
	    &sc->status_block_paddr, BUS_DMA_NOWAIT);

	if (error || sc->status_block_paddr == 0) {
		BCE_PRINTF("%s(%d): Could not map status block "
		    "DMA memory!\n", __FILE__, __LINE__);
		rc = ENOMEM;
		goto bce_dma_alloc_exit;
	}

	DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n",
	    __FUNCTION__, (uintmax_t) sc->status_block_paddr);

	/*
	 * Create a DMA tag for the statistics block, allocate and clear the
	 * memory, map the memory into DMA space, and fetch the physical
	 * address of the block.
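	 *
	 * Editor's note on the bus_dma_tag_create() arguments used for
	 * these blocks (paraphrasing bus_dma(9)):
	 *
	 *	bus_dma_tag_create(parent, alignment, boundary,
	 *	    lowaddr,    DMA above this address is bounced
	 *	    highaddr,   upper limit of the exclusion window
	 *	    filter, filterarg, maxsize, nsegments, maxsegsz,
	 *	    flags, lockfunc, lockarg, &tag);
	 *
	 * nsegments = 1 with maxsize == maxsegsz guarantees each block
	 * ends up physically contiguous.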
*/ if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 0, NULL, NULL, &sc->stats_tag)) { BCE_PRINTF("%s(%d): Could not allocate statistics block " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { BCE_PRINTF("%s(%d): Could not allocate statistics block " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->stats_tag, sc->stats_map, sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr, &sc->stats_block_paddr, BUS_DMA_NOWAIT); if (error || sc->stats_block_paddr == 0) { BCE_PRINTF("%s(%d): Could not map statistics block " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n", __FUNCTION__, (uintmax_t) sc->stats_block_paddr); /* BCM5709 uses host memory as cache for context memory. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; if (sc->ctx_pages == 0) sc->ctx_pages = 1; DBRUNIF((sc->ctx_pages > 512), BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n", __FILE__, __LINE__, sc->ctx_pages)); /* * Create a DMA tag for the context pages, * allocate and clear the memory, map the * memory into DMA space, and fetch the * physical address of the block. */ if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 0, NULL, NULL, &sc->ctx_tag)) { BCE_PRINTF("%s(%d): Could not allocate CTX " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->ctx_pages; i++) { if(bus_dmamem_alloc(sc->ctx_tag, (void **)&sc->ctx_block[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ctx_map[i])) { BCE_PRINTF("%s(%d): Could not allocate CTX " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr, &sc->ctx_paddr[i], BUS_DMA_NOWAIT); if (error || sc->ctx_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map CTX " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] " "= 0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->ctx_paddr[i]); } } /* * Create a DMA tag for the TX buffer descriptor chain, * allocate and clear the memory, and fetch the * physical address of the block. 
*/ if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0, NULL, NULL, &sc->tx_bd_chain_tag)) { BCE_PRINTF("%s(%d): Could not allocate TX descriptor " "chain DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->tx_pages; i++) { if(bus_dmamem_alloc(sc->tx_bd_chain_tag, (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->tx_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate TX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i], sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr, &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT); if (error || sc->tx_bd_chain_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map TX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = " "0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->tx_bd_chain_paddr[i]); } /* Check the required size before mapping to conserve resources. */ if (bce_tso_enable) { max_size = BCE_TSO_MAX_SIZE; max_segments = BCE_MAX_SEGMENTS; max_seg_size = BCE_TSO_MAX_SEG_SIZE; } else { max_size = MCLBYTES * BCE_MAX_SEGMENTS; max_segments = BCE_MAX_SEGMENTS; max_seg_size = MCLBYTES; } /* Create a DMA tag for TX mbufs. */ if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) { BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* Create DMA maps for the TX mbufs clusters. */ for (i = 0; i < TOTAL_TX_BD_ALLOC; i++) { if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, &sc->tx_mbuf_map[i])) { BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA " "map!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } } /* * Create a DMA tag for the RX buffer descriptor chain, * allocate and clear the memory, and fetch the physical * address of the blocks. */ if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, NULL, NULL, BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 0, NULL, NULL, &sc->rx_bd_chain_tag)) { BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->rx_pages; i++) { if (bus_dmamem_alloc(sc->rx_bd_chain_tag, (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->rx_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate RX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr, &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT); if (error || sc->rx_bd_chain_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map RX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = " "0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->rx_bd_chain_paddr[i]); } /* * Create a DMA tag for RX mbufs. */ if (bce_hdr_split == TRUE) max_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? 
MCLBYTES : sc->rx_bd_mbuf_alloc_size); else max_size = MJUM9BYTES; DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag " "(max size = 0x%jX)\n", __FUNCTION__, (uintmax_t)max_size); if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, 1, max_size, 0, NULL, NULL, &sc->rx_mbuf_tag)) { BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* Create DMA maps for the RX mbuf clusters. */ for (i = 0; i < TOTAL_RX_BD_ALLOC; i++) { if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, &sc->rx_mbuf_map[i])) { BCE_PRINTF("%s(%d): Unable to create RX mbuf " "DMA map!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } } if (bce_hdr_split == TRUE) { /* * Create a DMA tag for the page buffer descriptor chain, * allocate and clear the memory, and fetch the physical * address of the blocks. */ if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ, 0, NULL, NULL, &sc->pg_bd_chain_tag)) { BCE_PRINTF("%s(%d): Could not allocate page descriptor " "chain DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->pg_pages; i++) { if (bus_dmamem_alloc(sc->pg_bd_chain_tag, (void **)&sc->pg_bd_chain[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->pg_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate page " "descriptor chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr, &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT); if (error || sc->pg_bd_chain_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map page descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = " "0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->pg_bd_chain_paddr[i]); } /* * Create a DMA tag for page mbufs. */ if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->pg_mbuf_tag)) { BCE_PRINTF("%s(%d): Could not allocate page mbuf " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* Create DMA maps for the page mbuf clusters. */ for (i = 0; i < TOTAL_PG_BD_ALLOC; i++) { if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, &sc->pg_mbuf_map[i])) { BCE_PRINTF("%s(%d): Unable to create page mbuf " "DMA map!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } } } bce_dma_alloc_exit: DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); return(rc); } /****************************************************************************/ /* Release all resources used by the driver. */ /* */ /* Releases all resources acquired by the driver including interrupts, */ /* interrupt handler, interfaces, mutexes, and DMA memory. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_release_resources(struct bce_softc *sc) { device_t dev; DBENTER(BCE_VERBOSE_RESET); dev = sc->bce_dev; bce_dma_free(sc); if (sc->bce_intrhand != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); } if (sc->bce_res_irq != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->bce_res_irq), sc->bce_res_irq); } if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) { DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n"); pci_release_msi(dev); } if (sc->bce_res_mem != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem); } if (sc->bce_ifp != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); if_free(sc->bce_ifp); } if (mtx_initialized(&sc->bce_mtx)) BCE_LOCK_DESTROY(sc); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Firmware synchronization. */ /* */ /* Before performing certain events such as a chip reset, synchronize with */ /* the firmware first. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_fw_sync(struct bce_softc *sc, u32 msg_data) { int i, rc = 0; u32 val; DBENTER(BCE_VERBOSE_RESET); /* Don't waste any time if we've timed out before. */ if (sc->bce_fw_timed_out == TRUE) { rc = EBUSY; goto bce_fw_sync_exit; } /* Increment the message sequence number. */ sc->bce_fw_wr_seq++; msg_data |= sc->bce_fw_wr_seq; DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = " "0x%08X\n", msg_data); /* Send the message to the bootcode driver mailbox. */ bce_shmem_wr(sc, BCE_DRV_MB, msg_data); /* Wait for the bootcode to acknowledge the message. */ for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { /* Check for a response in the bootcode firmware mailbox. */ val = bce_shmem_rd(sc, BCE_FW_MB); if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) break; DELAY(1000); } /* If we've timed out, tell bootcode that we've stopped waiting. */ if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { BCE_PRINTF("%s(%d): Firmware synchronization timeout! " "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); msg_data &= ~BCE_DRV_MSG_CODE; msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; bce_shmem_wr(sc, BCE_DRV_MB, msg_data); sc->bce_fw_timed_out = TRUE; rc = EBUSY; } bce_fw_sync_exit: DBEXIT(BCE_VERBOSE_RESET); return (rc); } /****************************************************************************/ /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_load_rv2p_fw(struct bce_softc *sc, const u32 *rv2p_code, u32 rv2p_code_len, u32 rv2p_proc) { int i; u32 val; DBENTER(BCE_VERBOSE_RESET); /* Set the page size used by RV2P. 
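	 *
	 * Editor's note: each RV2P instruction is 64 bits wide.  The loop
	 * below writes it as a high/low pair of 32-bit register writes and
	 * commits it by writing the instruction index to the ADDR_CMD
	 * register; e.g. byte offset i = 16 is code words 4 and 5, stored
	 * at instruction index 16 / 8 = 2.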
*/ if (rv2p_proc == RV2P_PROC2) { BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); } for (i = 0; i < rv2p_code_len; i += 8) { REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); rv2p_code++; REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); rv2p_code++; if (rv2p_proc == RV2P_PROC1) { val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); } else { val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); } } /* Reset the processor, un-stall is done later. */ if (rv2p_proc == RV2P_PROC1) { REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); } else { REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); } DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Load RISC processor firmware. */ /* */ /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ /* associated with a particular processor. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, struct fw_info *fw) { u32 offset; DBENTER(BCE_VERBOSE_RESET); bce_halt_cpu(sc, cpu_reg); /* Load the Text area. */ offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); if (fw->text) { int j; for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->text[j]); } } /* Load the Data area. */ offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); if (fw->data) { int j; for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->data[j]); } } /* Load the SBSS area. */ offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); if (fw->sbss) { int j; for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->sbss[j]); } } /* Load the BSS area. */ offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); if (fw->bss) { int j; for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->bss[j]); } } /* Load the Read-Only area. */ offset = cpu_reg->spad_base + (fw->rodata_addr - cpu_reg->mips_view_base); if (fw->rodata) { int j; for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->rodata[j]); } } /* Clear the pre-fetch instruction and set the FW start address. */ REG_WR_IND(sc, cpu_reg->inst, 0); REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Starts the RISC processor. */ /* */ /* Assumes the CPU starting address has already been set. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) { u32 val; DBENTER(BCE_VERBOSE_RESET); /* Start the CPU. */ val = REG_RD_IND(sc, cpu_reg->mode); val &= ~cpu_reg->mode_value_halt; REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); REG_WR_IND(sc, cpu_reg->mode, val); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Halts the RISC processor. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) { u32 val; DBENTER(BCE_VERBOSE_RESET); /* Halt the CPU. 
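	 *
	 * Editor's sketch of the firmware load lifecycle these helpers
	 * implement for each on-chip RISC CPU:
	 *
	 *	bce_halt_cpu(sc, cpu_reg);            stop the CPU
	 *	... bce_load_cpu_fw() copies text, data, sbss, bss and
	 *	    rodata into the scratchpad via REG_WR_IND() ...
	 *	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
	 *	bce_start_cpu(sc, cpu_reg);           clear halt bit, run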
*/ val = REG_RD_IND(sc, cpu_reg->mode); val |= cpu_reg->mode_value_halt; REG_WR_IND(sc, cpu_reg->mode, val); REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the RX CPU. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start_rxp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_RXP_CPU_MODE; cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_RXP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_RXP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n"); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the RX CPU. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_init_rxp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_RXP_CPU_MODE; cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_RXP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_RXP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_RXP_b09FwReleaseMajor; fw.ver_minor = bce_RXP_b09FwReleaseMinor; fw.ver_fix = bce_RXP_b09FwReleaseFix; fw.start_addr = bce_RXP_b09FwStartAddr; fw.text_addr = bce_RXP_b09FwTextAddr; fw.text_len = bce_RXP_b09FwTextLen; fw.text_index = 0; fw.text = bce_RXP_b09FwText; fw.data_addr = bce_RXP_b09FwDataAddr; fw.data_len = bce_RXP_b09FwDataLen; fw.data_index = 0; fw.data = bce_RXP_b09FwData; fw.sbss_addr = bce_RXP_b09FwSbssAddr; fw.sbss_len = bce_RXP_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_RXP_b09FwSbss; fw.bss_addr = bce_RXP_b09FwBssAddr; fw.bss_len = bce_RXP_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_RXP_b09FwBss; fw.rodata_addr = bce_RXP_b09FwRodataAddr; fw.rodata_len = bce_RXP_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_RXP_b09FwRodata; } else { fw.ver_major = bce_RXP_b06FwReleaseMajor; fw.ver_minor = bce_RXP_b06FwReleaseMinor; fw.ver_fix = bce_RXP_b06FwReleaseFix; fw.start_addr = bce_RXP_b06FwStartAddr; fw.text_addr = bce_RXP_b06FwTextAddr; fw.text_len = bce_RXP_b06FwTextLen; fw.text_index = 0; fw.text = bce_RXP_b06FwText; fw.data_addr = bce_RXP_b06FwDataAddr; fw.data_len = bce_RXP_b06FwDataLen; fw.data_index = 0; fw.data = bce_RXP_b06FwData; fw.sbss_addr = bce_RXP_b06FwSbssAddr; fw.sbss_len = bce_RXP_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_RXP_b06FwSbss; fw.bss_addr = bce_RXP_b06FwBssAddr; fw.bss_len = bce_RXP_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_RXP_b06FwBss; fw.rodata_addr = bce_RXP_b06FwRodataAddr; fw.rodata_len = bce_RXP_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = 
bce_RXP_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); /* Delay RXP start until initialization is complete. */ DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the TX CPU. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_init_txp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_TXP_CPU_MODE; cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_TXP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_TXP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_TXP_b09FwReleaseMajor; fw.ver_minor = bce_TXP_b09FwReleaseMinor; fw.ver_fix = bce_TXP_b09FwReleaseFix; fw.start_addr = bce_TXP_b09FwStartAddr; fw.text_addr = bce_TXP_b09FwTextAddr; fw.text_len = bce_TXP_b09FwTextLen; fw.text_index = 0; fw.text = bce_TXP_b09FwText; fw.data_addr = bce_TXP_b09FwDataAddr; fw.data_len = bce_TXP_b09FwDataLen; fw.data_index = 0; fw.data = bce_TXP_b09FwData; fw.sbss_addr = bce_TXP_b09FwSbssAddr; fw.sbss_len = bce_TXP_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TXP_b09FwSbss; fw.bss_addr = bce_TXP_b09FwBssAddr; fw.bss_len = bce_TXP_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_TXP_b09FwBss; fw.rodata_addr = bce_TXP_b09FwRodataAddr; fw.rodata_len = bce_TXP_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TXP_b09FwRodata; } else { fw.ver_major = bce_TXP_b06FwReleaseMajor; fw.ver_minor = bce_TXP_b06FwReleaseMinor; fw.ver_fix = bce_TXP_b06FwReleaseFix; fw.start_addr = bce_TXP_b06FwStartAddr; fw.text_addr = bce_TXP_b06FwTextAddr; fw.text_len = bce_TXP_b06FwTextLen; fw.text_index = 0; fw.text = bce_TXP_b06FwText; fw.data_addr = bce_TXP_b06FwDataAddr; fw.data_len = bce_TXP_b06FwDataLen; fw.data_index = 0; fw.data = bce_TXP_b06FwData; fw.sbss_addr = bce_TXP_b06FwSbssAddr; fw.sbss_len = bce_TXP_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TXP_b06FwSbss; fw.bss_addr = bce_TXP_b06FwBssAddr; fw.bss_len = bce_TXP_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_TXP_b06FwBss; fw.rodata_addr = bce_TXP_b06FwRodataAddr; fw.rodata_len = bce_TXP_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TXP_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the TPAT CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_tpat_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_TPAT_CPU_MODE; cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_TPAT_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_TPAT_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_TPAT_b09FwReleaseMajor; fw.ver_minor = bce_TPAT_b09FwReleaseMinor; fw.ver_fix = bce_TPAT_b09FwReleaseFix; fw.start_addr = bce_TPAT_b09FwStartAddr; fw.text_addr = bce_TPAT_b09FwTextAddr; fw.text_len = bce_TPAT_b09FwTextLen; fw.text_index = 0; fw.text = bce_TPAT_b09FwText; fw.data_addr = bce_TPAT_b09FwDataAddr; fw.data_len = bce_TPAT_b09FwDataLen; fw.data_index = 0; fw.data = bce_TPAT_b09FwData; fw.sbss_addr = bce_TPAT_b09FwSbssAddr; fw.sbss_len = bce_TPAT_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TPAT_b09FwSbss; fw.bss_addr = bce_TPAT_b09FwBssAddr; fw.bss_len = bce_TPAT_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_TPAT_b09FwBss; fw.rodata_addr = bce_TPAT_b09FwRodataAddr; fw.rodata_len = bce_TPAT_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TPAT_b09FwRodata; } else { fw.ver_major = bce_TPAT_b06FwReleaseMajor; fw.ver_minor = bce_TPAT_b06FwReleaseMinor; fw.ver_fix = bce_TPAT_b06FwReleaseFix; fw.start_addr = bce_TPAT_b06FwStartAddr; fw.text_addr = bce_TPAT_b06FwTextAddr; fw.text_len = bce_TPAT_b06FwTextLen; fw.text_index = 0; fw.text = bce_TPAT_b06FwText; fw.data_addr = bce_TPAT_b06FwDataAddr; fw.data_len = bce_TPAT_b06FwDataLen; fw.data_index = 0; fw.data = bce_TPAT_b06FwData; fw.sbss_addr = bce_TPAT_b06FwSbssAddr; fw.sbss_len = bce_TPAT_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TPAT_b06FwSbss; fw.bss_addr = bce_TPAT_b06FwBssAddr; fw.bss_len = bce_TPAT_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_TPAT_b06FwBss; fw.rodata_addr = bce_TPAT_b06FwRodataAddr; fw.rodata_len = bce_TPAT_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TPAT_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the CP CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_cp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_CP_CPU_MODE; cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_CP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_CP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_CP_b09FwReleaseMajor; fw.ver_minor = bce_CP_b09FwReleaseMinor; fw.ver_fix = bce_CP_b09FwReleaseFix; fw.start_addr = bce_CP_b09FwStartAddr; fw.text_addr = bce_CP_b09FwTextAddr; fw.text_len = bce_CP_b09FwTextLen; fw.text_index = 0; fw.text = bce_CP_b09FwText; fw.data_addr = bce_CP_b09FwDataAddr; fw.data_len = bce_CP_b09FwDataLen; fw.data_index = 0; fw.data = bce_CP_b09FwData; fw.sbss_addr = bce_CP_b09FwSbssAddr; fw.sbss_len = bce_CP_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_CP_b09FwSbss; fw.bss_addr = bce_CP_b09FwBssAddr; fw.bss_len = bce_CP_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_CP_b09FwBss; fw.rodata_addr = bce_CP_b09FwRodataAddr; fw.rodata_len = bce_CP_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_CP_b09FwRodata; } else { fw.ver_major = bce_CP_b06FwReleaseMajor; fw.ver_minor = bce_CP_b06FwReleaseMinor; fw.ver_fix = bce_CP_b06FwReleaseFix; fw.start_addr = bce_CP_b06FwStartAddr; fw.text_addr = bce_CP_b06FwTextAddr; fw.text_len = bce_CP_b06FwTextLen; fw.text_index = 0; fw.text = bce_CP_b06FwText; fw.data_addr = bce_CP_b06FwDataAddr; fw.data_len = bce_CP_b06FwDataLen; fw.data_index = 0; fw.data = bce_CP_b06FwData; fw.sbss_addr = bce_CP_b06FwSbssAddr; fw.sbss_len = bce_CP_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_CP_b06FwSbss; fw.bss_addr = bce_CP_b06FwBssAddr; fw.bss_len = bce_CP_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_CP_b06FwBss; fw.rodata_addr = bce_CP_b06FwRodataAddr; fw.rodata_len = bce_CP_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_CP_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the COM CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_com_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_COM_CPU_MODE; cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_COM_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_COM_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_COM_b09FwReleaseMajor; fw.ver_minor = bce_COM_b09FwReleaseMinor; fw.ver_fix = bce_COM_b09FwReleaseFix; fw.start_addr = bce_COM_b09FwStartAddr; fw.text_addr = bce_COM_b09FwTextAddr; fw.text_len = bce_COM_b09FwTextLen; fw.text_index = 0; fw.text = bce_COM_b09FwText; fw.data_addr = bce_COM_b09FwDataAddr; fw.data_len = bce_COM_b09FwDataLen; fw.data_index = 0; fw.data = bce_COM_b09FwData; fw.sbss_addr = bce_COM_b09FwSbssAddr; fw.sbss_len = bce_COM_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_COM_b09FwSbss; fw.bss_addr = bce_COM_b09FwBssAddr; fw.bss_len = bce_COM_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_COM_b09FwBss; fw.rodata_addr = bce_COM_b09FwRodataAddr; fw.rodata_len = bce_COM_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_COM_b09FwRodata; } else { fw.ver_major = bce_COM_b06FwReleaseMajor; fw.ver_minor = bce_COM_b06FwReleaseMinor; fw.ver_fix = bce_COM_b06FwReleaseFix; fw.start_addr = bce_COM_b06FwStartAddr; fw.text_addr = bce_COM_b06FwTextAddr; fw.text_len = bce_COM_b06FwTextLen; fw.text_index = 0; fw.text = bce_COM_b06FwText; fw.data_addr = bce_COM_b06FwDataAddr; fw.data_len = bce_COM_b06FwDataLen; fw.data_index = 0; fw.data = bce_COM_b06FwData; fw.sbss_addr = bce_COM_b06FwSbssAddr; fw.sbss_len = bce_COM_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_COM_b06FwSbss; fw.bss_addr = bce_COM_b06FwBssAddr; fw.bss_len = bce_COM_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_COM_b06FwBss; fw.rodata_addr = bce_COM_b06FwRodataAddr; fw.rodata_len = bce_COM_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_COM_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ /* */ /* Loads the firmware for each CPU and starts the CPU. */ /* */ /* Returns: */ /* Nothing. 
*/
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	DBENTER(BCE_VERBOSE_RESET);

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) {
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
		} else {
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
		}
	} else {
		bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1),
		    RV2P_PROC1);
		bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2),
		    RV2P_PROC2);
	}

	bce_init_rxp_cpu(sc);
	bce_init_txp_cpu(sc);
	bce_init_tpat_cpu(sc);
	bce_init_com_cpu(sc);
	bce_init_cp_cpu(sc);

	DBEXIT(BCE_VERBOSE_RESET);
}

/****************************************************************************/
/* Initialize context memory.                                               */
/*                                                                          */
/* Clears the memory associated with each Context ID (CID).                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_init_ctx(struct bce_softc *sc)
{
	u32 offset, val, vcid_addr;
	int i, j, rc, retry_cnt;

	rc = 0;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		retry_cnt = CTX_INIT_RETRY_COUNT;

		DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");

		/*
		 * BCM5709 context memory may be cached
		 * in host memory so prepare the host memory
		 * for access.
		 */
		val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
		    (1 << 12);
		val |= (BCM_PAGE_BITS - 8) << 16;
		REG_WR(sc, BCE_CTX_COMMAND, val);

		/* Wait for mem init command to complete. */
		for (i = 0; i < retry_cnt; i++) {
			val = REG_RD(sc, BCE_CTX_COMMAND);
			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
				break;
			DELAY(2);
		}
		if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) {
			BCE_PRINTF("%s(): Context memory initialization failed!\n",
			    __FUNCTION__);
			rc = EBUSY;
			goto init_ctx_fail;
		}

		for (i = 0; i < sc->ctx_pages; i++) {
			/* Set the physical address of the context memory. */
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
			    BCE_ADDR_HI(sc->ctx_paddr[i]));
			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
			    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

			/* Verify the context memory write was successful. */
			for (j = 0; j < retry_cnt; j++) {
				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
				if ((val &
				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
					break;
				DELAY(5);
			}
			if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) {
				BCE_PRINTF("%s(): Failed to initialize "
				    "context page %d!\n", __FUNCTION__, i);
				rc = EBUSY;
				goto init_ctx_fail;
			}
		}
	} else {
		DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");

		/*
		 * For the 5706/5708, context memory is local to
		 * the controller, so initialize the controller
		 * context memory.
		 */

		vcid_addr = GET_CID_ADDR(96);
		while (vcid_addr) {
			vcid_addr -= PHY_CTX_SIZE;

			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);

			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
				CTX_WR(sc, 0x00, offset, 0);
			}

			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
		}
	}

init_ctx_fail:
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
	return (rc);
}

/****************************************************************************/
/* Fetch the permanent MAC address of the controller.                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.
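 *
 * Worked example (editor's illustration, made-up values): with
 * shared-memory words mac_hi = 0x00000010 and mac_lo = 0x18A1B2C3,
 * the bytes are peeled off most-significant first, giving the
 * station address 00:10:18:a1:b2:c3; that is, eaddr[0..1] come from
 * the low 16 bits of MAC_UPPER and eaddr[2..5] from MAC_LOWER.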
*/ /****************************************************************************/ static void bce_get_mac_addr(struct bce_softc *sc) { u32 mac_lo = 0, mac_hi = 0; DBENTER(BCE_VERBOSE_RESET); /* * The NetXtreme II bootcode populates various NIC * power-on and runtime configuration items in a * shared memory area. The factory configured MAC * address is available from both NVRAM and the * shared memory area so we'll read the value from * shared memory for speed. */ mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); if ((mac_lo == 0) && (mac_hi == 0)) { BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", __FILE__, __LINE__); } else { sc->eaddr[0] = (u_char)(mac_hi >> 8); sc->eaddr[1] = (u_char)(mac_hi >> 0); sc->eaddr[2] = (u_char)(mac_lo >> 24); sc->eaddr[3] = (u_char)(mac_lo >> 16); sc->eaddr[4] = (u_char)(mac_lo >> 8); sc->eaddr[5] = (u_char)(mac_lo >> 0); } DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet " "address = %6D\n", sc->eaddr, ":"); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Program the MAC address. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_set_mac_addr(struct bce_softc *sc) { u32 val; u8 *mac_addr = sc->eaddr; /* ToDo: Add support for setting multiple MAC addresses. */ DBENTER(BCE_VERBOSE_RESET); DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = " "%6D\n", sc->eaddr, ":"); val = (mac_addr[0] << 8) | mac_addr[1]; REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Stop the controller. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_stop(struct bce_softc *sc) { struct ifnet *ifp; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK_ASSERT(sc); ifp = sc->bce_ifp; callout_stop(&sc->bce_tick_callout); /* Disable the transmit/receive blocks. */ REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); DELAY(20); bce_disable_intr(sc); /* Free RX buffers. */ if (bce_hdr_split == TRUE) { bce_free_pg_chain(sc); } bce_free_rx_chain(sc); /* Free TX buffers. */ bce_free_tx_chain(sc); sc->watchdog_timer = 0; sc->bce_link_up = FALSE; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); DBEXIT(BCE_VERBOSE_RESET); } static int bce_reset(struct bce_softc *sc, u32 reset_code) { u32 emac_mode_save, val; int i, rc = 0; static const u32 emac_mode_mask = BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_25G; DBENTER(BCE_VERBOSE_RESET); DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n", __FUNCTION__, reset_code); /* * If ASF/IPMI is operational, then the EMAC Mode register already * contains appropriate values for the link settings that have * been auto-negotiated. Resetting the chip will clobber those * values. Save the important bits so we can restore them after * the reset. */ emac_mode_save = REG_RD(sc, BCE_EMAC_MODE) & emac_mode_mask; /* Wait for pending PCI transactions to complete. 
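	 *
	 * Editor's note: the REG_RD() of the same register right after the
	 * REG_WR() below is a deliberate posted-write flush, so the
	 * DELAY(5) only starts counting once the disable bits have
	 * actually reached the device:
	 *
	 *	REG_WR(sc, reg, bits);     may sit posted in a PCI bridge
	 *	(void)REG_RD(sc, reg);     read-back forces completion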
*/ REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); DELAY(5); /* Disable DMA */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); } /* Assume bootcode is running. */ sc->bce_fw_timed_out = FALSE; sc->bce_drv_cardiac_arrest = FALSE; /* Give the firmware a chance to prepare for the reset. */ rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); if (rc) goto bce_reset_exit; /* Set a firmware reminder that this is a soft reset. */ bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC); /* Dummy read to force the chip to complete all current transactions. */ val = REG_RD(sc, BCE_MISC_ID); /* Chip reset. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); REG_RD(sc, BCE_MISC_COMMAND); DELAY(5); val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); } else { val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); /* Allow up to 30us for reset to complete. */ for (i = 0; i < 10; i++) { val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { break; } DELAY(10); } /* Check that reset completed successfully. */ if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { BCE_PRINTF("%s(%d): Reset failed!\n", __FILE__, __LINE__); rc = EBUSY; goto bce_reset_exit; } } /* Make sure byte swapping is properly configured. */ val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); if (val != 0x01020304) { BCE_PRINTF("%s(%d): Byte swap is incorrect!\n", __FILE__, __LINE__); rc = ENODEV; goto bce_reset_exit; } /* Just completed a reset, assume that firmware is running again. */ sc->bce_fw_timed_out = FALSE; sc->bce_drv_cardiac_arrest = FALSE; /* Wait for the firmware to finish its initialization. */ rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); if (rc) BCE_PRINTF("%s(%d): Firmware did not complete " "initialization!\n", __FILE__, __LINE__); /* Get firmware capabilities. */ bce_fw_cap_init(sc); bce_reset_exit: /* Restore EMAC Mode bits needed to keep ASF/IPMI running. */ if (reset_code == BCE_DRV_MSG_CODE_RESET) { val = REG_RD(sc, BCE_EMAC_MODE); val = (val & ~emac_mode_mask) | emac_mode_save; REG_WR(sc, BCE_EMAC_MODE, val); } DBEXIT(BCE_VERBOSE_RESET); return (rc); } static int bce_chipinit(struct bce_softc *sc) { u32 val; int rc = 0; DBENTER(BCE_VERBOSE_RESET); bce_disable_intr(sc); /* * Initialize DMA byte/word swapping, configure the number of DMA * channels and PCI clock compensation delay. */ val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | BCE_DMA_CONFIG_DATA_WORD_SWAP | #if BYTE_ORDER == BIG_ENDIAN BCE_DMA_CONFIG_CNTL_BYTE_SWAP | #endif BCE_DMA_CONFIG_CNTL_WORD_SWAP | DMA_READ_CHANS << 12 | DMA_WRITE_CHANS << 16; val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; /* * This setting resolves a problem observed on certain Intel PCI * chipsets that cannot handle multiple outstanding DMA operations. 
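* The workaround below is applied only to 5706 revisions after A0 running in conventional PCI mode; PCI-X operation is unaffected.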
* See errata E9_5706A1_65. */ if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) && !(sc->bce_flags & BCE_PCIX_FLAG)) val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; REG_WR(sc, BCE_DMA_CONFIG, val); /* Enable the RX_V2P and Context state machines before access. */ REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); /* Initialize context mapping and zero out the quick contexts. */ if ((rc = bce_init_ctx(sc)) != 0) goto bce_chipinit_exit; /* Initialize the on-board CPUs. */ bce_init_cpus(sc); /* Enable management frames (NC-SI) to flow to the MCP. */ if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); } /* Prepare NVRAM for access. */ if ((rc = bce_init_nvram(sc)) != 0) goto bce_chipinit_exit; /* Set the kernel bypass block size. */ val = REG_RD(sc, BCE_MQ_CONFIG); val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; /* Enable bins used on the 5709. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val |= BCE_MQ_CONFIG_BIN_MQ_MODE; if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) val |= BCE_MQ_CONFIG_HALT_DIS; } REG_WR(sc, BCE_MQ_CONFIG, val); val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); REG_WR(sc, BCE_MQ_KNL_WIND_END, val); /* Set the page size and clear the RV2P processor stall bits. */ val = (BCM_PAGE_BITS - 8) << 24; REG_WR(sc, BCE_RV2P_CONFIG, val); /* Configure page size. */ val = REG_RD(sc, BCE_TBDR_CONFIG); val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; REG_WR(sc, BCE_TBDR_CONFIG, val); /* Set the perfect match control register to default. */ REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); bce_chipinit_exit: DBEXIT(BCE_VERBOSE_RESET); return(rc); } /****************************************************************************/ /* Initialize the controller in preparation to send/receive traffic. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_blockinit(struct bce_softc *sc) { u32 reg, val; int rc = 0; DBENTER(BCE_VERBOSE_RESET); /* Load the hardware default MAC address. */ bce_set_mac_addr(sc); /* Set the Ethernet backoff seed value. */ val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + (sc->eaddr[3]) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); sc->last_status_idx = 0; sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; /* Set up link change interrupt generation. */ REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); /* Program the physical address of the status block. */ REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); /* Program the physical address of the statistics block. */ REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, BCE_ADDR_LO(sc->stats_block_paddr)); REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, BCE_ADDR_HI(sc->stats_block_paddr)); /* * Program various host coalescing parameters. * Trip points control how many BDs should be ready before generating * an interrupt while ticks control how long a BD can sit in the chain * before generating an interrupt.
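* Each register packs the in-interrupt value in the upper 16 bits and the normal value in the lower 16 bits; for example, a TX trip point of 8 BDs with an in-interrupt trip point of 16 would be programmed as (16 << 16) | 8 == 0x00100008 (illustrative values only).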
*/ REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); REG_WR(sc, BCE_HC_TX_TICKS, (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); REG_WR(sc, BCE_HC_RX_TICKS, (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); REG_WR(sc, BCE_HC_STATS_TICKS, sc->bce_stats_ticks & 0xffff00); REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ /* Not used for L2. */ REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 0); REG_WR(sc, BCE_HC_COM_TICKS, 0); REG_WR(sc, BCE_HC_CMD_TICKS, 0); /* Configure the Host Coalescing block. */ val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; #if 0 /* ToDo: Add MSI-X support. */ if (sc->bce_flags & BCE_USING_MSIX_FLAG) { u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1; REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE | BCE_HC_SB_CONFIG_1_ONE_SHOT); REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, (sc->tx_quick_cons_trip_int << 16) | sc->tx_quick_cons_trip); REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, (sc->tx_ticks_int << 16) | sc->tx_ticks); val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; } /* * Tell the HC block to automatically set the * INT_MASK bit after an MSI/MSI-X interrupt * is generated so the driver doesn't have to. */ if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG) val |= BCE_HC_CONFIG_ONE_SHOT; /* Set the MSI-X status blocks to 128 byte boundaries. */ if (sc->bce_flags & BCE_USING_MSIX_FLAG) val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; #endif REG_WR(sc, BCE_HC_CONFIG, val); /* Clear the internal statistics counters. */ REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); /* Verify that bootcode is running. */ reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control), BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", __FILE__, __LINE__); reg = 0); if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != BCE_DEV_INFO_SIGNATURE_MAGIC) { BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, " "Expected: 0x%08X\n", __FILE__, __LINE__, (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), BCE_DEV_INFO_SIGNATURE_MAGIC); rc = ENODEV; goto bce_blockinit_exit; } /* Enable DMA */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); } /* Allow bootcode to apply additional fixes before enabling MAC. */ rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); /* Enable link state change interrupt generation. */ REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); /* Enable the RXP. */ bce_start_rxp_cpu(sc); /* Disable management frames (NC-SI) from flowing to the MCP. */ if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); } /* Enable all remaining blocks in the MAC. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT_XI); else REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); DELAY(20); /* Save the current host coalescing block settings.
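The saved BCE_HC_COMMAND value is reused by bce_enable_intr(), which ORs in BCE_HC_COMMAND_COAL_NOW to force an immediate interrupt.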
*/ sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); bce_blockinit_exit: DBEXIT(BCE_VERBOSE_RESET); return (rc); } /****************************************************************************/ /* Encapsulate an mbuf into the rx_bd chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_get_rx_buf(struct bce_softc *sc, u16 prod, u16 chain_prod, u32 *prod_bseq) { bus_dma_segment_t segs[1]; struct mbuf *m_new = NULL; struct rx_bd *rxbd; int nsegs, error, rc = 0; #ifdef BCE_DEBUG u16 debug_chain_prod = chain_prod; #endif DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); /* Make sure the inputs are valid. */ DBRUNIF((chain_prod > MAX_RX_BD_ALLOC), BCE_PRINTF("%s(%d): RX producer out of range: " "0x%04X > 0x%04X\n", __FILE__, __LINE__, chain_prod, (u16)MAX_RX_BD_ALLOC)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod, chain_prod, *prod_bseq); /* Update some debug statistic counters */ DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), sc->rx_low_watermark = sc->free_rx_bd); DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); /* Simulate an mbuf allocation failure. */ DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), sc->mbuf_alloc_failed_count++; sc->mbuf_alloc_failed_sim_count++; rc = ENOBUFS; goto bce_get_rx_buf_exit); /* This is a new mbuf allocation. */ if (bce_hdr_split == TRUE) MGETHDR(m_new, M_NOWAIT, MT_DATA); else m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size); if (m_new == NULL) { sc->mbuf_alloc_failed_count++; rc = ENOBUFS; goto bce_get_rx_buf_exit; } DBRUN(sc->debug_rx_mbuf_alloc++); /* Make sure we have a valid packet header. */ M_ASSERTPKTHDR(m_new); /* Initialize the mbuf size and pad if necessary for alignment. */ m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size; m_adj(m_new, sc->rx_bd_mbuf_align_pad); /* ToDo: Consider calling m_fragment() to test error handling. */ /* Map the mbuf cluster into device memory. */ error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, sc->rx_mbuf_map[chain_prod], m_new, segs, &nsegs, BUS_DMA_NOWAIT); /* Handle any mapping errors. */ if (error) { BCE_PRINTF("%s(%d): Error mapping mbuf into RX " "chain (%d)!\n", __FILE__, __LINE__, error); sc->dma_map_addr_rx_failed_count++; m_freem(m_new); DBRUN(sc->debug_rx_mbuf_alloc--); rc = ENOBUFS; goto bce_get_rx_buf_exit; } /* All mbufs must map to a single segment. */ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nsegs)); /* Setup the rx_bd for the segment. */ rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)]; rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); rxbd->rx_bd_len = htole32(segs[0].ds_len); rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); *prod_bseq += segs[0].ds_len; /* Save the mbuf and update our counter. 
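*/ /* Note on the rx_bd setup above: the controller takes 64-bit host addresses as two little-endian 32-bit halves, so (for example) the DMA address 0x0000000412345678 would be programmed as haddr_lo = htole32(0x12345678) and haddr_hi = htole32(0x00000004). */ /*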
*/ sc->rx_mbuf_ptr[chain_prod] = m_new; sc->free_rx_bd -= nsegs; DBRUNMSG(BCE_INSANE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod, chain_prod, *prod_bseq); bce_get_rx_buf_exit: DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); return(rc); } /****************************************************************************/ /* Encapsulate an mbuf cluster into the page chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_get_pg_buf(struct bce_softc *sc, u16 prod, u16 prod_idx) { bus_dma_segment_t segs[1]; struct mbuf *m_new = NULL; struct rx_bd *pgbd; int error, nsegs, rc = 0; #ifdef BCE_DEBUG u16 debug_prod_idx = prod_idx; #endif DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); /* Make sure the inputs are valid. */ DBRUNIF((prod_idx > MAX_PG_BD_ALLOC), BCE_PRINTF("%s(%d): page producer out of range: " "0x%04X > 0x%04X\n", __FILE__, __LINE__, prod_idx, (u16)MAX_PG_BD_ALLOC)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " "chain_prod = 0x%04X\n", __FUNCTION__, prod, prod_idx); /* Update counters if we've hit a new low or run out of pages. */ DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark), sc->pg_low_watermark = sc->free_pg_bd); DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++); /* Simulate an mbuf allocation failure. */ DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), sc->mbuf_alloc_failed_count++; sc->mbuf_alloc_failed_sim_count++; rc = ENOBUFS; goto bce_get_pg_buf_exit); /* This is a new mbuf allocation. */ m_new = m_getcl(M_NOWAIT, MT_DATA, 0); if (m_new == NULL) { sc->mbuf_alloc_failed_count++; rc = ENOBUFS; goto bce_get_pg_buf_exit; } DBRUN(sc->debug_pg_mbuf_alloc++); m_new->m_len = MCLBYTES; /* ToDo: Consider calling m_fragment() to test error handling. */ /* Map the mbuf cluster into device memory. */ error = bus_dmamap_load_mbuf_sg(sc->pg_mbuf_tag, sc->pg_mbuf_map[prod_idx], m_new, segs, &nsegs, BUS_DMA_NOWAIT); /* Handle any mapping errors. */ if (error) { BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n", __FILE__, __LINE__); m_freem(m_new); DBRUN(sc->debug_pg_mbuf_alloc--); rc = ENOBUFS; goto bce_get_pg_buf_exit; } /* All mbufs must map to a single segment. */ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nsegs)); /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */ /* * The page chain uses the same rx_bd data structure * as the receive chain but doesn't require a byte sequence (bseq). */ pgbd = &sc->pg_bd_chain[PG_PAGE(prod_idx)][PG_IDX(prod_idx)]; pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); pgbd->rx_bd_len = htole32(MCLBYTES); pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); /* Save the mbuf and update our counter. */ sc->pg_mbuf_ptr[prod_idx] = m_new; sc->free_pg_bd--; DBRUNMSG(BCE_INSANE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " "prod_idx = 0x%04X\n", __FUNCTION__, prod, prod_idx); bce_get_pg_buf_exit: DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); return(rc); } /****************************************************************************/ /* Initialize the TX context memory. 
*/ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_init_tx_context(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); /* Initialize the context ID for an L2 TX chain. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { /* Set the CID type to support an L2 connection. */ val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | BCE_L2CTX_TX_TYPE_SIZE_L2_XI; CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val); /* Point the hardware to the first page in the chain. */ val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); } else { /* Set the CID type to support an L2 connection. */ val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); /* Point the hardware to the first page in the chain. */ val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI, val); val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO, val); } DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Allocate memory and initialize the TX data structures. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_init_tx_chain(struct bce_softc *sc) { struct tx_bd *txbd; int i, rc = 0; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); /* Set the initial TX producer/consumer indices. */ sc->tx_prod = 0; sc->tx_cons = 0; sc->tx_prod_bseq = 0; sc->used_tx_bd = 0; sc->max_tx_bd = USABLE_TX_BD_ALLOC; DBRUN(sc->tx_hi_watermark = 0); DBRUN(sc->tx_full_count = 0); /* * The NetXtreme II supports a linked-list structure called * a Buffer Descriptor Chain (or BD chain). A BD chain * consists of a series of one or more chain pages, each of which * consists of a fixed number of BD entries. * The last BD entry on each page is a pointer to the next page * in the chain, and the last pointer in the BD chain * points back to the beginning of the chain. */ /* Set the TX next pointer chain entries. */ for (i = 0; i < sc->tx_pages; i++) { int j; txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; /* Check if we've reached the last page. */ if (i == (sc->tx_pages - 1)) j = 0; else j = i + 1; txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); } bce_init_tx_context(sc); DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); return(rc); } /****************************************************************************/ /* Free memory and clear the TX data structures. */ /* */ /* Returns: */ /* Nothing.
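*/ /* For example, with sc->tx_pages == 2 the next-pointer entries written above form a ring: the last BD on page 0 points to page 1 and the last BD on page 1 points back to page 0, which is why j wraps to 0 on the final page. */ /*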
*/ /****************************************************************************/ static void bce_free_tx_chain(struct bce_softc *sc) { int i; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ for (i = 0; i < MAX_TX_BD_AVAIL; i++) { if (sc->tx_mbuf_ptr[i] != NULL) { if (sc->tx_mbuf_map[i] != NULL) bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i], BUS_DMASYNC_POSTWRITE); m_freem(sc->tx_mbuf_ptr[i]); sc->tx_mbuf_ptr[i] = NULL; DBRUN(sc->debug_tx_mbuf_alloc--); } } /* Clear each TX chain page. */ for (i = 0; i < sc->tx_pages; i++) bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); sc->used_tx_bd = 0; /* Check if we lost any mbufs in the process. */ DBRUNIF((sc->debug_tx_mbuf_alloc), BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs " "from tx chain!\n", __FILE__, __LINE__, sc->debug_tx_mbuf_alloc)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); } /****************************************************************************/ /* Initialize the RX context memory. */ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_init_rx_context(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); /* Init the type, size, and BD cache levels for the RX context. */ val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT); /* * Set the level for generating pause frames * when the number of available rx_bd's gets * too low (the low watermark) and the level * when pause frames can be stopped (the high * watermark). */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { u32 lo_water, hi_water; if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) { lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT; } else { lo_water = 0; } if (lo_water >= USABLE_RX_BD_ALLOC) { lo_water = 0; } hi_water = USABLE_RX_BD_ALLOC / 4; if (hi_water <= lo_water) { lo_water = 0; } lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE; hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE; if (hi_water > 0xf) hi_water = 0xf; else if (hi_water == 0) lo_water = 0; val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) | (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT); } CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val); /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val = REG_RD(sc, BCE_MQ_MAP_L2_5); REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM); } /* Point the hardware to the first page in the chain. */ val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val); val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Allocate memory and initialize the RX data structures. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_init_rx_chain(struct bce_softc *sc) { struct rx_bd *rxbd; int i, rc = 0; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* Initialize the RX producer and consumer indices. 
*/ sc->rx_prod = 0; sc->rx_cons = 0; sc->rx_prod_bseq = 0; sc->free_rx_bd = USABLE_RX_BD_ALLOC; sc->max_rx_bd = USABLE_RX_BD_ALLOC; /* Initialize the RX next pointer chain entries. */ for (i = 0; i < sc->rx_pages; i++) { int j; rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; /* Check if we've reached the last page. */ if (i == (sc->rx_pages - 1)) j = 0; else j = i + 1; /* Setup the chain page pointers. */ rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j])); rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j])); } /* Fill up the RX chain. */ bce_fill_rx_chain(sc); DBRUN(sc->rx_low_watermark = USABLE_RX_BD_ALLOC); DBRUN(sc->rx_empty_count = 0); for (i = 0; i < sc->rx_pages; i++) { bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } bce_init_rx_context(sc); DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* ToDo: Are there possible failure modes here? */ return(rc); } /****************************************************************************/ /* Add mbufs to the RX chain until it's full or an mbuf allocation error */ /* occurs. */ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_fill_rx_chain(struct bce_softc *sc) { u16 prod, prod_idx; u32 prod_bseq; DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* Get the RX chain producer indices. */ prod = sc->rx_prod; prod_bseq = sc->rx_prod_bseq; /* Keep filling the RX chain until it's full. */ while (sc->free_rx_bd > 0) { prod_idx = RX_CHAIN_IDX(prod); if (bce_get_rx_buf(sc, prod, prod_idx, &prod_bseq)) { /* Bail out if we can't add an mbuf to the chain. */ break; } prod = NEXT_RX_BD(prod); } /* Save the RX chain producer indices. */ sc->rx_prod = prod; sc->rx_prod_bseq = prod_bseq; /* We should never end up pointing to a next page pointer. */ DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n", __FUNCTION__, prod)); /* Write the mailbox and tell the chip about the waiting rx_bd's. */ REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod); REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, prod_bseq); DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Free memory and clear the RX data structures. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_free_rx_chain(struct bce_softc *sc) { int i; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); /* Free any mbufs still in the RX mbuf chain. */ for (i = 0; i < MAX_RX_BD_AVAIL; i++) { if (sc->rx_mbuf_ptr[i] != NULL) { if (sc->rx_mbuf_map[i] != NULL) bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i], BUS_DMASYNC_POSTREAD); m_freem(sc->rx_mbuf_ptr[i]); sc->rx_mbuf_ptr[i] = NULL; DBRUN(sc->debug_rx_mbuf_alloc--); } } /* Clear each RX chain page. */ for (i = 0; i < sc->rx_pages; i++) if (sc->rx_bd_chain[i] != NULL) bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ); sc->free_rx_bd = sc->max_rx_bd; /* Check if we lost any mbufs in the process. */ DBRUNIF((sc->debug_rx_mbuf_alloc), BCE_PRINTF("%s(): Memory leak!
Lost %d mbufs from rx chain!\n", __FUNCTION__, sc->debug_rx_mbuf_alloc)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); } /****************************************************************************/ /* Allocate memory and initialize the page data structures. */ /* Assumes that bce_init_rx_chain() has not already been called. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_init_pg_chain(struct bce_softc *sc) { struct rx_bd *pgbd; int i, rc = 0; u32 val; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* Initialize the page producer and consumer indices. */ sc->pg_prod = 0; sc->pg_cons = 0; sc->free_pg_bd = USABLE_PG_BD_ALLOC; sc->max_pg_bd = USABLE_PG_BD_ALLOC; DBRUN(sc->pg_low_watermark = sc->max_pg_bd); DBRUN(sc->pg_empty_count = 0); /* Initialize the page next pointer chain entries. */ for (i = 0; i < sc->pg_pages; i++) { int j; pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE]; /* Check if we've reached the last page. */ if (i == (sc->pg_pages - 1)) j = 0; else j = i + 1; /* Setup the chain page pointers. */ pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j])); pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j])); } /* Setup the MQ BIN mapping for host_pg_bidx. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0); /* Configure the rx_bd and page chain mbuf cluster size. */ val = (sc->rx_bd_mbuf_data_len << 16) | MCLBYTES; CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val); /* Configure the context reserved for jumbo support. */ CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY, BCE_L2CTX_RX_RBDC_JUMBO_KEY); /* Point the hardware to the first page in the page chain. */ val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val); val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val); /* Fill up the page chain. */ bce_fill_pg_chain(sc); for (i = 0; i < sc->pg_pages; i++) { bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } DBRUNMSG(BCE_EXTREME_RECV, bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); return(rc); } /****************************************************************************/ /* Add mbufs to the page chain until it's full or an mbuf allocation error */ /* occurs. */ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_fill_pg_chain(struct bce_softc *sc) { u16 prod, prod_idx; DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* Get the page chain producer index. */ prod = sc->pg_prod; /* Keep filling the page chain until it's full. */ while (sc->free_pg_bd > 0) { prod_idx = PG_CHAIN_IDX(prod); if (bce_get_pg_buf(sc, prod, prod_idx)) { /* Bail out if we can't add an mbuf to the chain. */ break; } prod = NEXT_PG_BD(prod); } /* Save the page chain producer index.
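Unlike the RX chain, the page chain carries no byte sequence (bseq), so only the producer index mailbox needs to be updated below.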
*/ sc->pg_prod = prod; DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n", __FUNCTION__, prod)); /* * Write the mailbox and tell the chip about * the new rx_bd's in the page chain. */ REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX, prod); DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Free memory and clear the page chain data structures. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_free_pg_chain(struct bce_softc *sc) { int i; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); /* Free any mbufs still in the mbuf page chain. */ for (i = 0; i < MAX_PG_BD_AVAIL; i++) { if (sc->pg_mbuf_ptr[i] != NULL) { if (sc->pg_mbuf_map[i] != NULL) bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[i], BUS_DMASYNC_POSTREAD); m_freem(sc->pg_mbuf_ptr[i]); sc->pg_mbuf_ptr[i] = NULL; DBRUN(sc->debug_pg_mbuf_alloc--); } } /* Clear each page chain page. */ for (i = 0; i < sc->pg_pages; i++) bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ); sc->free_pg_bd = sc->max_pg_bd; /* Check if we lost any mbufs in the process. */ DBRUNIF((sc->debug_pg_mbuf_alloc), BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n", __FUNCTION__, sc->debug_pg_mbuf_alloc)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); } static u32 bce_get_rphy_link(struct bce_softc *sc) { u32 advertise, link; int fdpx; advertise = 0; fdpx = 0; if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0) link = bce_shmem_rd(sc, BCE_RPHY_SERDES_LINK); else link = bce_shmem_rd(sc, BCE_RPHY_COPPER_LINK); if (link & BCE_NETLINK_ANEG_ENB) advertise |= BCE_NETLINK_ANEG_ENB; if (link & BCE_NETLINK_SPEED_10HALF) advertise |= BCE_NETLINK_SPEED_10HALF; if (link & BCE_NETLINK_SPEED_10FULL) { advertise |= BCE_NETLINK_SPEED_10FULL; fdpx++; } if (link & BCE_NETLINK_SPEED_100HALF) advertise |= BCE_NETLINK_SPEED_100HALF; if (link & BCE_NETLINK_SPEED_100FULL) { advertise |= BCE_NETLINK_SPEED_100FULL; fdpx++; } if (link & BCE_NETLINK_SPEED_1000HALF) advertise |= BCE_NETLINK_SPEED_1000HALF; if (link & BCE_NETLINK_SPEED_1000FULL) { advertise |= BCE_NETLINK_SPEED_1000FULL; fdpx++; } if (link & BCE_NETLINK_SPEED_2500HALF) advertise |= BCE_NETLINK_SPEED_2500HALF; if (link & BCE_NETLINK_SPEED_2500FULL) { advertise |= BCE_NETLINK_SPEED_2500FULL; fdpx++; } if (fdpx) advertise |= BCE_NETLINK_FC_PAUSE_SYM | BCE_NETLINK_FC_PAUSE_ASYM; if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) advertise |= BCE_NETLINK_PHY_APP_REMOTE | BCE_NETLINK_ETH_AT_WIRESPEED; return (advertise); } /****************************************************************************/ /* Set media options. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_ifmedia_upd(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; int error; DBENTER(BCE_VERBOSE); BCE_LOCK(sc); error = bce_ifmedia_upd_locked(ifp); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE); return (error); } /****************************************************************************/ /* Set media options. */ /* */ /* Returns: */ /* 0 for success, positive value for failure.
*/ /****************************************************************************/ static int bce_ifmedia_upd_locked(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; u32 link; int error, fdx; DBENTER(BCE_VERBOSE_PHY); error = 0; BCE_LOCK_ASSERT(sc); sc->bce_link_up = FALSE; if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { ifm = &sc->bce_ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); link = 0; fdx = IFM_OPTIONS(ifm->ifm_media) & IFM_FDX; switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * Check advertised link of remote PHY by reading * BCE_RPHY_SERDES_LINK or BCE_RPHY_COPPER_LINK. * Always use the same link type of remote PHY. */ link = bce_get_rphy_link(sc); break; case IFM_2500_SX: if ((sc->bce_phy_flags & (BCE_PHY_REMOTE_PORT_FIBER_FLAG | BCE_PHY_2_5G_CAPABLE_FLAG)) == 0) return (EINVAL); /* * XXX * Have to enable forced 2.5Gbps configuration. */ if (fdx != 0) link |= BCE_NETLINK_SPEED_2500FULL; else link |= BCE_NETLINK_SPEED_2500HALF; break; case IFM_1000_SX: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) return (EINVAL); /* * XXX * Have to disable 2.5Gbps configuration. */ if (fdx != 0) link = BCE_NETLINK_SPEED_1000FULL; else link = BCE_NETLINK_SPEED_1000HALF; break; case IFM_1000_T: if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) return (EINVAL); if (fdx != 0) link = BCE_NETLINK_SPEED_1000FULL; else link = BCE_NETLINK_SPEED_1000HALF; break; case IFM_100_TX: if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) return (EINVAL); if (fdx != 0) link = BCE_NETLINK_SPEED_100FULL; else link = BCE_NETLINK_SPEED_100HALF; break; case IFM_10_T: if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) return (EINVAL); if (fdx != 0) link = BCE_NETLINK_SPEED_10FULL; else link = BCE_NETLINK_SPEED_10HALF; break; default: return (EINVAL); } if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { /* * XXX * Advertise pause capability for full-duplex media. */ if (fdx != 0) link |= BCE_NETLINK_FC_PAUSE_SYM | BCE_NETLINK_FC_PAUSE_ASYM; if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) link |= BCE_NETLINK_PHY_APP_REMOTE | BCE_NETLINK_ETH_AT_WIRESPEED; } bce_shmem_wr(sc, BCE_MB_ARGS_0, link); error = bce_fw_sync(sc, BCE_DRV_MSG_CODE_CMD_SET_LINK); } else { mii = device_get_softc(sc->bce_miibus); /* Make sure the MII bus has been enumerated. */ if (mii) { LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); } } DBEXIT(BCE_VERBOSE_PHY); return (error); } static void bce_ifmedia_sts_rphy(struct bce_softc *sc, struct ifmediareq *ifmr) { struct ifnet *ifp; u32 link; ifp = sc->bce_ifp; BCE_LOCK_ASSERT(sc); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; link = bce_shmem_rd(sc, BCE_LINK_STATUS); /* XXX Handle heart beat status? 
*/ if ((link & BCE_LINK_STATUS_LINK_UP) != 0) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; ifp->if_baudrate = 0; return; } switch (link & BCE_LINK_STATUS_SPEED_MASK) { case BCE_LINK_STATUS_10HALF: ifmr->ifm_active |= IFM_10_T | IFM_HDX; ifp->if_baudrate = IF_Mbps(10UL); break; case BCE_LINK_STATUS_10FULL: ifmr->ifm_active |= IFM_10_T | IFM_FDX; ifp->if_baudrate = IF_Mbps(10UL); break; case BCE_LINK_STATUS_100HALF: ifmr->ifm_active |= IFM_100_TX | IFM_HDX; ifp->if_baudrate = IF_Mbps(100UL); break; case BCE_LINK_STATUS_100FULL: ifmr->ifm_active |= IFM_100_TX | IFM_FDX; ifp->if_baudrate = IF_Mbps(100UL); break; case BCE_LINK_STATUS_1000HALF: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) ifmr->ifm_active |= IFM_1000_T | IFM_HDX; else ifmr->ifm_active |= IFM_1000_SX | IFM_HDX; ifp->if_baudrate = IF_Mbps(1000UL); break; case BCE_LINK_STATUS_1000FULL: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) ifmr->ifm_active |= IFM_1000_T | IFM_FDX; else ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; ifp->if_baudrate = IF_Mbps(1000UL); break; case BCE_LINK_STATUS_2500HALF: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) { ifmr->ifm_active |= IFM_NONE; return; } else ifmr->ifm_active |= IFM_2500_SX | IFM_HDX; ifp->if_baudrate = IF_Mbps(2500UL); break; case BCE_LINK_STATUS_2500FULL: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) { ifmr->ifm_active |= IFM_NONE; return; } else ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; ifp->if_baudrate = IF_Mbps(2500UL); break; default: ifmr->ifm_active |= IFM_NONE; return; } if ((link & BCE_LINK_STATUS_RX_FC_ENABLED) != 0) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if ((link & BCE_LINK_STATUS_TX_FC_ENABLED) != 0) ifmr->ifm_active |= IFM_ETH_TXPAUSE; } /****************************************************************************/ /* Reports current media status. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct bce_softc *sc = ifp->if_softc; struct mii_data *mii; DBENTER(BCE_VERBOSE_PHY); BCE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { BCE_UNLOCK(sc); return; } if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) bce_ifmedia_sts_rphy(sc, ifmr); else { mii = device_get_softc(sc->bce_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_PHY); } /****************************************************************************/ /* Handles PHY generated interrupt events. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_phy_intr(struct bce_softc *sc) { u32 new_link_state, old_link_state; DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); DBRUN(sc->phy_interrupts++); new_link_state = sc->status_block->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE; old_link_state = sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE; /* Handle any changes if the link state has changed. */ if (new_link_state != old_link_state) { /* Update the status_attn_bits_ack field. 
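The controller latches a link attention in status_attn_bits and the driver acknowledges it by toggling the matching bit in status_attn_bits_ack through the set/clear command registers below, so the comparison above only fires once per link transition.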
*/ if (new_link_state) { REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, STATUS_ATTN_BITS_LINK_STATE); DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n", __FUNCTION__); } else { REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, STATUS_ATTN_BITS_LINK_STATE); DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n", __FUNCTION__); } if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { if (new_link_state) { if (bootverbose) if_printf(sc->bce_ifp, "link UP\n"); if_link_state_change(sc->bce_ifp, LINK_STATE_UP); } else { if (bootverbose) if_printf(sc->bce_ifp, "link DOWN\n"); if_link_state_change(sc->bce_ifp, LINK_STATE_DOWN); } } /* * Assume link is down and allow * tick routine to update the state * based on the actual media state. */ sc->bce_link_up = FALSE; callout_stop(&sc->bce_tick_callout); bce_tick(sc); } /* Acknowledge the link change interrupt. */ REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Reads the receive consumer value from the status block (skipping over */ /* chain page pointer if necessary). */ /* */ /* Returns: */ /* hw_cons */ /****************************************************************************/ static inline u16 bce_get_hw_rx_cons(struct bce_softc *sc) { u16 hw_cons; rmb(); hw_cons = sc->status_block->status_rx_quick_consumer_index0; if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) hw_cons++; return hw_cons; } /****************************************************************************/ /* Handles received frame interrupt events. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_rx_intr(struct bce_softc *sc) { struct ifnet *ifp = sc->bce_ifp; struct l2_fhdr *l2fhdr; struct ether_vlan_header *vh; unsigned int pkt_len; u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons; u32 status; unsigned int rem_len; u16 sw_pg_cons, sw_pg_cons_idx; DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); DBRUN(sc->interrupts_rx++); DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, " "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); /* Prepare the RX chain pages to be accessed by the host CPU. */ for (int i = 0; i < sc->rx_pages; i++) bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD); /* Prepare the page chain pages to be accessed by the host CPU. */ if (bce_hdr_split == TRUE) { for (int i = 0; i < sc->pg_pages; i++) bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD); } /* Get the hardware's view of the RX consumer index. */ hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); /* Get working copies of the driver's view of the consumer indices. */ sw_rx_cons = sc->rx_cons; sw_pg_cons = sc->pg_cons; /* Update some debug statistics counters */ DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), sc->rx_low_watermark = sc->free_rx_bd); DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); /* Scan through the receive chain as long as there is work to do */ /* ToDo: Consider setting a limit on the number of packets processed. */ rmb(); while (sw_rx_cons != hw_rx_cons) { struct mbuf *m0; /* Convert the producer/consumer indices to an actual rx_bd index. */ sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); /* Unmap the mbuf from DMA space. 
*/ bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[sw_rx_cons_idx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[sw_rx_cons_idx]); /* Remove the mbuf from the RX chain. */ m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx]; sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL; DBRUN(sc->debug_rx_mbuf_alloc--); sc->free_rx_bd++; /* * Frames received on the NetXtreme II are prepended * with an l2_fhdr structure which provides status * information about the received frame (including * VLAN tags and checksum info). The frames are * also automatically adjusted to word align the IP * header (i.e. two null bytes are inserted before * the Ethernet header). As a result the data * DMA'd by the controller into the mbuf looks * like this: * * +---------+-----+---------------------+-----+ * | l2_fhdr | pad | packet data | FCS | * +---------+-----+---------------------+-----+ * * The l2_fhdr needs to be checked and skipped and * the FCS needs to be stripped before sending the * packet up the stack. */ l2fhdr = mtod(m0, struct l2_fhdr *); /* Get the packet data + FCS length and the status. */ pkt_len = l2fhdr->l2_fhdr_pkt_len; status = l2fhdr->l2_fhdr_status; /* * Skip over the l2_fhdr and pad, resulting in the * following data in the mbuf: * +---------------------+-----+ * | packet data | FCS | * +---------------------+-----+ */ m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN); /* * When split header mode is used, an Ethernet frame * may be split across the receive chain and the * page chain. If that occurs an mbuf cluster must be * reassembled from the individual mbuf pieces. */ if (bce_hdr_split == TRUE) { /* * Check whether the received frame fits in a single * mbuf or not (i.e. packet data + FCS <= * sc->rx_bd_mbuf_data_len bytes). */ if (pkt_len > m0->m_len) { /* * The received frame is larger than a single mbuf. * If the frame was a TCP frame then only the TCP * header is placed in the mbuf, the remaining * payload (including FCS) is placed in the page * chain, the SPLIT flag is set, and the header * length is placed in the IP checksum field. * If the frame is not a TCP frame then the mbuf * is filled and the remaining bytes are placed * in the page chain. */ DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large " "packet.\n", __FUNCTION__); DBRUN(sc->split_header_frames_rcvd++); /* * When the page chain is enabled and the TCP * header has been split from the TCP payload, * the ip_xsum field will reflect the length * of the TCP header, not the IP checksum. Set * the packet length of the mbuf accordingly. */ if (status & L2_FHDR_STATUS_SPLIT) { m0->m_len = l2fhdr->l2_fhdr_ip_xsum; DBRUN(sc->split_header_tcp_frames_rcvd++); } rem_len = pkt_len - m0->m_len; /* Pull mbufs off the page chain for any remaining data. */ while (rem_len > 0) { struct mbuf *m_pg; sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons); /* Remove the mbuf from the page chain. */ m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx]; sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL; DBRUN(sc->debug_pg_mbuf_alloc--); sc->free_pg_bd++; /* Unmap the page chain mbuf from DMA space. */ bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[sw_pg_cons_idx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->pg_mbuf_tag, sc->pg_mbuf_map[sw_pg_cons_idx]); /* Adjust the mbuf length. */ if (rem_len < m_pg->m_len) { /* The mbuf chain is complete. */ m_pg->m_len = rem_len; rem_len = 0; } else { /* More packet data is waiting. */ rem_len -= m_pg->m_len; } /* Concatenate the mbuf cluster to the mbuf. */ m_cat(m0, m_pg); sw_pg_cons = NEXT_PG_BD(sw_pg_cons); } /* Set the total packet length.
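*/ /* Worked example with illustrative sizes: for a 4000 byte TCP frame received with header splitting enabled, the rx_bd mbuf may hold only the Ethernet/IP/TCP headers while the payload arrives in MCLBYTES page chain clusters; the m_cat() loop above stitches the clusters onto m0 while rem_len counts down the bytes still owed. */ /*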
*/ m0->m_pkthdr.len = pkt_len; } else { /* * The received packet is small and fits in a * single mbuf (i.e. the l2_fhdr + pad + packet + * FCS <= MHLEN). In other words, the packet is * 154 bytes or less in size. */ DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small " "packet.\n", __FUNCTION__); /* Set the total packet length. */ m0->m_pkthdr.len = m0->m_len = pkt_len; } } else /* Set the total packet length. */ m0->m_pkthdr.len = m0->m_len = pkt_len; /* Remove the trailing Ethernet FCS. */ m_adj(m0, -ETHER_CRC_LEN); /* Check that the resulting mbuf chain is valid. */ DBRUN(m_sanity(m0, FALSE)); DBRUNIF(((m0->m_len < ETHER_HDR_LEN) | (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)), BCE_PRINTF("Invalid Ethernet frame size!\n"); m_print(m0, 128)); DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control), sc->l2fhdr_error_sim_count++; status = status | L2_FHDR_ERRORS_PHY_DECODE); /* Check the received frame for errors. */ if (status & (L2_FHDR_ERRORS_BAD_CRC | L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { /* Log the error and release the mbuf. */ sc->l2fhdr_error_count++; m_freem(m0); m0 = NULL; goto bce_rx_intr_next_rx; } /* Send the packet to the appropriate interface. */ m0->m_pkthdr.rcvif = ifp; /* Assume no hardware checksum. */ m0->m_pkthdr.csum_flags = 0; /* Validate the checksum if offload enabled. */ if (ifp->if_capenable & IFCAP_RXCSUM) { /* Check for an IP datagram. */ if (!(status & L2_FHDR_STATUS_SPLIT) && (status & L2_FHDR_STATUS_IP_DATAGRAM)) { m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; DBRUN(sc->csum_offload_ip++); /* Check if the IP checksum is valid. */ if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) m0->m_pkthdr.csum_flags |= CSUM_IP_VALID; } /* Check for a valid TCP/UDP frame. */ if (status & (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_UDP_DATAGRAM)) { /* Check for a good TCP/UDP checksum. */ if ((status & (L2_FHDR_ERRORS_TCP_XSUM | L2_FHDR_ERRORS_UDP_XSUM)) == 0) { DBRUN(sc->csum_offload_tcp_udp++); m0->m_pkthdr.csum_data = l2fhdr->l2_fhdr_tcp_udp_xsum; m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } } /* Attach the VLAN tag. */ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && !(sc->rx_mode & BCE_EMAC_RX_MODE_KEEP_VLAN_TAG)) { DBRUN(sc->vlan_tagged_frames_rcvd++); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { DBRUN(sc->vlan_tagged_frames_stripped++); #if __FreeBSD_version < 700000 VLAN_INPUT_TAG(ifp, m0, l2fhdr->l2_fhdr_vlan_tag, continue); #else m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag; m0->m_flags |= M_VLANTAG; #endif } else { /* * bce(4) controllers can't disable VLAN * tag stripping if management firmware * (ASF/IPMI/UMP) is running. So we always * strip VLAN tag and manually reconstruct * the VLAN frame by appending stripped * VLAN tag in driver if VLAN tag stripping * was disabled. * * TODO: LLC SNAP handling. */ bcopy(mtod(m0, uint8_t *), mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN * 2); m0->m_data -= ETHER_VLAN_ENCAP_LEN; vh = mtod(m0, struct ether_vlan_header *); vh->evl_encap_proto = htons(ETHERTYPE_VLAN); vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag); m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN; m0->m_len += ETHER_VLAN_ENCAP_LEN; } } /* Increment received packet statistics. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); bce_rx_intr_next_rx: sw_rx_cons = NEXT_RX_BD(sw_rx_cons); /* If we have a packet, pass it up the stack */ if (m0) { /* Make sure we don't lose our place when we release the lock. 
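The lock is dropped around if_input() below because the stack may re-enter the driver while processing the packet; the consumer indices are saved to the softc first and reloaded once the lock is reacquired.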
*/ sc->rx_cons = sw_rx_cons; sc->pg_cons = sw_pg_cons; BCE_UNLOCK(sc); (*ifp->if_input)(ifp, m0); BCE_LOCK(sc); /* Recover our place. */ sw_rx_cons = sc->rx_cons; sw_pg_cons = sc->pg_cons; } /* Refresh hw_cons to see if there's new work */ if (sw_rx_cons == hw_rx_cons) hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); } /* No new packets. Refill the page chain. */ if (bce_hdr_split == TRUE) { sc->pg_cons = sw_pg_cons; bce_fill_pg_chain(sc); } /* No new packets. Refill the RX chain. */ sc->rx_cons = sw_rx_cons; bce_fill_rx_chain(sc); /* Prepare the page chain pages to be accessed by the NIC. */ for (int i = 0; i < sc->rx_pages; i++) bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE); if (bce_hdr_split == TRUE) { for (int i = 0; i < sc->pg_pages; i++) bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE); } DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, " "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Reads the transmit consumer value from the status block (skipping over */ /* chain page pointer if necessary). */ /* */ /* Returns: */ /* hw_cons */ /****************************************************************************/ static inline u16 bce_get_hw_tx_cons(struct bce_softc *sc) { u16 hw_cons; mb(); hw_cons = sc->status_block->status_tx_quick_consumer_index0; if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) hw_cons++; return hw_cons; } /****************************************************************************/ /* Handles transmit completion interrupt events. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_tx_intr(struct bce_softc *sc) { struct ifnet *ifp = sc->bce_ifp; u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); DBRUN(sc->interrupts_tx++); DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, " "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); BCE_LOCK_ASSERT(sc); /* Get the hardware's view of the TX consumer index. */ hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); sw_tx_cons = sc->tx_cons; /* Prevent speculative reads of the status block. */ bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, BUS_SPACE_BARRIER_READ); /* Cycle through any completed TX chain page entries. */ while (sw_tx_cons != hw_tx_cons) { #ifdef BCE_DEBUG struct tx_bd *txbd = NULL; #endif sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); DBPRINT(sc, BCE_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, " "sw_tx_chain_cons = 0x%04X\n", __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); DBRUNIF((sw_tx_chain_cons > MAX_TX_BD_ALLOC), BCE_PRINTF("%s(%d): TX chain consumer out of range! " " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons, (int) MAX_TX_BD_ALLOC); bce_breakpoint(sc)); DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)] [TX_IDX(sw_tx_chain_cons)]); DBRUNIF((txbd == NULL), BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n", __FILE__, __LINE__, sw_tx_chain_cons); bce_breakpoint(sc)); DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__); bce_dump_txbd(sc, sw_tx_chain_cons, txbd)); /* * Free the associated mbuf. Remember * that only the last tx_bd of a packet * has an mbuf pointer and DMA map. 
*/ if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { /* Validate that this is the last tx_bd. */ DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)), BCE_PRINTF("%s(%d): tx_bd END flag not set but " "txmbuf == NULL!\n", __FILE__, __LINE__); bce_breakpoint(sc)); DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): Unloading map/freeing mbuf " "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons)); /* Unmap the mbuf. */ bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[sw_tx_chain_cons]); /* Free the mbuf. */ m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; DBRUN(sc->debug_tx_mbuf_alloc--); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } sc->used_tx_bd--; sw_tx_cons = NEXT_TX_BD(sw_tx_cons); /* Refresh hw_cons to see if there's new work. */ hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); /* Prevent speculative reads of the status block. */ bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, BUS_SPACE_BARRIER_READ); } /* Clear the TX timeout timer. */ sc->watchdog_timer = 0; /* Clear the tx hardware queue full flag. */ if (sc->used_tx_bd < sc->max_tx_bd) { DBRUNIF((ifp->if_drv_flags & IFF_DRV_OACTIVE), DBPRINT(sc, BCE_INFO_SEND, "%s(): Open TX chain! %d/%d (used/total)\n", __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd)); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->tx_cons = sw_tx_cons; DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, " "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Disables interrupt generation. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_disable_intr(struct bce_softc *sc) { DBENTER(BCE_VERBOSE_INTR); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); DBEXIT(BCE_VERBOSE_INTR); } /****************************************************************************/ /* Enables interrupt generation. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_enable_intr(struct bce_softc *sc, int coal_now) { DBENTER(BCE_VERBOSE_INTR); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); /* Force an immediate interrupt (whether there is new data or not). */ if (coal_now) REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW); DBEXIT(BCE_VERBOSE_INTR); } /****************************************************************************/ /* Handles controller initialization. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_init_locked(struct bce_softc *sc) { struct ifnet *ifp; u32 ether_mtu = 0; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK_ASSERT(sc); ifp = sc->bce_ifp; /* Check if the driver is still running and bail out if it is. 
*/ if (ifp->if_drv_flags & IFF_DRV_RUNNING) goto bce_init_locked_exit; bce_stop(sc); if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { BCE_PRINTF("%s(%d): Controller reset failed!\n", __FILE__, __LINE__); goto bce_init_locked_exit; } if (bce_chipinit(sc)) { BCE_PRINTF("%s(%d): Controller initialization failed!\n", __FILE__, __LINE__); goto bce_init_locked_exit; } if (bce_blockinit(sc)) { BCE_PRINTF("%s(%d): Block initialization failed!\n", __FILE__, __LINE__); goto bce_init_locked_exit; } /* Load our MAC address. */ bcopy(IF_LLADDR(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN); bce_set_mac_addr(sc); if (bce_hdr_split == FALSE) bce_get_rx_buffer_sizes(sc, ifp->if_mtu); /* * Calculate and program the hardware Ethernet MTU * size. Be generous on the receive if we have room * and allowed by the user. */ if (bce_strict_rx_mtu == TRUE) ether_mtu = ifp->if_mtu; else { if (bce_hdr_split == TRUE) { if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len + MCLBYTES) ether_mtu = sc->rx_bd_mbuf_data_len + MCLBYTES; else ether_mtu = ifp->if_mtu; } else { if (ifp->if_mtu <= sc->rx_bd_mbuf_data_len) ether_mtu = sc->rx_bd_mbuf_data_len; else ether_mtu = ifp->if_mtu; } } ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__, ether_mtu); /* Program the mtu, enabling jumbo frame support if necessary. */ if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)) REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) | BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA); else REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu); /* Program appropriate promiscuous/multicast filtering. */ bce_set_rx_mode(sc); if (bce_hdr_split == TRUE) { /* Init page buffer descriptor chain. */ bce_init_pg_chain(sc); } /* Init RX buffer descriptor chain. */ bce_init_rx_chain(sc); /* Init TX buffer descriptor chain. */ bce_init_tx_chain(sc); /* Enable host interrupts. */ bce_enable_intr(sc, 1); bce_ifmedia_upd_locked(ifp); /* Let the OS know the driver is up and running. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); bce_init_locked_exit: DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the controller just enough so that any management firmware */ /* running on the device will continue to operate correctly. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_mgmt_init_locked(struct bce_softc *sc) { struct ifnet *ifp; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK_ASSERT(sc); /* Bail out if management firmware is not running. */ if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) { DBPRINT(sc, BCE_VERBOSE_SPECIAL, "No management firmware running...\n"); goto bce_mgmt_init_locked_exit; } ifp = sc->bce_ifp; /* Enable all critical blocks in the MAC. */ REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); DELAY(20); bce_ifmedia_upd_locked(ifp); bce_mgmt_init_locked_exit: DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Handles controller initialization when called from an unlocked routine. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init(void *xsc) { struct bce_softc *sc = xsc; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK(sc); bce_init_locked(sc); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Modifies an mbuf for TSO on the hardware. */ /* */ /* Returns: */ /* Pointer to the modified mbuf, or NULL on failure. */ /****************************************************************************/ static struct mbuf * bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags) { struct mbuf *m; struct ether_header *eh; struct ip *ip; struct tcphdr *th; u16 etype; int hdr_len, ip_hlen = 0, tcp_hlen = 0, ip_len = 0; DBRUN(sc->tso_frames_requested++); /* Controller may modify mbuf chains. */ if (M_WRITABLE(*m_head) == 0) { m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m == NULL) { sc->mbuf_alloc_failed_count++; *m_head = NULL; return (NULL); } *m_head = m; } /* * For TSO the controller needs two pieces of info, * the MSS and the IP+TCP options length. */ m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (NULL); } eh = mtod(m, struct ether_header *); etype = ntohs(eh->ether_type); /* Check for supported TSO Ethernet types (only IPv4 for now) */ switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); /* TSO only supported for TCP protocol. */ if (ip->ip_p != IPPROTO_TCP) { BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!\n", __FILE__, __LINE__); m_freem(*m_head); *m_head = NULL; return (NULL); } /* Get IP header length in bytes (min 20) */ ip_hlen = ip->ip_hl << 2; m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (NULL); } /* Get the TCP header length in bytes (min 20) */ ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); th = (struct tcphdr *)((caddr_t)ip + ip_hlen); tcp_hlen = (th->th_off << 2); /* Make sure all IP/TCP options live in the same buffer. */ m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen + tcp_hlen); if (m == NULL) { *m_head = NULL; return (NULL); } /* Clear IP header length and checksum, will be calc'd by h/w. */ ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); ip_len = ip->ip_len; ip->ip_len = 0; ip->ip_sum = 0; break; case ETHERTYPE_IPV6: BCE_PRINTF("%s(%d): TSO over IPv6 not supported!\n", __FILE__, __LINE__); m_freem(*m_head); *m_head = NULL; return (NULL); /* NOT REACHED */ default: BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!\n", __FILE__, __LINE__); m_freem(*m_head); *m_head = NULL; return (NULL); } hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen; DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, " "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n", __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen, tcp_hlen, ip_len); /* Set the LSO flag in the TX BD */ *flags |= TX_BD_FLAGS_SW_LSO; /* Set the length of IP + TCP options (in 32 bit words) */ *flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8); DBRUN(sc->tso_frames_completed++); return (*m_head); } /****************************************************************************/ /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */ /* memory visible to the controller. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /* Modified: */ /* m_head: May be set to NULL if MBUF is excessively fragmented. */ /****************************************************************************/ static int bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head) { bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; bus_dmamap_t map; struct tx_bd *txbd = NULL; struct mbuf *m0; u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0; u32 prod_bseq; #ifdef BCE_DEBUG u16 debug_prod; #endif int i, error, nsegs, rc = 0; DBENTER(BCE_VERBOSE_SEND); /* Make sure we have room in the TX chain. */ if (sc->used_tx_bd >= sc->max_tx_bd) goto bce_tx_encap_exit; /* Transfer any checksum offload flags to the bd. */ m0 = *m_head; if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_TSO) { m0 = bce_tso_setup(sc, m_head, &flags); if (m0 == NULL) { DBRUN(sc->tso_frames_failed++); goto bce_tx_encap_exit; } mss = htole16(m0->m_pkthdr.tso_segsz); } else { if (m0->m_pkthdr.csum_flags & CSUM_IP) flags |= TX_BD_FLAGS_IP_CKSUM; if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; } } /* Transfer any VLAN tags to the bd. */ if (m0->m_flags & M_VLANTAG) { flags |= TX_BD_FLAGS_VLAN_TAG; vlan_tag = m0->m_pkthdr.ether_vtag; } /* Map the mbuf into DMAable memory. */ prod = sc->tx_prod; chain_prod = TX_CHAIN_IDX(prod); map = sc->tx_mbuf_map[chain_prod]; /* Map the mbuf into our DMA address space. */ error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* Check if the DMA mapping was successful */ if (error == EFBIG) { sc->mbuf_frag_count++; /* Try to defrag the mbuf. */ m0 = m_collapse(*m_head, M_NOWAIT, BCE_MAX_SEGMENTS); if (m0 == NULL) { /* Defrag was unsuccessful */ m_freem(*m_head); *m_head = NULL; sc->mbuf_alloc_failed_count++; rc = ENOBUFS; goto bce_tx_encap_exit; } /* Defrag was successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* Still getting an error after a defrag. */ if (error == ENOMEM) { /* Insufficient DMA buffers available. */ sc->dma_map_addr_tx_failed_count++; rc = error; goto bce_tx_encap_exit; } else if (error != 0) { /* Release it and return an error. */ BCE_PRINTF("%s(%d): Unknown error mapping mbuf into " "TX chain!\n", __FILE__, __LINE__); m_freem(m0); *m_head = NULL; sc->dma_map_addr_tx_failed_count++; rc = ENOBUFS; goto bce_tx_encap_exit; } } else if (error == ENOMEM) { /* Insufficient DMA buffers available. */ sc->dma_map_addr_tx_failed_count++; rc = error; goto bce_tx_encap_exit; } else if (error != 0) { m_freem(m0); *m_head = NULL; sc->dma_map_addr_tx_failed_count++; rc = error; goto bce_tx_encap_exit; } /* Make sure there's room in the chain */ if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) { bus_dmamap_unload(sc->tx_mbuf_tag, map); rc = ENOBUFS; goto bce_tx_encap_exit; } /* prod points to an empty tx_bd at this point. */ prod_bseq = sc->tx_prod_bseq; #ifdef BCE_DEBUG debug_prod = chain_prod; #endif DBPRINT(sc, BCE_INFO_SEND, "%s(start): prod = 0x%04X, chain_prod = 0x%04X, " "prod_bseq = 0x%08X\n", __FUNCTION__, prod, chain_prod, prod_bseq); /* * Cycle through each mbuf segment that makes up * the outgoing frame, gathering the mapping info * for that segment and creating a tx_bd for * the mbuf. 
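* * (Worked example of the ring arithmetic below, assuming a 4KiB chain page holds 256 tx_bd entries with the last entry of each page reserved as a pointer to the next page: a raw producer index of 255 would land on that pointer entry, so NEXT_TX_BD()/TX_CHAIN_IDX() skip over it and the 256th usable descriptor actually lives at chain index 256, matching the skip in bce_get_hw_tx_cons() above.)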
*/ for (i = 0; i < nsegs; i++) { chain_prod = TX_CHAIN_IDX(prod); txbd = &sc->tx_bd_chain[TX_PAGE(chain_prod)] [TX_IDX(chain_prod)]; txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr)); txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr)); txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len); txbd->tx_bd_vlan_tag = htole16(vlan_tag); txbd->tx_bd_flags = htole16(flags); prod_bseq += segs[i].ds_len; if (i == 0) txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START); prod = NEXT_TX_BD(prod); } /* Set the END flag on the last TX buffer descriptor. */ txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs)); /* * Ensure that the mbuf pointer for this transmission * is placed at the array index of the last * descriptor in this chain. This is done * because a single map is used for all * segments of the mbuf and we don't want to * unload the map before all of the segments * have been freed. */ sc->tx_mbuf_ptr[chain_prod] = m0; sc->used_tx_bd += nsegs; /* Update some debug statistic counters */ DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), sc->tx_hi_watermark = sc->used_tx_bd); DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++); DBRUN(sc->debug_tx_mbuf_alloc++); DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1)); /* prod points to the next free tx_bd at this point. */ sc->tx_prod = prod; sc->tx_prod_bseq = prod_bseq; /* Tell the chip about the waiting TX frames. */ REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod); REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq); bce_tx_encap_exit: DBEXIT(BCE_VERBOSE_SEND); return(rc); } /****************************************************************************/ /* Main transmit routine when called from another routine with a lock. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start_locked(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; int count = 0; u16 tx_prod, tx_chain_prod; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); BCE_LOCK_ASSERT(sc); /* prod points to the next free tx_bd. */ tx_prod = sc->tx_prod; tx_chain_prod = TX_CHAIN_IDX(tx_prod); DBPRINT(sc, BCE_INFO_SEND, "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, " "tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq); /* If there's no link or the transmit queue is empty then just exit. */ if (sc->bce_link_up == FALSE) { DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n", __FUNCTION__); goto bce_start_locked_exit; } if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n", __FUNCTION__); goto bce_start_locked_exit; } /* * Keep adding entries while there is space in the ring. */ while (sc->used_tx_bd < sc->max_tx_bd) { /* Check for any frames to send. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); /* Stop when the transmit queue is empty. */ if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, place the mbuf back at the * head of the queue and set the OACTIVE flag * to wait for the NIC to drain the chain. */ if (bce_tx_encap(sc, &m_head)) { if (m_head != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; DBPRINT(sc, BCE_INFO_SEND, "TX chain is closed for business! 
Total " "tx_bd used = %d\n", sc->used_tx_bd); break; } count++; /* Send a copy of the frame to any BPF listeners. */ ETHER_BPF_MTAP(ifp, m_head); } /* Exit if no packets were dequeued. */ if (count == 0) { DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were " "dequeued\n", __FUNCTION__); goto bce_start_locked_exit; } DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into " "send queue.\n", __FUNCTION__, count); /* Set the tx timeout. */ sc->watchdog_timer = BCE_TX_TIMEOUT; DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID)); DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc)); bce_start_locked_exit: DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Main transmit routine when called from another routine without a lock. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start(struct ifnet *ifp) { struct bce_softc *sc = ifp->if_softc; DBENTER(BCE_VERBOSE_SEND); BCE_LOCK(sc); bce_start_locked(ifp); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_SEND); } /****************************************************************************/ /* Handles any IOCTL calls from the operating system. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct bce_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int mask, error = 0; DBENTER(BCE_VERBOSE_MISC); switch(command) { /* Set the interface MTU. */ case SIOCSIFMTU: /* Check that the MTU setting is supported. */ if ((ifr->ifr_mtu < BCE_MIN_MTU) || (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) { error = EINVAL; break; } DBPRINT(sc, BCE_INFO_MISC, "SIOCSIFMTU: Changing MTU from %d to %d\n", (int) ifp->if_mtu, (int) ifr->ifr_mtu); BCE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; bce_init_locked(sc); } BCE_UNLOCK(sc); break; /* Set interface flags. */ case SIOCSIFFLAGS: DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n"); BCE_LOCK(sc); /* Check if the interface is up. */ if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* Change promiscuous/multicast flags as necessary. */ bce_set_rx_mode(sc); } else { /* Start the HW */ bce_init_locked(sc); } } else { /* The interface is down, check if driver is running. */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bce_stop(sc); /* If MFW is running, restart the controller a bit. 
*/ if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { bce_reset(sc, BCE_DRV_MSG_CODE_RESET); bce_chipinit(sc); bce_mgmt_init_locked(sc); } } } BCE_UNLOCK(sc); break; /* Add/Delete multicast address */ case SIOCADDMULTI: case SIOCDELMULTI: DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n"); BCE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) bce_set_rx_mode(sc); BCE_UNLOCK(sc); break; /* Set/Get Interface media */ case SIOCSIFMEDIA: case SIOCGIFMEDIA: DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n"); if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) error = ifmedia_ioctl(ifp, ifr, &sc->bce_ifmedia, command); else { mii = device_get_softc(sc->bce_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; /* Set interface capability */ case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask); /* Toggle the TX checksum capabilities enable flag. */ if (mask & IFCAP_TXCSUM && ifp->if_capabilities & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; if (IFCAP_TXCSUM & ifp->if_capenable) ifp->if_hwassist |= BCE_IF_HWASSIST; else ifp->if_hwassist &= ~BCE_IF_HWASSIST; } /* Toggle the RX checksum capabilities enable flag. */ if (mask & IFCAP_RXCSUM && ifp->if_capabilities & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; /* Toggle the TSO capabilities enable flag. */ if (bce_tso_enable && (mask & IFCAP_TSO4) && ifp->if_capabilities & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (IFCAP_TSO4 & ifp->if_capenable) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if (mask & IFCAP_VLAN_HWCSUM && ifp->if_capabilities & IFCAP_VLAN_HWCSUM) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; /* * Don't actually disable VLAN tag stripping as * management firmware (ASF/IPMI/UMP) requires the * feature. If VLAN tag stripping is disabled driver * will manually reconstruct the VLAN frame by * appending stripped VLAN tag. */ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; } VLAN_CAPABILITIES(ifp); break; default: /* We don't know how to handle the IOCTL, pass it on. */ error = ether_ioctl(ifp, command, data); break; } DBEXIT(BCE_VERBOSE_MISC); return(error); } /****************************************************************************/ /* Transmit timeout handler. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_watchdog(struct bce_softc *sc) { uint32_t status; DBENTER(BCE_EXTREME_SEND); BCE_LOCK_ASSERT(sc); status = 0; /* If the watchdog timer hasn't expired then just exit. */ if (sc->watchdog_timer == 0 || --sc->watchdog_timer) goto bce_watchdog_exit; status = REG_RD(sc, BCE_EMAC_RX_STATUS); /* If pause frames are active then don't reset the hardware. */ if ((sc->bce_flags & BCE_USING_RX_FLOW_CONTROL) != 0) { if ((status & BCE_EMAC_RX_STATUS_FFED) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. 
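* * (Pseudo-C sketch of the checks below: if (status & BCE_EMAC_RX_STATUS_FFED) -> still XOFF'ed by the peer, rearm the timer; else if both FF_RECEIVED and N_RECEIVED are set -> recently XOFF'd/XON'd, give TX more time; else -> treat the timeout as a genuine hang and reset the controller.)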
*/ sc->watchdog_timer = BCE_TX_TIMEOUT; goto bce_watchdog_exit; } else if ((status & BCE_EMAC_RX_STATUS_FF_RECEIVED) != 0 && (status & BCE_EMAC_RX_STATUS_N_RECEIVED) != 0) { /* * If we're not currently XOFF'ed but have recently * been XOFF'd/XON'd then assume that's delaying TX * this time around. */ sc->watchdog_timer = BCE_TX_TIMEOUT; goto bce_watchdog_exit; } /* * Any other condition is unexpected and the controller * should be reset. */ } BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n", __FILE__, __LINE__); DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc); bce_dump_status_block(sc); bce_dump_stats_block(sc); bce_dump_ftqs(sc); bce_dump_txp_state(sc, 0); bce_dump_rxp_state(sc, 0); bce_dump_tpat_state(sc, 0); bce_dump_cp_state(sc, 0); bce_dump_com_state(sc, 0)); DBRUN(bce_breakpoint(sc)); sc->bce_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; bce_init_locked(sc); sc->watchdog_timeouts++; bce_watchdog_exit: REG_WR(sc, BCE_EMAC_RX_STATUS, status); DBEXIT(BCE_EXTREME_SEND); } /* * Interrupt handler. */ /****************************************************************************/ /* Main interrupt entry point. Verifies that the controller generated the */ /* interrupt and then calls a separate routine for handling the various */ /* interrupt causes (PHY, TX, RX). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_intr(void *xsc) { struct bce_softc *sc; struct ifnet *ifp; u32 status_attn_bits; u16 hw_rx_cons, hw_tx_cons; sc = xsc; ifp = sc->bce_ifp; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc)); BCE_LOCK(sc); DBRUN(sc->interrupts_generated++); /* Synchronize before we read from the interface's status block */ bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD); /* * If the hardware status block index matches the last value read * by the driver and we haven't asserted our interrupt then there's * nothing to do. This may only happen in case of INTx due to the * interrupt arriving at the CPU before the status block is updated. */ if ((sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) == 0 && sc->status_block->status_idx == sc->last_status_idx && (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) { DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n", __FUNCTION__); goto bce_intr_exit; } /* Ack the interrupt and stop others from occurring. */ REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | BCE_PCICFG_INT_ACK_CMD_MASK_INT); /* Check if the hardware has finished any work. */ hw_rx_cons = bce_get_hw_rx_cons(sc); hw_tx_cons = bce_get_hw_tx_cons(sc); /* Keep processing data as long as there is work to do. */ for (;;) { status_attn_bits = sc->status_block->status_attn_bits; DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control), BCE_PRINTF("Simulating unexpected status attention " "bit set."); sc->unexpected_attention_sim_count++; status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR); /* Was it a link change interrupt? */ if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { bce_phy_intr(sc); /* Clear transient updates during link state change. */ REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); REG_RD(sc, BCE_HC_COMMAND); } /* If any other attention is asserted, the chip is toast. 
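* * (Example: with STATUS_ATTN_BITS_LINK_STATE masked off, any bit on which status_attn_bits and status_attn_bits_ack disagree, e.g. STATUS_ATTN_BITS_PARITY_ERROR, is treated as fatal: new = attn & ~LINK_STATE; ack = attn_ack & ~LINK_STATE; if (new != ack) -> log the attention and re-initialize via bce_init_locked().)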
*/ if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != (sc->status_block->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE))) { sc->unexpected_attention_count++; BCE_PRINTF("%s(%d): Fatal attention detected: " "0x%08X\n", __FILE__, __LINE__, sc->status_block->status_attn_bits); DBRUNMSG(BCE_FATAL, if (unexpected_attention_sim_control == 0) bce_breakpoint(sc)); bce_init_locked(sc); goto bce_intr_exit; } /* Check for any completed RX frames. */ if (hw_rx_cons != sc->hw_rx_cons) bce_rx_intr(sc); /* Check for any completed TX frames. */ if (hw_tx_cons != sc->hw_tx_cons) bce_tx_intr(sc); /* Save status block index value for the next interrupt. */ sc->last_status_idx = sc->status_block->status_idx; /* * Prevent speculative reads from getting * ahead of the status block. */ bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, BUS_SPACE_BARRIER_READ); /* * If there's no work left then exit the * interrupt service routine. */ hw_rx_cons = bce_get_hw_rx_cons(sc); hw_tx_cons = bce_get_hw_tx_cons(sc); if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons)) break; } bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREREAD); /* Re-enable interrupts. */ bce_enable_intr(sc, 0); /* Handle any frames that arrived while handling the interrupt. */ if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bce_start_locked(ifp); bce_intr_exit: BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Programs the various packet receive modes (broadcast and multicast). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_set_rx_mode(struct bce_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; u32 rx_mode, sort_mode; int h, i; DBENTER(BCE_VERBOSE_MISC); BCE_LOCK_ASSERT(sc); ifp = sc->bce_ifp; /* Initialize receive mode default settings. */ rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS | BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; /* * ASF/IPMI/UMP firmware requires that VLAN tag stripping * be enabled. */ if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))) rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; /* * Check for promiscuous, all multicast, or selected * multicast address filtering. */ if (ifp->if_flags & IFF_PROMISC) { DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n"); /* Enable promiscuous mode. */ rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; } else if (ifp->if_flags & IFF_ALLMULTI) { DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n"); /* Enable all multicast addresses. */ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff); } sort_mode |= BCE_RPM_SORT_USER0_MC_EN; } else { /* Accept one or more multicast(s). 
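* * (Worked example of the hash filter programmed below: h is the low byte of the little-endian CRC32 of the link-level address, selecting one of 256 bins spread across eight 32-bit registers; e.g. h = 0x47 sets bit 7 (0x47 & 0x1F) of hash register 2 ((0x47 & 0xE0) >> 5).)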
*/ DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n"); if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF; hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); } if_maddr_runlock(ifp); for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]); sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; } /* Only make changes if the receive mode has actually changed. */ if (rx_mode != sc->rx_mode) { DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: " "0x%08X\n", rx_mode); sc->rx_mode = rx_mode; REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); } /* Disable and clear the existing sort before enabling a new sort. */ REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); DBEXIT(BCE_VERBOSE_MISC); } /****************************************************************************/ /* Called periodically to update statistics from the controller's */ /* statistics block. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_stats_update(struct bce_softc *sc) { struct statistics_block *stats; DBENTER(BCE_EXTREME_MISC); bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD); stats = (struct statistics_block *) sc->stats_block; /* * Update the sysctl statistics from the * hardware statistics. */ sc->stat_IfHCInOctets = ((u64) stats->stat_IfHCInOctets_hi << 32) + (u64) stats->stat_IfHCInOctets_lo; sc->stat_IfHCInBadOctets = ((u64) stats->stat_IfHCInBadOctets_hi << 32) + (u64) stats->stat_IfHCInBadOctets_lo; sc->stat_IfHCOutOctets = ((u64) stats->stat_IfHCOutOctets_hi << 32) + (u64) stats->stat_IfHCOutOctets_lo; sc->stat_IfHCOutBadOctets = ((u64) stats->stat_IfHCOutBadOctets_hi << 32) + (u64) stats->stat_IfHCOutBadOctets_lo; sc->stat_IfHCInUcastPkts = ((u64) stats->stat_IfHCInUcastPkts_hi << 32) + (u64) stats->stat_IfHCInUcastPkts_lo; sc->stat_IfHCInMulticastPkts = ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) + (u64) stats->stat_IfHCInMulticastPkts_lo; sc->stat_IfHCInBroadcastPkts = ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) + (u64) stats->stat_IfHCInBroadcastPkts_lo; sc->stat_IfHCOutUcastPkts = ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) + (u64) stats->stat_IfHCOutUcastPkts_lo; sc->stat_IfHCOutMulticastPkts = ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) + (u64) stats->stat_IfHCOutMulticastPkts_lo; sc->stat_IfHCOutBroadcastPkts = ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) + (u64) stats->stat_IfHCOutBroadcastPkts_lo; /* ToDo: Preserve counters beyond 32 bits? */ /* ToDo: Read the statistics from auto-clear regs? 
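* * (Note on the copies above: the controller exports each 64-bit counter as two 32-bit halves in the DMA'd statistics block, reassembled after the BUS_DMASYNC_POSTREAD sync as val = ((u64) hi << 32) + (u64) lo; the remaining counters below are plain 32-bit reads.)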
*/ sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; sc->stat_Dot3StatsCarrierSenseErrors = stats->stat_Dot3StatsCarrierSenseErrors; sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors; sc->stat_Dot3StatsAlignmentErrors = stats->stat_Dot3StatsAlignmentErrors; sc->stat_Dot3StatsSingleCollisionFrames = stats->stat_Dot3StatsSingleCollisionFrames; sc->stat_Dot3StatsMultipleCollisionFrames = stats->stat_Dot3StatsMultipleCollisionFrames; sc->stat_Dot3StatsDeferredTransmissions = stats->stat_Dot3StatsDeferredTransmissions; sc->stat_Dot3StatsExcessiveCollisions = stats->stat_Dot3StatsExcessiveCollisions; sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions; sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions; sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments; sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers; sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts; sc->stat_EtherStatsOversizePkts = stats->stat_EtherStatsOversizePkts; sc->stat_EtherStatsPktsRx64Octets = stats->stat_EtherStatsPktsRx64Octets; sc->stat_EtherStatsPktsRx65Octetsto127Octets = stats->stat_EtherStatsPktsRx65Octetsto127Octets; sc->stat_EtherStatsPktsRx128Octetsto255Octets = stats->stat_EtherStatsPktsRx128Octetsto255Octets; sc->stat_EtherStatsPktsRx256Octetsto511Octets = stats->stat_EtherStatsPktsRx256Octetsto511Octets; sc->stat_EtherStatsPktsRx512Octetsto1023Octets = stats->stat_EtherStatsPktsRx512Octetsto1023Octets; sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; sc->stat_EtherStatsPktsTx64Octets = stats->stat_EtherStatsPktsTx64Octets; sc->stat_EtherStatsPktsTx65Octetsto127Octets = stats->stat_EtherStatsPktsTx65Octetsto127Octets; sc->stat_EtherStatsPktsTx128Octetsto255Octets = stats->stat_EtherStatsPktsTx128Octetsto255Octets; sc->stat_EtherStatsPktsTx256Octetsto511Octets = stats->stat_EtherStatsPktsTx256Octetsto511Octets; sc->stat_EtherStatsPktsTx512Octetsto1023Octets = stats->stat_EtherStatsPktsTx512Octetsto1023Octets; sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived; sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived; sc->stat_OutXonSent = stats->stat_OutXonSent; sc->stat_OutXoffSent = stats->stat_OutXoffSent; sc->stat_FlowControlDone = stats->stat_FlowControlDone; sc->stat_MacControlFramesReceived = stats->stat_MacControlFramesReceived; sc->stat_XoffStateEntered = stats->stat_XoffStateEntered; sc->stat_IfInFramesL2FilterDiscards = stats->stat_IfInFramesL2FilterDiscards; sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards; sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards; sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards; sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit; sc->stat_CatchupInRuleCheckerDiscards = stats->stat_CatchupInRuleCheckerDiscards; sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards; sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards; sc->stat_CatchupInRuleCheckerP4Hit = stats->stat_CatchupInRuleCheckerP4Hit; sc->com_no_buffers = REG_RD_IND(sc, 0x120084); /* ToDo: Add additional 
statistics? */ DBEXIT(BCE_EXTREME_MISC); } static uint64_t bce_get_counter(struct ifnet *ifp, ift_counter cnt) { struct bce_softc *sc; uint64_t rv; sc = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_COLLISIONS: return (sc->stat_EtherStatsCollisions); case IFCOUNTER_IERRORS: return (sc->stat_EtherStatsUndersizePkts + sc->stat_EtherStatsOversizePkts + sc->stat_IfInMBUFDiscards + sc->stat_Dot3StatsAlignmentErrors + sc->stat_Dot3StatsFCSErrors + sc->stat_IfInRuleCheckerDiscards + sc->stat_IfInFTQDiscards + sc->l2fhdr_error_count + sc->com_no_buffers); case IFCOUNTER_OERRORS: rv = sc->stat_Dot3StatsExcessiveCollisions + sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + sc->stat_Dot3StatsLateCollisions + sc->watchdog_timeouts; /* * Certain controllers don't report * carrier sense errors correctly. * See errata E11_5708CA0_1165. */ if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) rv += sc->stat_Dot3StatsCarrierSenseErrors; return (rv); default: return (if_get_counter_default(ifp, cnt)); } } /****************************************************************************/ /* Periodic function to notify the bootcode that the driver is still */ /* present. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_pulse(void *xsc) { struct bce_softc *sc = xsc; u32 msg; DBENTER(BCE_EXTREME_MISC); BCE_LOCK_ASSERT(sc); /* Tell the firmware that the driver is still running. */ msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq; bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); /* Update the bootcode condition. */ sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); /* Report whether the bootcode still knows the driver is running. */ if (bce_verbose || bootverbose) { if (sc->bce_drv_cardiac_arrest == FALSE) { if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { sc->bce_drv_cardiac_arrest = TRUE; BCE_PRINTF("%s(): Warning: bootcode " "thinks driver is absent! " "(bc_state = 0x%08X)\n", __FUNCTION__, sc->bc_state); } } else { /* * Not supported by all bootcode versions. * (v5.0.11+ and v5.2.1+) Older bootcode * will require the driver to reset the * controller to clear this condition. */ if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { sc->bce_drv_cardiac_arrest = FALSE; BCE_PRINTF("%s(): Bootcode found the " "driver pulse! (bc_state = 0x%08X)\n", __FUNCTION__, sc->bc_state); } } } /* Schedule the next pulse. */ callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc); DBEXIT(BCE_EXTREME_MISC); } /****************************************************************************/ /* Periodic function to perform maintenance tasks. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_tick(void *xsc) { struct bce_softc *sc = xsc; struct mii_data *mii; struct ifnet *ifp; struct ifmediareq ifmr; ifp = sc->bce_ifp; DBENTER(BCE_EXTREME_MISC); BCE_LOCK_ASSERT(sc); /* Schedule the next tick. */ callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); /* Update the statistics from the hardware statistics block. */ bce_stats_update(sc); /* Ensure page and RX chains get refilled in low-memory situations. */ if (bce_hdr_split == TRUE) bce_fill_pg_chain(sc); bce_fill_rx_chain(sc); /* Check that the chip hasn't hung. */ bce_watchdog(sc); /* If the link is already up then we're done. */ if (sc->bce_link_up == TRUE) goto bce_tick_exit; /* Link is down. Check what the PHY's doing. 
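* * (Sketch of the two paths below: with BCE_PHY_REMOTE_CAP_FLAG set, link state comes from the remote PHY via bce_ifmedia_sts_rphy() and the link is considered up only when the media status shows IFM_ACTIVE | IFM_AVALID; otherwise the MII layer is polled with mii_tick() and IFM_ACTIVE plus a media subtype other than IFM_NONE marks the link up.)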
*/ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { bzero(&ifmr, sizeof(ifmr)); bce_ifmedia_sts_rphy(sc, &ifmr); if ((ifmr.ifm_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { sc->bce_link_up = TRUE; bce_miibus_statchg(sc->bce_dev); } } else { mii = device_get_softc(sc->bce_miibus); mii_tick(mii); /* Check if the link has come up. */ if ((mii->mii_media_status & IFM_ACTIVE) && (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) { DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n", __FUNCTION__); sc->bce_link_up = TRUE; if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX || IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) && (bce_verbose || bootverbose)) BCE_PRINTF("Gigabit link up!\n"); } } if (sc->bce_link_up == TRUE) { /* Now that link is up, handle any outstanding TX traffic. */ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found " "pending TX traffic.\n", __FUNCTION__); bce_start_locked(ifp); } } bce_tick_exit: DBEXIT(BCE_EXTREME_MISC); } static void bce_fw_cap_init(struct bce_softc *sc) { u32 ack, cap, link; ack = 0; cap = bce_shmem_rd(sc, BCE_FW_CAP_MB); if ((cap & BCE_FW_CAP_SIGNATURE_MAGIC_MASK) != BCE_FW_CAP_SIGNATURE_MAGIC) return; if ((cap & (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN)) == (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN)) ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC | BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN; if ((sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) != 0 && (cap & BCE_FW_CAP_REMOTE_PHY_CAP) != 0) { sc->bce_phy_flags &= ~BCE_PHY_REMOTE_PORT_FIBER_FLAG; sc->bce_phy_flags |= BCE_PHY_REMOTE_CAP_FLAG; link = bce_shmem_rd(sc, BCE_LINK_STATUS); if ((link & BCE_LINK_STATUS_SERDES_LINK) != 0) sc->bce_phy_flags |= BCE_PHY_REMOTE_PORT_FIBER_FLAG; ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC | BCE_FW_CAP_REMOTE_PHY_CAP; } if (ack != 0) bce_shmem_wr(sc, BCE_DRV_ACK_CAP_MB, ack); } #ifdef BCE_DEBUG /****************************************************************************/ /* Allows the driver state to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_driver_state(sc); } return error; } /****************************************************************************/ /* Allows the hardware state to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_hw_state(sc); } return error; } /****************************************************************************/ /* Allows the status block to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /****************************************************************************/ static int bce_sysctl_status_block(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_status_block(sc); } return error; } /****************************************************************************/ /* Allows the stats block to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_stats_block(sc); } return error; } /****************************************************************************/ /* Allows the stat counters to be cleared without unloading/reloading the */ /* driver. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; struct statistics_block *stats; stats = (struct statistics_block *) sc->stats_block; bzero(stats, sizeof(struct statistics_block)); bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Clear the internal H/W statistics counters. */ REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); /* Reset the driver maintained statistics. */ sc->interrupts_rx = sc->interrupts_tx = 0; sc->tso_frames_requested = sc->tso_frames_completed = sc->tso_frames_failed = 0; sc->rx_empty_count = sc->tx_full_count = 0; sc->rx_low_watermark = USABLE_RX_BD_ALLOC; sc->tx_hi_watermark = 0; sc->l2fhdr_error_count = sc->l2fhdr_error_sim_count = 0; sc->mbuf_alloc_failed_count = sc->mbuf_alloc_failed_sim_count = 0; sc->dma_map_addr_rx_failed_count = sc->dma_map_addr_tx_failed_count = 0; sc->mbuf_frag_count = 0; sc->csum_offload_tcp_udp = sc->csum_offload_ip = 0; sc->vlan_tagged_frames_rcvd = sc->vlan_tagged_frames_stripped = 0; sc->split_header_frames_rcvd = sc->split_header_tcp_frames_rcvd = 0; /* Clear firmware maintained statistics. */ REG_WR_IND(sc, 0x120084, 0); } return error; } /****************************************************************************/ /* Allows the shared memory contents to be dumped through the sysctl . */ /* interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_shmem_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_shmem_state(sc); } return error; } /****************************************************************************/ /* Allows the bootcode state to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /****************************************************************************/ static int bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_bc_state(sc); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the RX BD chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the RX MBUF chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the TX chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the page chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */ /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /****************************************************************************/ static int bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error; u32 result; u32 val[1]; u8 *data = (u8 *) val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); error = bce_nvram_read(sc, result, data, 4); BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0])); return (error); } /****************************************************************************/ /* Provides a sysctl interface to allow reading arbitrary registers in the */ /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error; u32 val, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); /* Make sure the register is accessible. */ if (result < 0x8000) { val = REG_RD(sc, result); BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); } else if (result < 0x0280000) { val = REG_RD_IND(sc, result); BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); } return (error); } /****************************************************************************/ /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc; device_t dev; int error, result; u16 val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); /* Make sure the register is accessible. */ if (result < 0x20) { sc = (struct bce_softc *)arg1; dev = sc->bce_dev; val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val); } return (error); } /****************************************************************************/ /* Provides a sysctl interface for dumping the nvram contents. */ /* DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive errno for failure. */ /****************************************************************************/ static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error, i; if (sc->nvram_buf == NULL) sc->nvram_buf = malloc(sc->bce_flash_size, M_TEMP, M_ZERO | M_WAITOK); error = 0; if (req->oldlen == sc->bce_flash_size) { for (i = 0; i < sc->bce_flash_size && error == 0; i++) error = bce_nvram_read(sc, i, &sc->nvram_buf[i], 1); } if (error == 0) error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size); return error; } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Provides a sysctl interface for writing to nvram. */ /* DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive errno for failure. 
*/ /****************************************************************************/ static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error; if (sc->nvram_buf == NULL) sc->nvram_buf = malloc(sc->bce_flash_size, M_TEMP, M_ZERO | M_WAITOK); else bzero(sc->nvram_buf, sc->bce_flash_size); error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size); if (error != 0) return (error); if (req->newlen == sc->bce_flash_size) error = bce_nvram_write(sc, 0, sc->nvram_buf, sc->bce_flash_size); return error; } #endif /****************************************************************************/ /* Provides a sysctl interface to allow reading a CID. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc; int error, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); /* Make sure the CID is accessible. */ if (result <= TX_CID) { sc = (struct bce_softc *)arg1; bce_dump_ctx(sc, result); } return (error); } /****************************************************************************/ /* Provides a sysctl interface to force the driver to dump state and */ /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_breakpoint(sc); } return error; } #endif /****************************************************************************/ /* Adds any sysctl parameters for tuning or debugging purposes. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /****************************************************************************/ static void bce_add_sysctls(struct bce_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; DBENTER(BCE_VERBOSE_MISC); ctx = device_get_sysctl_ctx(sc->bce_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "l2fhdr_error_sim_control", CTLFLAG_RW, &l2fhdr_error_sim_control, 0, "Debug control to force l2fhdr errors"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "l2fhdr_error_sim_count", CTLFLAG_RD, &sc->l2fhdr_error_sim_count, 0, "Number of simulated l2_fhdr errors"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "l2fhdr_error_count", CTLFLAG_RD, &sc->l2fhdr_error_count, 0, "Number of l2_fhdr errors"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mbuf_alloc_failed_sim_control", CTLFLAG_RW, &mbuf_alloc_failed_sim_control, 0, "Debug control to force mbuf allocation failures"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mbuf_alloc_failed_sim_count", CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count, 0, "Number of simulated mbuf cluster allocation failures"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mbuf_alloc_failed_count", CTLFLAG_RD, &sc->mbuf_alloc_failed_count, 0, "Number of mbuf allocation failures"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mbuf_frag_count", CTLFLAG_RD, &sc->mbuf_frag_count, 0, "Number of fragmented mbufs"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dma_map_addr_failed_sim_control", CTLFLAG_RW, &dma_map_addr_failed_sim_control, 0, "Debug control to force DMA mapping failures"); /* ToDo: Figure out how to update this value in bce_dma_map_addr(). */ SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dma_map_addr_failed_sim_count", CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count, 0, "Number of simulated DMA mapping failures"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dma_map_addr_rx_failed_count", CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count, 0, "Number of RX DMA mapping failures"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dma_map_addr_tx_failed_count", CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count, 0, "Number of TX DMA mapping failures"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "unexpected_attention_sim_control", CTLFLAG_RW, &unexpected_attention_sim_control, 0, "Debug control to simulate unexpected attentions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unexpected_attention_sim_count", CTLFLAG_RW, &sc->unexpected_attention_sim_count, 0, "Number of simulated unexpected attentions"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unexpected_attention_count", CTLFLAG_RW, &sc->unexpected_attention_count, 0, "Number of unexpected attentions"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_bootcode_running_failure", CTLFLAG_RW, &bootcode_running_failure_sim_control, 0, "Debug control to force bootcode running failures"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_low_watermark", CTLFLAG_RD, &sc->rx_low_watermark, 0, "Lowest level of free rx_bd's"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_empty_count", CTLFLAG_RD, &sc->rx_empty_count, "Number of times the RX chain was empty"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_hi_watermark", CTLFLAG_RD, &sc->tx_hi_watermark, 0, "Highest level of used tx_bd's"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_full_count", CTLFLAG_RD, &sc->tx_full_count, "Number of times the TX chain was full"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tso_frames_requested", CTLFLAG_RD, 
&sc->tso_frames_requested, "Number of TSO frames requested"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tso_frames_completed", CTLFLAG_RD, &sc->tso_frames_completed, "Number of TSO frames completed"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tso_frames_failed", CTLFLAG_RD, &sc->tso_frames_failed, "Number of TSO frames failed"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "csum_offload_ip", CTLFLAG_RD, &sc->csum_offload_ip, "Number of IP checksum offload frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "csum_offload_tcp_udp", CTLFLAG_RD, &sc->csum_offload_tcp_udp, "Number of TCP/UDP checksum offload frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "vlan_tagged_frames_rcvd", CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd, "Number of VLAN tagged frames received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "vlan_tagged_frames_stripped", CTLFLAG_RD, &sc->vlan_tagged_frames_stripped, "Number of VLAN tagged frames stripped"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "interrupts_rx", CTLFLAG_RD, &sc->interrupts_rx, "Number of RX interrupts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "interrupts_tx", CTLFLAG_RD, &sc->interrupts_tx, "Number of TX interrupts"); if (bce_hdr_split == TRUE) { SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "split_header_frames_rcvd", CTLFLAG_RD, &sc->split_header_frames_rcvd, "Number of split header frames received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "split_header_tcp_frames_rcvd", CTLFLAG_RD, &sc->split_header_tcp_frames_rcvd, "Number of split header TCP frames received"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD, (void *)sc, 0, bce_sysctl_nvram_dump, "S", ""); #ifdef BCE_NVRAM_WRITE_SUPPORT SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR, (void *)sc, 0, bce_sysctl_nvram_write, "S", ""); #endif #endif /* BCE_DEBUG */ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHcInOctets", CTLFLAG_RD, &sc->stat_IfHCInOctets, "Bytes received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInBadOctets", CTLFLAG_RD, &sc->stat_IfHCInBadOctets, "Bad bytes received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutOctets", CTLFLAG_RD, &sc->stat_IfHCOutOctets, "Bytes sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutBadOctets", CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, "Bad bytes sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInUcastPkts", CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, "Unicast packets received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInMulticastPkts", CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, "Multicast packets received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInBroadcastPkts", CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, "Broadcast packets received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutUcastPkts", CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, "Unicast packets sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutMulticastPkts", CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, "Multicast packets sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutBroadcastPkts", CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, "Broadcast packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 0, "Internal MAC transmit errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsCarrierSenseErrors", CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 0, "Carrier sense errors"); SYSCTL_ADD_UINT(ctx, children, 
OID_AUTO, "stat_Dot3StatsFCSErrors", CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 0, "Frame check sequence errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsAlignmentErrors", CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 0, "Alignment errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsSingleCollisionFrames", CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 0, "Single Collision Frames"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsMultipleCollisionFrames", CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 0, "Multiple Collision Frames"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsDeferredTransmissions", CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 0, "Deferred Transmissions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsExcessiveCollisions", CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 0, "Excessive Collisions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsLateCollisions", CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 0, "Late Collisions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsCollisions", CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 0, "Collisions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsFragments", CTLFLAG_RD, &sc->stat_EtherStatsFragments, 0, "Fragments"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsJabbers", CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 0, "Jabbers"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsUndersizePkts", CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 0, "Undersize packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsOversizePkts", CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts, 0, "stat_EtherStatsOversizePkts"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx64Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 0, "Bytes received in 64 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx65Octetsto127Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 0, "Bytes received in 65 to 127 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx128Octetsto255Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 0, "Bytes received in 128 to 255 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx256Octetsto511Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 0, "Bytes received in 256 to 511 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx512Octetsto1023Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 0, "Bytes received in 512 to 1023 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx1024Octetsto1522Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 0, "Bytes received in 1024 t0 1522 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx1523Octetsto9022Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 0, "Bytes received in 1523 to 9022 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx64Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 0, "Bytes sent in 64 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx65Octetsto127Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 0, "Bytes sent in 65 to 127 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx128Octetsto255Octets", CTLFLAG_RD, 
&sc->stat_EtherStatsPktsTx128Octetsto255Octets, 0, "128 to 255 byte packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx256Octetsto511Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 0, "256 to 511 byte packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx512Octetsto1023Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 0, "512 to 1023 byte packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx1024Octetsto1522Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 0, "1024 to 1522 byte packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx1523Octetsto9022Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 0, "1523 to 9022 byte packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_XonPauseFramesReceived", CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 0, "XON pause frames received"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_XoffPauseFramesReceived", CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 0, "XOFF pause frames received"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_OutXonSent", CTLFLAG_RD, &sc->stat_OutXonSent, 0, "XON pause frames sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_OutXoffSent", CTLFLAG_RD, &sc->stat_OutXoffSent, 0, "XOFF pause frames sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_FlowControlDone", CTLFLAG_RD, &sc->stat_FlowControlDone, 0, "Flow control done"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_MacControlFramesReceived", CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 0, "MAC control frames received"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_XoffStateEntered", CTLFLAG_RD, &sc->stat_XoffStateEntered, 0, "XOFF state entered"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInFramesL2FilterDiscards", CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 0, "Received L2 packets discarded"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInRuleCheckerDiscards", CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 0, "Received packets discarded by rule"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInFTQDiscards", CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 0, "Received packet FTQ discards"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInMBUFDiscards", CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 0, "Received packets discarded due to lack " "of controller buffer memory"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInRuleCheckerP4Hit", CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 0, "Received packets that hit the rule checker"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInRuleCheckerDiscards", CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 0, "Received packets discarded in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInFTQDiscards", CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 0, "Received packets discarded in FTQ in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInMBUFDiscards", CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 0, "Received packets discarded in controller " "buffer memory in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInRuleCheckerP4Hit", CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 0, "Received packets that hit the rule checker in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "com_no_buffers", CTLFLAG_RD, &sc->com_no_buffers, 0, "Valid packets received but no RX buffers available"); #ifdef BCE_DEBUG 
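/* The nodes below are write-to-trigger debug hooks: each is backed by a bce_sysctl_*() handler that dumps the requested driver or hardware state to the console. A hedged usage sketch from userland, assuming unit number 0: sysctl dev.bce.0.driver_state=1 or sysctl dev.bce.0.dump_rx_bd_chain=1. */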
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "driver_state", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_driver_state, "I", "Driver state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_state", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_hw_state, "I", "Hardware state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "status_block", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_status_block, "I", "Dump status block"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats_block", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_stats_block, "I", "Dump statistics block"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats_clear", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_stats_clear, "I", "Clear statistics block"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "shmem_state", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_shmem_state, "I", "Shared memory state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "bc_state", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_bc_state, "I", "Bootcode state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); if (bce_hdr_split == TRUE) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_dump_pg_chain, "I", "Dump page chain"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_ctx", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_dump_ctx, "I", "Dump context memory"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "breakpoint", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_breakpoint, "I", "Driver breakpoint"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_reg_read, "I", "Register read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "nvram_read", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_nvram_read, "I", "NVRAM read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_read", CTLTYPE_INT | CTLFLAG_RW, (void *)sc, 0, bce_sysctl_phy_read, "I", "PHY register read"); #endif DBEXIT(BCE_VERBOSE_MISC); } /****************************************************************************/ /* BCE Debug Routines */ /****************************************************************************/ #ifdef BCE_DEBUG /****************************************************************************/ /* Freezes the controller to allow for a cohesive state dump. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_freeze_controller(struct bce_softc *sc) { u32 val; val = REG_RD(sc, BCE_MISC_COMMAND); val |= BCE_MISC_COMMAND_DISABLE_ALL; REG_WR(sc, BCE_MISC_COMMAND, val); } /****************************************************************************/ /* Unfreezes the controller after a freeze operation. This may not always */ /* work and the controller will require a reset! */ /* */ /* Returns: */ /* Nothing. 
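(A hypothetical calling pattern is to bracket a dump with the pair: bce_freeze_controller(sc); bce_dump_hw_state(sc); bce_unfreeze_controller(sc); keeping in mind that the unfreeze may not take effect and a full reset may be required.)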
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_unfreeze_controller(struct bce_softc *sc) { u32 val; val = REG_RD(sc, BCE_MISC_COMMAND); val |= BCE_MISC_COMMAND_ENABLE_ALL; REG_WR(sc, BCE_MISC_COMMAND, val); } /****************************************************************************/ /* Prints out Ethernet frame information from an mbuf. */ /* */ /* Partially decode an Ethernet frame to look at some important headers. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_enet(struct bce_softc *sc, struct mbuf *m) { struct ether_vlan_header *eh; u16 etype; int ehlen; struct ip *ip; struct tcphdr *th; struct udphdr *uh; struct arphdr *ah; BCE_PRINTF( "-----------------------------" " Frame Decode " "-----------------------------\n"); eh = mtod(m, struct ether_vlan_header *); /* Handle VLAN encapsulation if present. */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehlen = ETHER_HDR_LEN; } /* ToDo: Add VLAN output. */ BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n", eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen); switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehlen); BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, " "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n", ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr), ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum)); switch (ip->ip_p) { case IPPROTO_TCP: th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = " "%d bytes, flags = 0x%b, csum = 0x%04X\n", ntohs(th->th_dport), ntohs(th->th_sport), (th->th_off << 2), th->th_flags, "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST" "\02SYN\01FIN", ntohs(th->th_sum)); break; case IPPROTO_UDP: uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); BCE_PRINTF("-udp: dest = %d, src = %d, len = %d " "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport), ntohs(uh->uh_sport), ntohs(uh->uh_ulen), ntohs(uh->uh_sum)); break; case IPPROTO_ICMP: BCE_PRINTF("icmp:\n"); break; default: BCE_PRINTF("----: Other IP protocol.\n"); } break; case ETHERTYPE_IPV6: BCE_PRINTF("ipv6: No decode supported.\n"); break; case ETHERTYPE_ARP: BCE_PRINTF("-arp: "); ah = (struct arphdr *) (m->m_data + ehlen); switch (ntohs(ah->ar_op)) { case ARPOP_REVREQUEST: printf("reverse ARP request\n"); break; case ARPOP_REVREPLY: printf("reverse ARP reply\n"); break; case ARPOP_REQUEST: printf("ARP request\n"); break; case ARPOP_REPLY: printf("ARP reply\n"); break; default: printf("other ARP operation\n"); } break; default: BCE_PRINTF("----: Other protocol.\n"); } BCE_PRINTF( "-----------------------------" "--------------" "-----------------------------\n"); } /****************************************************************************/ /* Prints out information about an mbuf. */ /* */ /* Returns: */ /* Nothing. 
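(The routine below walks the m_next linkage, decodes m_flags with the kernel's %b bitmap format, and names the external storage type for each mbuf; a NULL mbuf pointer is reported rather than dereferenced.)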
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) { struct mbuf *mp = m; if (m == NULL) { BCE_PRINTF("mbuf: null pointer\n"); return; } while (mp) { BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, " "m_data = %p\n", mp, mp->m_len, mp->m_flags, "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data); if (mp->m_flags & M_PKTHDR) { BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, " "csum_flags = %b\n", mp->m_pkthdr.len, mp->m_flags, M_FLAG_PRINTF, mp->m_pkthdr.csum_flags, CSUM_BITS); } if (mp->m_flags & M_EXT) { BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ", mp->m_ext.ext_buf, mp->m_ext.ext_size); switch (mp->m_ext.ext_type) { case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break; case EXT_SFBUF: printf("EXT_SFBUF\n"); break; case EXT_JUMBO9: printf("EXT_JUMBO9\n"); break; case EXT_JUMBO16: printf("EXT_JUMBO16\n"); break; case EXT_PACKET: printf("EXT_PACKET\n"); break; case EXT_MBUF: printf("EXT_MBUF\n"); break; case EXT_NET_DRV: printf("EXT_NET_DRV\n"); break; case EXT_MOD_TYPE: printf("EXT_MOD_TYPE\n"); break; case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break; case EXT_EXTREF: printf("EXT_EXTREF\n"); break; default: printf("UNKNOWN\n"); } } mp = mp->m_next; } } /****************************************************************************/ /* Prints out the mbufs in the TX mbuf chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) { struct mbuf *m; BCE_PRINTF( "----------------------------" " tx mbuf data " "----------------------------\n"); for (int i = 0; i < count; i++) { m = sc->tx_mbuf_ptr[chain_prod]; BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod); bce_dump_mbuf(sc, m); chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the mbufs in the RX mbuf chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) { struct mbuf *m; BCE_PRINTF( "----------------------------" " rx mbuf data " "----------------------------\n"); for (int i = 0; i < count; i++) { m = sc->rx_mbuf_ptr[chain_prod]; BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); bce_dump_mbuf(sc, m); chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the mbufs in the mbuf page chain. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) { struct mbuf *m; BCE_PRINTF( "----------------------------" " pg mbuf data " "----------------------------\n"); for (int i = 0; i < count; i++) { m = sc->pg_mbuf_ptr[chain_prod]; BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); bce_dump_mbuf(sc, m); chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out a tx_bd structure. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) { int i = 0; if (idx > MAX_TX_BD_ALLOC) /* Index out of range. */ BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) /* TX Chain page pointer. */ BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " "pointer\n", idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo); else { /* Normal tx_bd entry. */ BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, " "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = " "0x%04X (", idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) { if (i>0) printf("|"); printf("CONN_FAULT"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) { if (i>0) printf("|"); printf("TCP_UDP_CKSUM"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) { if (i>0) printf("|"); printf("IP_CKSUM"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) { if (i>0) printf("|"); printf("VLAN"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) { if (i>0) printf("|"); printf("COAL_NOW"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) { if (i>0) printf("|"); printf("DONT_GEN_CRC"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_START) { if (i>0) printf("|"); printf("START"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_END) { if (i>0) printf("|"); printf("END"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) { if (i>0) printf("|"); printf("LSO"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) { if (i>0) printf("|"); printf("SW_OPTION=%d", ((txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) { if (i>0) printf("|"); printf("SW_FLAGS"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) { if (i>0) printf("|"); printf("SNAP)\n"); } else { printf(")\n"); } } } /****************************************************************************/ /* Prints out a rx_bd structure. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) { if (idx > MAX_RX_BD_ALLOC) /* Index out of range. */ BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) /* RX Chain page pointer. */ BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " "pointer\n", idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo); else /* Normal rx_bd entry. 
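(For a posted descriptor the length field holds the buffer size made available to the hardware, not the size of any received frame.)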
*/ BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len, rxbd->rx_bd_flags); } /****************************************************************************/ /* Prints out a rx_bd structure in the page chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) { if (idx > MAX_PG_BD_ALLOC) /* Index out of range. */ BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) /* Page Chain page pointer. */ BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); else /* Normal rx_bd entry. */ BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " "flags = 0x%08X\n", idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, pgbd->rx_bd_len, pgbd->rx_bd_flags); } /****************************************************************************/ /* Prints out a l2_fhdr structure. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) { BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " "tcp_udp_xsum = 0x%04X\n", idx, l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); } /****************************************************************************/ /* Prints out context memory info. (Only useful for CID 0 to 16.) */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_ctx(struct bce_softc *sc, u16 cid) { if (cid > TX_CID) { BCE_PRINTF(" Unknown CID\n"); return; } BCE_PRINTF( "----------------------------" " CTX Data " "----------------------------\n"); BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); if (cid == RX_CID) { BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx " "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host " "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BSEQ)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer " "descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer " "descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer " "index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page " "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_PG_BDIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page " "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_PG_BUF_SIZE)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page " "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_HI)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page " "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_LO)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page " "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDIDX)); } else if (cid == TX_CID) { if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE_XI) ctx " "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_HI_XI) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) " "host producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BIDX_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) " "host byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BSEQ_XI)); } else { BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_CMD_TYPE) ctx cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_HI) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host " "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte " "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BSEQ)); } } BCE_PRINTF( "----------------------------" " 
Raw CTX " "----------------------------\n"); for (int i = 0x0; i < 0x300; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, CTX_RD(sc, GET_CID_ADDR(cid), i), CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4), CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8), CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the FTQ data. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_ftqs(struct bce_softc *sc) { u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val; BCE_PRINTF( "----------------------------" " FTQ Data " "----------------------------\n"); BCE_PRINTF(" FTQ Command Control Depth_Now " "Max_Depth Valid_Cnt \n"); BCE_PRINTF(" ------- ---------- ---------- ---------- " "---------- ----------\n"); /* Setup the generic statistic counters for the FTQ valid count. */ val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val); val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val); val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val); /* Input queue to the Receive Lookup state machine */ cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Processor */ cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Processor */ cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Virtual to Physical state machine */ cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, 
BCE_HC_STAT_GEN_STAT3); BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Virtual to Physical state machine */ cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Virtual to Physical state machine */ cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive DMA state machine */ cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL); cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Scheduler state machine */ cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7); BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Buffer Descriptor state machine */ cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Processor */ cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit DMA state machine */ cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Patch-Up Processor */ cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Assembler state machine */ cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); BCE_PRINTF(" TAS 0x%08X 0x%08X 
0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Processor */ cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Processor */ cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Processor */ cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); BCE_PRINTF(" COM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Setup the generic statistic counters for the FTQ valid count. */ val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) val = val | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 24); REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); /* Input queue to the Management Control Processor */ cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Command Processor */ cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Scheduler state machine */ cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { /* Input queue to the RV2P Command Scheduler */ cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD); ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL); cur_depth = (ctl & 0xFFC00000) >> 22; max_depth = (ctl & 0x003FF000) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the TX chain. */ /* */ /* Returns: */ /* Nothing. 
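(A hypothetical invocation such as bce_dump_tx_chain(sc, sc->tx_cons, 32) decodes 32 descriptors starting at the consumer index; out-of-range indices are flagged by bce_dump_txbd() rather than checked here.)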
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) { struct tx_bd *txbd; /* First some info about the tx_bd chain structure. */ BCE_PRINTF( "----------------------------" " tx_bd chain " "----------------------------\n"); BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", (u32) BCM_PAGE_SIZE, (u32) sc->tx_pages); BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD_ALLOC); BCE_PRINTF( "----------------------------" " tx_bd data " "----------------------------\n"); /* Now print out a decoded list of TX buffer descriptors. */ for (int i = 0; i < count; i++) { txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; bce_dump_txbd(sc, tx_prod, txbd); tx_prod++; } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the RX chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count) { struct rx_bd *rxbd; /* First some info about the rx_bd chain structure. */ BCE_PRINTF( "----------------------------" " rx_bd chain " "----------------------------\n"); BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", (u32) BCM_PAGE_SIZE, (u32) sc->rx_pages); BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD_ALLOC); BCE_PRINTF( "----------------------------" " rx_bd data " "----------------------------\n"); /* Now print out the rx_bd's themselves. */ for (int i = 0; i < count; i++) { rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; bce_dump_rxbd(sc, rx_prod, rxbd); rx_prod = RX_CHAIN_IDX(rx_prod + 1); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the page chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) { struct rx_bd *pgbd; /* First some info about the page chain structure. */ BCE_PRINTF( "----------------------------" " page chain " "----------------------------\n"); BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", (u32) BCM_PAGE_SIZE, (u32) sc->pg_pages); BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); BCE_PRINTF("total pg_bd = 0x%08X\n", (u32) TOTAL_PG_BD_ALLOC); BCE_PRINTF( "----------------------------" " page data " "----------------------------\n"); /* Now print out the rx_bd's themselves. 
*/ for (int i = 0; i < count; i++) { pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; bce_dump_pgbd(sc, pg_prod, pgbd); pg_prod = PG_CHAIN_IDX(pg_prod + 1); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } #define BCE_PRINT_RX_CONS(arg) \ if (sblk->status_rx_quick_consumer_index##arg) \ BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \ sblk->status_rx_quick_consumer_index##arg, (u16) \ RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \ arg); #define BCE_PRINT_TX_CONS(arg) \ if (sblk->status_tx_quick_consumer_index##arg) \ BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \ sblk->status_tx_quick_consumer_index##arg, (u16) \ TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \ arg); /****************************************************************************/ /* Prints out the status block from host memory. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_status_block(struct bce_softc *sc) { struct status_block *sblk; bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD); sblk = sc->status_block; BCE_PRINTF( "----------------------------" " Status Block " "----------------------------\n"); /* These indices are used for normal L2 drivers. */ BCE_PRINTF(" 0x%08X - attn_bits\n", sblk->status_attn_bits); BCE_PRINTF(" 0x%08X - attn_bits_ack\n", sblk->status_attn_bits_ack); BCE_PRINT_RX_CONS(0); BCE_PRINT_TX_CONS(0); BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); /* These indices are not used for normal L2 drivers. */ BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3); BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6); BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9); BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12); BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15); BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3); if (sblk->status_completion_producer_index || sblk->status_cmd_consumer_index) BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", sblk->status_completion_producer_index, sblk->status_cmd_consumer_index); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } #define BCE_PRINT_64BIT_STAT(arg) \ if (sblk->arg##_lo || sblk->arg##_hi) \ BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \ sblk->arg##_lo, #arg); #define BCE_PRINT_32BIT_STAT(arg) \ if (sblk->arg) \ BCE_PRINTF(" 0x%08X : %s\n", \ sblk->arg, #arg); /****************************************************************************/ /* Prints out the statistics block from host memory. */ /* */ /* Returns: */ /* Nothing. 
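(The block is synced with BUS_DMASYNC_POSTREAD before it is read, and the BCE_PRINT_*_STAT macros above suppress zero-valued counters, which is what the banner's "All Stats Not Shown Are 0" refers to.)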
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_stats_block(struct bce_softc *sc) { struct statistics_block *sblk; bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD); sblk = sc->stats_block; BCE_PRINTF( "---------------" " Stats Block (All Stats Not Shown Are 0) " "---------------\n"); BCE_PRINT_64BIT_STAT(stat_IfHCInOctets); BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets); BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets); BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets); BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts); BCE_PRINT_32BIT_STAT( stat_emac_tx_stat_dot3statsinternalmactransmiterrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames); BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames); BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions); BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions); BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions); BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions); BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments); BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers); BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts); BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets); BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived); BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived); BCE_PRINT_32BIT_STAT(stat_OutXonSent); BCE_PRINT_32BIT_STAT(stat_OutXoffSent); BCE_PRINT_32BIT_STAT(stat_FlowControlDone); BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived); BCE_PRINT_32BIT_STAT(stat_XoffStateEntered); BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards); BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards); BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards); BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards); BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit); BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards); BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards); BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards); BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out a summary of the driver state. */ /* */ /* Returns: */ /* Nothing. 
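(Pointers are split with BCE_ADDR_HI()/BCE_ADDR_LO() so that 64-bit virtual addresses always print as two 32-bit words, regardless of platform.)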
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_driver_state(struct bce_softc *sc) { u32 val_hi, val_lo; BCE_PRINTF( "-----------------------------" " Driver State " "-----------------------------\n"); val_hi = BCE_ADDR_HI(sc); val_lo = BCE_ADDR_LO(sc); BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual " "address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->bce_vhandle); val_lo = BCE_ADDR_LO(sc->bce_vhandle); BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual " "address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->status_block); val_lo = BCE_ADDR_LO(sc->status_block); BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->stats_block); val_lo = BCE_ADDR_LO(sc->stats_block); BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->tx_bd_chain); val_lo = BCE_ADDR_LO(sc->tx_bd_chain); BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->rx_bd_chain); val_lo = BCE_ADDR_LO(sc->rx_bd_chain); BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " "virtual address\n", val_hi, val_lo); if (bce_hdr_split == TRUE) { val_hi = BCE_ADDR_HI(sc->pg_bd_chain); val_lo = BCE_ADDR_LO(sc->pg_bd_chain); BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain " "virtual address\n", val_hi, val_lo); } val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " "virtual address\n", val_hi, val_lo); if (bce_hdr_split == TRUE) { val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain " "virtual address\n", val_hi, val_lo); } BCE_PRINTF(" 0x%016llX - (sc->interrupts_generated) " "h/w intrs\n", (long long unsigned int) sc->interrupts_generated); BCE_PRINTF(" 0x%016llX - (sc->interrupts_rx) " "rx interrupts handled\n", (long long unsigned int) sc->interrupts_rx); BCE_PRINTF(" 0x%016llX - (sc->interrupts_tx) " "tx interrupts handled\n", (long long unsigned int) sc->interrupts_tx); BCE_PRINTF(" 0x%016llX - (sc->phy_interrupts) " "phy interrupts handled\n", (long long unsigned int) sc->phy_interrupts); BCE_PRINTF(" 0x%08X - (sc->last_status_idx) " "status block index\n", sc->last_status_idx); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer " "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer " "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer " "byte seq index\n", sc->tx_prod_bseq); BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx " "mbufs allocated\n", sc->debug_tx_mbuf_alloc); BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used " "tx_bd's\n", sc->used_tx_bd); BCE_PRINTF(" 0x%04X/0x%04X - (sc->tx_hi_watermark)/" "(sc->max_tx_bd)\n", sc->tx_hi_watermark, sc->max_tx_bd); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer " "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer " "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer " "byte seq 
index\n", sc->rx_prod_bseq); BCE_PRINTF(" 0x%04X/0x%04X - (sc->rx_low_watermark)/" "(sc->max_rx_bd)\n", sc->rx_low_watermark, sc->max_rx_bd); BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx " "mbufs allocated\n", sc->debug_rx_mbuf_alloc); BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free " "rx_bd's\n", sc->free_rx_bd); if (bce_hdr_split == TRUE) { BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer " "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer " "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page " "mbufs allocated\n", sc->debug_pg_mbuf_alloc); } BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page " "rx_bd's\n", sc->free_pg_bd); BCE_PRINTF(" 0x%04X/0x%04X - (sc->pg_low_watermark)/" "(sc->max_pg_bd)\n", sc->pg_low_watermark, sc->max_pg_bd); BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) " "mbuf alloc failures\n", sc->mbuf_alloc_failed_count); BCE_PRINTF(" 0x%08X - (sc->bce_flags) " "bce mac flags\n", sc->bce_flags); BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) " "bce phy flags\n", sc->bce_phy_flags); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the hardware state through a summary of important register, */ /* followed by a complete register dump. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_hw_state(struct bce_softc *sc) { u32 val; BCE_PRINTF( "----------------------------" " Hardware State " "----------------------------\n"); BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n", val, BCE_MISC_ENABLE_STATUS_BITS); val = REG_RD(sc, BCE_DMA_STATUS); BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS); val = REG_RD(sc, BCE_CTX_STATUS); BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS); val = REG_RD(sc, BCE_EMAC_STATUS); BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS); val = REG_RD(sc, BCE_RPM_STATUS); BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS); /* ToDo: Create a #define for this constant. */ val = REG_RD(sc, 0x2004); BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004); val = REG_RD(sc, BCE_RV2P_STATUS); BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS); /* ToDo: Create a #define for this constant. 
*/ val = REG_RD(sc, 0x2c04); BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04); val = REG_RD(sc, BCE_TBDR_STATUS); BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS); val = REG_RD(sc, BCE_TDMA_STATUS); BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS); val = REG_RD(sc, BCE_HC_STATUS); BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS); val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE); val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE); val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE); val = REG_RD_IND(sc, BCE_COM_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE); val = REG_RD_IND(sc, BCE_MCP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE); val = REG_RD_IND(sc, BCE_CP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = 0x400; i < 0x8000; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, REG_RD(sc, i), REG_RD(sc, i + 0x4), REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the contents of shared memory which is used for host driver */ /* to bootcode firmware communication. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_shmem_state(struct bce_softc *sc) { BCE_PRINTF( "----------------------------" " Shmem State " "----------------------------\n"); BCE_PRINTF("0x%08X - Shared memory base address\n", sc->bce_shmem_base); BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); BCE_PRINTF( "----------------------------" " Shared Mem " "----------------------------\n"); for (int i = 0x0; i < 0x200; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, bce_shmem_rd(sc, i), bce_shmem_rd(sc, i + 0x4), bce_shmem_rd(sc, i + 0x8), bce_shmem_rd(sc, i + 0xC)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the mailbox queue registers. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_mq_regs(struct bce_softc *sc) { BCE_PRINTF( "----------------------------" " MQ Regs " "----------------------------\n"); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); for (int i = 0x3c00; i < 0x4000; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, REG_RD(sc, i), REG_RD(sc, i + 0x4), REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the bootcode state. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_bc_state(struct bce_softc *sc) { u32 val; BCE_PRINTF( "----------------------------" " Bootcode State " "----------------------------\n"); BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE); BCE_PRINTF("0x%08X - (0x%06X) reset_type\n", val, BCE_BC_RESET_TYPE); val = bce_shmem_rd(sc, BCE_BC_STATE); BCE_PRINTF("0x%08X - (0x%06X) state\n", val, BCE_BC_STATE); val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); BCE_PRINTF("0x%08X - (0x%06X) condition\n", val, BCE_BC_STATE_CONDITION); val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD); BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n", val, BCE_BC_STATE_DEBUG_CMD); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the TXP processor state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_txp_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " TXP State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_TXP_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_TXP_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val, BCE_TXP_CPU_MODE); val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE); val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val, BCE_TXP_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) { /* Skip the big blank spaces */ if (i < 0x45400 || i > 0x5ffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the RXP processor state. */ /* */ /* Returns: */ /* Nothing. 
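(As in the TXP dump above, the firmware version is read as three big-endian words from processor scratch memory and printed with %s, on the assumption that the firmware NUL-terminates the string within those 12 bytes.)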
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rxp_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " RXP State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_RXP_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_RXP_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val, BCE_RXP_CPU_MODE); val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE); val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val, BCE_RXP_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { /* Skip the big blank spaces */ if (i < 0xc5400 && i > 0xdffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the TPAT processor state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_tpat_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " TPAT State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_TPAT_SCRATCH + 0x410 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val, BCE_TPAT_CPU_MODE); val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE); val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val, BCE_TPAT_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { /* Skip the big blank spaces */ if (i < 0x854000 && i > 0x9ffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the Command Processor (CP) state. */ /* */ /* Returns: */ /* Nothing.
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_cp_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " CP State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_CP_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_CP_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", val, BCE_CP_CPU_MODE); val = REG_RD_IND(sc, BCE_CP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE); val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val, BCE_CP_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) { /* Skip the big blank spaces */ if (i < 0x185400 && i > 0x19ffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the Completion Processor (COM) state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_com_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[4]; BCE_PRINTF( "----------------------------" " COM State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_COM_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_COM_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", val, BCE_COM_CPU_MODE); val = REG_RD_IND(sc, BCE_COM_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE); val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val, BCE_COM_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the Receive Virtual 2 Physical (RV2P) state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rv2p_state(struct bce_softc *sc) { u32 val, pc1, pc2, fw_ver_high, fw_ver_low; BCE_PRINTF( "----------------------------" " RV2P State " "----------------------------\n"); /* Stall the RV2P processors. */ val = REG_RD_IND(sc, BCE_RV2P_CONFIG); val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2; REG_WR_IND(sc, BCE_RV2P_CONFIG, val); /* Read the firmware version.
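 *
 * Editor's note: the processors are stalled above so the reads below are
 * stable.  Writing 1 to BCE_RV2P_PROCx_ADDR_CMD appears to select the
 * instruction word that carries the firmware revision, which is then read
 * back through BCE_RV2P_INSTR_LOW/HIGH; afterwards the program counters
 * are sampled twice so a wedged processor (identical readings) can be
 * told apart from a running one.  (Editor's reading of the code, not chip
 * documentation.)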
*/ val = 0x00000001; REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val); fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW); fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) & BCE_RV2P_INSTR_HIGH_HIGH; BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n", fw_ver_high, fw_ver_low); val = 0x00000001; REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val); fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW); fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) & BCE_RV2P_INSTR_HIGH_HIGH; BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n", fw_ver_high, fw_ver_low); /* Resume the RV2P processors. */ val = REG_RD_IND(sc, BCE_RV2P_CONFIG); val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2); REG_WR_IND(sc, BCE_RV2P_CONFIG, val); /* Fetch the program counter value. */ val = 0x68007800; REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val); val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK); pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE); pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16; BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1); BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2); /* Fetch the program counter value again to see if it is advancing. */ val = 0x68007800; REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val); val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK); pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE); pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16; BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1); BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the driver state and then enters the debugger. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_breakpoint(struct bce_softc *sc) { /* * Unreachable code to silence compiler warnings * about unused functions. */ if (0) { bce_freeze_controller(sc); bce_unfreeze_controller(sc); bce_dump_enet(sc, NULL); bce_dump_txbd(sc, 0, NULL); bce_dump_rxbd(sc, 0, NULL); bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD_ALLOC); bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC); bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD_ALLOC); bce_dump_l2fhdr(sc, 0, NULL); bce_dump_ctx(sc, RX_CID); bce_dump_ftqs(sc); bce_dump_tx_chain(sc, 0, USABLE_TX_BD_ALLOC); bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD_ALLOC); bce_dump_pg_chain(sc, 0, USABLE_PG_BD_ALLOC); bce_dump_status_block(sc); bce_dump_stats_block(sc); bce_dump_driver_state(sc); bce_dump_hw_state(sc); bce_dump_bc_state(sc); bce_dump_txp_state(sc, 0); bce_dump_rxp_state(sc, 0); bce_dump_tpat_state(sc, 0); bce_dump_cp_state(sc, 0); bce_dump_com_state(sc, 0); bce_dump_rv2p_state(sc); bce_dump_pgbd(sc, 0, NULL); } bce_dump_status_block(sc); bce_dump_driver_state(sc); /* Call the debugger. */ breakpoint(); } #endif Index: head/sys/dev/beri/virtio/virtio_block.c =================================================================== --- head/sys/dev/beri/virtio/virtio_block.c (revision 313981) +++ head/sys/dev/beri/virtio/virtio_block.c (revision 313982) @@ -1,561 +1,561 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * BERI virtio block backend driver */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pio_if.h" #define DPRINTF(fmt, ...) /* We use indirect descriptors */ #define NUM_DESCS 1 #define NUM_QUEUES 1 #define VTBLK_BLK_ID_BYTES 20 #define VTBLK_MAXSEGS 256 struct beri_vtblk_softc { struct resource *res[1]; bus_space_tag_t bst; bus_space_handle_t bsh; struct cdev *cdev; device_t dev; int opened; device_t pio_recv; device_t pio_send; struct vqueue_info vs_queues[NUM_QUEUES]; char ident[VTBLK_BLK_ID_BYTES]; struct ucred *cred; struct vnode *vnode; struct thread *vtblk_ktd; struct sx sc_mtx; int beri_mem_offset; struct md_ioctl *mdio; struct virtio_blk_config *cfg; }; static struct resource_spec beri_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static int vtblk_rdwr(struct beri_vtblk_softc *sc, struct iovec *iov, int cnt, int offset, int operation, int iolen) { struct vnode *vp; struct mount *mp; struct uio auio; int error; bzero(&auio, sizeof(auio)); vp = sc->vnode; KASSERT(vp != NULL, ("file not opened")); auio.uio_iov = iov; auio.uio_iovcnt = cnt; auio.uio_offset = offset; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = operation; auio.uio_resid = iolen; auio.uio_td = curthread; if (operation == 0) { vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred); VOP_UNLOCK(vp, 0); } else { (void) vn_start_write(vp, &mp, V_WAIT); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = VOP_WRITE(vp, &auio, IO_SYNC, sc->cred); VOP_UNLOCK(vp, 0); vn_finished_write(mp); } return (error); } static void vtblk_proc(struct beri_vtblk_softc *sc, struct vqueue_info *vq) { struct iovec iov[VTBLK_MAXSEGS + 2]; uint16_t flags[VTBLK_MAXSEGS + 2]; struct virtio_blk_outhdr *vbh; struct iovec *tiov; uint8_t *status; off_t offset; int iolen; int type; int i, n; int err; n = vq_getchain(sc->beri_mem_offset, vq, iov, VTBLK_MAXSEGS + 2, flags); KASSERT(n >= 2 && n <= VTBLK_MAXSEGS + 2, ("wrong n value %d", n)); tiov = getcopy(iov, n); vbh = iov[0].iov_base; status = 
iov[n-1].iov_base; KASSERT(iov[n-1].iov_len == 1, ("iov_len == %d", iov[n-1].iov_len)); type = be32toh(vbh->type) & ~VIRTIO_BLK_T_BARRIER; offset = be64toh(vbh->sector) * DEV_BSIZE; iolen = 0; for (i = 1; i < (n-1); i++) { iolen += iov[i].iov_len; } switch (type) { case VIRTIO_BLK_T_OUT: case VIRTIO_BLK_T_IN: err = vtblk_rdwr(sc, tiov + 1, i - 1, offset, type, iolen); break; case VIRTIO_BLK_T_GET_ID: /* Assume a single buffer */ strlcpy(iov[1].iov_base, sc->ident, MIN(iov[1].iov_len, sizeof(sc->ident))); err = 0; break; case VIRTIO_BLK_T_FLUSH: /* Possible? */ default: err = -ENOSYS; break; } if (err < 0) { if (err == -ENOSYS) { *status = VIRTIO_BLK_S_UNSUPP; } else *status = VIRTIO_BLK_S_IOERR; } else *status = VIRTIO_BLK_S_OK; free(tiov, M_DEVBUF); vq_relchain(vq, iov, n, 1); } static int close_file(struct beri_vtblk_softc *sc, struct thread *td) { int error; if (sc->vnode != NULL) { vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); sc->vnode->v_vflag &= ~VV_MD; VOP_UNLOCK(sc->vnode, 0); error = vn_close(sc->vnode, (FREAD|FWRITE), sc->cred, td); if (error != 0) return (error); sc->vnode = NULL; } if (sc->cred != NULL) crfree(sc->cred); return (0); } static int open_file(struct beri_vtblk_softc *sc, struct thread *td) { struct nameidata nd; struct vattr vattr; int error; int flags; flags = (FREAD | FWRITE); NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->mdio->md_file, td); error = vn_open(&nd, &flags, 0, NULL); if (error != 0) return (error); NDFREE(&nd, NDF_ONLY_PNBUF); if (nd.ni_vp->v_type != VREG) { return (EINVAL); } error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred); if (error != 0) return (error); if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); if (nd.ni_vp->v_iflag & VI_DOOMED) { return (1); } } nd.ni_vp->v_vflag |= VV_MD; VOP_UNLOCK(nd.ni_vp, 0); sc->vnode = nd.ni_vp; sc->cred = crhold(td->td_ucred); return (0); } static int vtblk_notify(struct beri_vtblk_softc *sc) { struct vqueue_info *vq; int queue; int reg; vq = &sc->vs_queues[0]; if (!vq_ring_ready(vq)) return (0); if (!sc->opened) return (0); reg = READ2(sc, VIRTIO_MMIO_QUEUE_NOTIFY); queue = be16toh(reg); KASSERT(queue == 0, ("we support single queue only")); /* Process new descriptors */ vq = &sc->vs_queues[queue]; vq->vq_save_used = be16toh(vq->vq_used->idx); while (vq_has_descs(vq)) vtblk_proc(sc, vq); /* Interrupt the other side */ if ((be16toh(vq->vq_avail->flags) & VRING_AVAIL_F_NO_INTERRUPT) == 0) { reg = htobe32(VIRTIO_MMIO_INT_VRING); WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg); PIO_SET(sc->pio_send, Q_INTR, 1); } return (0); } static int vq_init(struct beri_vtblk_softc *sc) { struct vqueue_info *vq; uint8_t *base; int size; int reg; int pfn; vq = &sc->vs_queues[0]; vq->vq_qsize = NUM_DESCS; reg = READ4(sc, VIRTIO_MMIO_QUEUE_PFN); pfn = be32toh(reg); vq->vq_pfn = pfn; size = vring_size(vq->vq_qsize, VRING_ALIGN); base = paddr_map(sc->beri_mem_offset, (pfn << PAGE_SHIFT), size); /* First pages are descriptors */ vq->vq_desc = (struct vring_desc *)base; base += vq->vq_qsize * sizeof(struct vring_desc); /* Then avail ring */ vq->vq_avail = (struct vring_avail *)base; base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t); /* Then it's rounded up to the next page */ base = (uint8_t *)roundup2((uintptr_t)base, VRING_ALIGN); /* And the last pages are the used ring */ vq->vq_used = (struct vring_used *)base; /* Mark queue as allocated, and start at 0 when we use it. 
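 *
 * Worked example (editor's sketch; standard virtio vring layout assumed):
 * with vq_qsize = NUM_DESCS = 1 and VRING_ALIGN equal to one page, the
 * descriptor table is 1 * sizeof(struct vring_desc) = 16 bytes at offset
 * 0, the avail ring is (2 + 1 + 1) * sizeof(uint16_t) = 8 bytes at offset
 * 16, and the used ring starts at the next VRING_ALIGN boundary, i.e. at
 * offset 4096.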
*/ vq->vq_flags = VQ_ALLOC; vq->vq_last_avail = 0; return (0); } static void vtblk_thread(void *arg) { struct beri_vtblk_softc *sc; int err; sc = arg; sx_xlock(&sc->sc_mtx); for (;;) { err = msleep(sc, &sc->sc_mtx, PCATCH | PZERO, "prd", hz); vtblk_notify(sc); } sx_xunlock(&sc->sc_mtx); kthread_exit(); } static int backend_info(struct beri_vtblk_softc *sc) { struct virtio_blk_config *cfg; uint32_t *s; int reg; int i; /* Specify that we provide block device */ reg = htobe32(VIRTIO_ID_BLOCK); WRITE4(sc, VIRTIO_MMIO_DEVICE_ID, reg); /* Queue size */ reg = htobe32(NUM_DESCS); WRITE4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX, reg); /* Our features */ reg = htobe32(VIRTIO_RING_F_INDIRECT_DESC | VIRTIO_BLK_F_BLK_SIZE | VIRTIO_BLK_F_SEG_MAX); WRITE4(sc, VIRTIO_MMIO_HOST_FEATURES, reg); cfg = sc->cfg; cfg->capacity = htobe64(sc->mdio->md_mediasize / DEV_BSIZE); cfg->size_max = 0; /* not negotiated */ cfg->seg_max = htobe32(VTBLK_MAXSEGS); cfg->blk_size = htobe32(DEV_BSIZE); s = (uint32_t *)cfg; for (i = 0; i < sizeof(struct virtio_blk_config); i+=4) { WRITE4(sc, VIRTIO_MMIO_CONFIG + i, *s); s+=1; } sprintf(sc->ident, "Virtio block backend"); return (0); } static void vtblk_intr(void *arg) { struct beri_vtblk_softc *sc; int pending; int reg; sc = arg; reg = PIO_READ(sc->pio_recv); /* Ack */ PIO_SET(sc->pio_recv, reg, 0); pending = htobe32(reg); if (pending & Q_PFN) { vq_init(sc); } if (pending & Q_NOTIFY) { wakeup(sc); } } static int beri_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { struct beri_vtblk_softc *sc; int err; sc = dev->si_drv1; switch (cmd) { case MDIOCATTACH: /* take file as argument */ if (sc->vnode != NULL) { /* Already opened */ return (1); } sc->mdio = (struct md_ioctl *)addr; backend_info(sc); DPRINTF("opening file, td 0x%08x\n", (int)td); err = open_file(sc, td); if (err) return (err); PIO_SETUP_IRQ(sc->pio_recv, vtblk_intr, sc); sc->opened = 1; break; case MDIOCDETACH: - if (sc->vnode == 0) { + if (sc->vnode == NULL) { /* File not opened */ return (1); } sc->opened = 0; DPRINTF("closing file, td 0x%08x\n", (int)td); err = close_file(sc, td); if (err) return (err); PIO_TEARDOWN_IRQ(sc->pio_recv); break; default: break; } return (0); } static struct cdevsw beri_cdevsw = { .d_version = D_VERSION, .d_ioctl = beri_ioctl, .d_name = "virtio block backend", }; static int beri_vtblk_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "sri-cambridge,beri-vtblk")) return (ENXIO); device_set_desc(dev, "SRI-Cambridge BERI block"); return (BUS_PROBE_DEFAULT); } static int beri_vtblk_attach(device_t dev) { struct beri_vtblk_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, beri_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Memory interface */ sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); sc->cfg = malloc(sizeof(struct virtio_blk_config), M_DEVBUF, M_NOWAIT|M_ZERO); sx_init(&sc->sc_mtx, device_get_nameunit(sc->dev)); error = kthread_add(vtblk_thread, sc, NULL, &sc->vtblk_ktd, 0, 0, "beri_virtio_block"); if (error) { device_printf(dev, "cannot create kthread\n"); return (ENXIO); } if (setup_offset(dev, &sc->beri_mem_offset) != 0) return (ENXIO); if (setup_pio(dev, "pio-send", &sc->pio_send) != 0) return (ENXIO); if (setup_pio(dev, "pio-recv", &sc->pio_recv) != 0) return (ENXIO); sc->cdev = make_dev(&beri_cdevsw, 0, UID_ROOT, GID_WHEEL, S_IRWXU, "beri_vtblk"); if (sc->cdev == NULL) { device_printf(dev, 
"Failed to create character device.\n"); return (ENXIO); } sc->cdev->si_drv1 = sc; return (0); } static device_method_t beri_vtblk_methods[] = { DEVMETHOD(device_probe, beri_vtblk_probe), DEVMETHOD(device_attach, beri_vtblk_attach), { 0, 0 } }; static driver_t beri_vtblk_driver = { "beri_vtblk", beri_vtblk_methods, sizeof(struct beri_vtblk_softc), }; static devclass_t beri_vtblk_devclass; DRIVER_MODULE(beri_vtblk, simplebus, beri_vtblk_driver, beri_vtblk_devclass, 0, 0); Index: head/sys/dev/bhnd/cores/usb/bhnd_usb.c =================================================================== --- head/sys/dev/bhnd/cores/usb/bhnd_usb.c (revision 313981) +++ head/sys/dev/bhnd/cores/usb/bhnd_usb.c (revision 313982) @@ -1,486 +1,486 @@ /*- * Copyright (c) 2010, Aleksandr Rybalko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * Ported version of the Broadcom USB core driver from ZRouter project */ #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_usbvar.h" /****************************** Variables ************************************/ static const struct bhnd_device bhnd_usb_devs[] = { BHND_DEVICE(BCM, USB, "USB1.1 Host/Device core", NULL), BHND_DEVICE(BCM, USB20H, "USB2.0 Host core", NULL), BHND_DEVICE(BCM, USB20D, "USB2.0 Device core", NULL), BHND_DEVICE(BCM, USB11H, "USB1.1 Host core", NULL), BHND_DEVICE(BCM, USB11D, "USB1.1 Device core", NULL), BHND_DEVICE_END }; /****************************** Prototypes ***********************************/ static int bhnd_usb_attach(device_t); static int bhnd_usb_probe(device_t); static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit); static int bhnd_usb_print_all_resources(device_t dev); static int bhnd_usb_print_child(device_t bus, device_t child); static struct resource * bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int bhnd_usb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static struct resource_list * bhnd_usb_get_reslist(device_t dev, device_t child); static int bhnd_usb_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, bhnd_usb_devs, sizeof(bhnd_usb_devs[0])); if (id == NULL) return (ENXIO); device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } static int bhnd_usb_attach(device_t dev) { struct bhnd_usb_softc *sc; int rid; uint32_t tmp; int tries, err; sc = device_get_softc(dev); bhnd_reset_hw(dev, 0); /* * Allocate the resources which the parent bus has already * determined for us. * XXX: There are a few windows (usually 2); the RID should be chip-specific */ rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { BHND_ERROR_DEV(dev, "unable to allocate memory"); return (ENXIO); } sc->sc_bt = rman_get_bustag(sc->sc_mem); sc->sc_bh = rman_get_bushandle(sc->sc_mem); sc->sc_maddr = rman_get_start(sc->sc_mem); sc->sc_msize = rman_get_size(sc->sc_mem); rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq == NULL) { BHND_ERROR_DEV(dev, "unable to allocate IRQ"); return (ENXIO); } sc->sc_irqn = rman_get_start(sc->sc_irq); sc->mem_rman.rm_start = sc->sc_maddr; sc->mem_rman.rm_end = sc->sc_maddr + sc->sc_msize - 1; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "BHND USB core I/O memory addresses"; if (rman_init(&sc->mem_rman) != 0 || rman_manage_region(&sc->mem_rman, sc->mem_rman.rm_start, sc->mem_rman.rm_end) != 0) { panic("%s: sc->mem_rman", __func__); } sc->irq_rman.rm_start = sc->sc_irqn; sc->irq_rman.rm_end = sc->sc_irqn; sc->irq_rman.rm_type = RMAN_ARRAY; sc->irq_rman.rm_descr = "BHND USB core IRQ"; /* * BHND USB shares the same IRQ between OHCI and EHCI */ if (rman_init(&sc->irq_rman) != 0 || rman_manage_region(&sc->irq_rman, sc->irq_rman.rm_start, sc->irq_rman.rm_end) != 0) panic("%s: failed to set up IRQ rman", __func__); /* TODO: macros for registers */ bus_write_4(sc->sc_mem, 0x200, 0x7ff); DELAY(100); #define OHCI_CONTROL 0x04 bus_write_4(sc->sc_mem, OHCI_CONTROL, 0); if ( bhnd_get_device(dev) == BHND_COREID_USB20H) { uint32_t rev; rev = bhnd_get_hwrev(dev); BHND_INFO_DEV(dev, "USB HOST 2.0 setup for rev %d", rev); if (rev == 1/* ?
== 2 */) { /* SiBa code */ /* Change Flush control reg */ tmp = bus_read_4(sc->sc_mem, 0x400) & ~0x8; bus_write_4(sc->sc_mem, 0x400, tmp); tmp = bus_read_4(sc->sc_mem, 0x400); BHND_DEBUG_DEV(dev, "USB20H fcr: 0x%x", tmp); /* Change Shim control reg */ tmp = bus_read_4(sc->sc_mem, 0x304) & ~0x100; bus_write_4(sc->sc_mem, 0x304, tmp); tmp = bus_read_4(sc->sc_mem, 0x304); BHND_DEBUG_DEV(dev, "USB20H shim: 0x%x", tmp); } else if (rev >= 5) { /* BCMA code */ err = bhnd_alloc_pmu(dev); if(err) { BHND_ERROR_DEV(dev, "can't alloc pmu: %d", err); return (err); } err = bhnd_request_ext_rsrc(dev, 1); if(err) { BHND_ERROR_DEV(dev, "can't req ext: %d", err); return (err); } /* Take out of resets */ bus_write_4(sc->sc_mem, 0x200, 0x4ff); DELAY(25); bus_write_4(sc->sc_mem, 0x200, 0x6ff); DELAY(25); /* Make sure digital and AFE are locked in USB PHY */ bus_write_4(sc->sc_mem, 0x524, 0x6b); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0xab); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0x2b); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0x10ab); DELAY(50); bus_read_4(sc->sc_mem, 0x524); tries = 10000; for (;;) { DELAY(10); tmp = bus_read_4(sc->sc_mem, 0x528); if (tmp & 0xc000) break; if (--tries != 0) continue; tmp = bus_read_4(sc->sc_mem, 0x528); BHND_ERROR_DEV(dev, "USB20H mdio_rddata 0x%08x", tmp); } /* XXX: Puzzle code */ bus_write_4(sc->sc_mem, 0x528, 0x80000000); bus_read_4(sc->sc_mem, 0x314); DELAY(265); bus_write_4(sc->sc_mem, 0x200, 0x7ff); DELAY(10); /* Take USB and HSIC out of non-driving modes */ bus_write_4(sc->sc_mem, 0x510, 0); } } bus_generic_probe(dev); if (bhnd_get_device(dev) == BHND_COREID_USB20H && ( bhnd_get_hwrev(dev) > 0)) bhnd_usb_add_child(dev, 0, "ehci", -1); bhnd_usb_add_child(dev, 1, "ohci", -1); bus_generic_attach(dev); return (0); } static struct resource * bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct resource_list *rl; struct resource_list_entry *rle; int isdefault, needactivate; struct bhnd_usb_softc *sc = device_get_softc(bus); isdefault = RMAN_IS_DEFAULT_RANGE(start,end); needactivate = flags & RF_ACTIVE; rl = BUS_GET_RESOURCE_LIST(bus, child); rle = NULL; if (isdefault) { BHND_INFO_DEV(bus, "trying allocate def %d - %d for %s", type, *rid, device_get_nameunit(child) ); rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("%s: resource entry is busy", __func__); start = rle->start; end = rle->end; count = rle->count; } else { BHND_INFO_DEV(bus, "trying allocate %d - %d (%jx-%jx) for %s", type, *rid, start, end, device_get_nameunit(child) ); } /* * If the request is for a resource which we manage, * attempt to satisfy the allocation ourselves. 
*/ if (type == SYS_RES_MEMORY) { rv = rman_reserve_resource(&sc->mem_rman, start, end, count, flags, child); - if (rv == 0) { + if (rv == NULL) { BHND_ERROR_DEV(bus, "could not reserve resource"); return (0); } rman_set_rid(rv, *rid); if (needactivate && bus_activate_resource(child, type, *rid, rv)) { BHND_ERROR_DEV(bus, "could not activate resource"); rman_release_resource(rv); return (0); } return (rv); } if (type == SYS_RES_IRQ) { rv = rman_reserve_resource(&sc->irq_rman, start, end, count, flags, child); - if (rv == 0) { + if (rv == NULL) { BHND_ERROR_DEV(bus, "could not reserve resource"); return (0); } rman_set_rid(rv, *rid); if (needactivate && bus_activate_resource(child, type, *rid, rv)) { BHND_ERROR_DEV(bus, "could not activate resource"); rman_release_resource(rv); return (0); } return (rv); } /* * Pass the request to the parent. */ return (resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } static struct resource_list * bhnd_usb_get_reslist(device_t dev, device_t child) { struct bhnd_usb_devinfo *sdi; sdi = device_get_ivars(child); return (&sdi->sdi_rl); } static int bhnd_usb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list *rl; struct resource_list_entry *rle; rl = bhnd_usb_get_reslist(dev, child); if (rl == NULL) return (EINVAL); rle = resource_list_find(rl, type, rid); if (rle == NULL) return (EINVAL); rman_release_resource(r); rle->res = NULL; return (0); } static int bhnd_usb_print_all_resources(device_t dev) { struct bhnd_usb_devinfo *sdi; struct resource_list *rl; int retval; retval = 0; sdi = device_get_ivars(dev); rl = &sdi->sdi_rl; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return (retval); } static int bhnd_usb_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += bhnd_usb_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on %s\n", device_get_nameunit(bus)); return (retval); } static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhnd_usb_softc *sc; struct bhnd_usb_devinfo *sdi; device_t child; sc = device_get_softc(dev); child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); sdi = malloc(sizeof(struct bhnd_usb_devinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (sdi == NULL) return (NULL); if (strncmp(name, "ohci", 4) == 0) { sdi->sdi_maddr = sc->sc_maddr + 0x000; sdi->sdi_msize = 0x200; sdi->sdi_irq = sc->sc_irqn; BHND_INFO_DEV(dev, "ohci: irq=%d maddr=0x%jx", sdi->sdi_irq, sdi->sdi_maddr); } else if (strncmp(name, "ehci", 4) == 0) { sdi->sdi_maddr = sc->sc_maddr + 0x000; sdi->sdi_msize = 0x1000; sdi->sdi_irq = sc->sc_irqn; BHND_INFO_DEV(dev, "ehci: irq=%d maddr=0x%jx", sdi->sdi_irq, sdi->sdi_maddr); } else { panic("Unknown subdevice"); /* Unknown subdevice */ sdi->sdi_maddr = 1; sdi->sdi_msize = 1; sdi->sdi_irq = 1; } resource_list_init(&sdi->sdi_rl); /* * Determine memory window on bus and irq if one is needed. 
*/ resource_list_add(&sdi->sdi_rl, SYS_RES_MEMORY, 0, sdi->sdi_maddr, sdi->sdi_maddr + sdi->sdi_msize - 1, sdi->sdi_msize); resource_list_add(&sdi->sdi_rl, SYS_RES_IRQ, 0, sdi->sdi_irq, sdi->sdi_irq, 1); device_set_ivars(child, sdi); return (child); } static device_method_t bhnd_usb_methods[] = { /* Device interface */ DEVMETHOD(device_attach, bhnd_usb_attach), DEVMETHOD(device_probe, bhnd_usb_probe), /* Bus interface */ DEVMETHOD(bus_add_child, bhnd_usb_add_child), DEVMETHOD(bus_alloc_resource, bhnd_usb_alloc_resource), DEVMETHOD(bus_get_resource_list, bhnd_usb_get_reslist), DEVMETHOD(bus_print_child, bhnd_usb_print_child), DEVMETHOD(bus_release_resource, bhnd_usb_release_resource), /* Bus interface: generic part */ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static devclass_t bhnd_usb_devclass; DEFINE_CLASS_0(bhnd_usb, bhnd_usb_driver, bhnd_usb_methods, sizeof(struct bhnd_usb_softc)); DRIVER_MODULE(bhnd_usb, bhnd, bhnd_usb_driver, bhnd_usb_devclass, 0, 0); MODULE_VERSION(bhnd_usb, 1); Index: head/sys/dev/buslogic/bt_pci.c =================================================================== --- head/sys/dev/buslogic/bt_pci.c (revision 313981) +++ head/sys/dev/buslogic/bt_pci.c (revision 313982) @@ -1,219 +1,219 @@ /*- * Product specific probe and attach routines for: * Buslogic BT946, BT948, BT956, BT958 SCSI controllers * * Copyright (c) 1995, 1997, 1998 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define BT_PCI_IOADDR PCIR_BAR(0) #define BT_PCI_MEMADDR PCIR_BAR(1) #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040104Bul #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140104Bul #define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130104Bul static int bt_pci_alloc_resources(device_t dev) { int type = 0, rid, zero; - struct resource *regs = 0; - struct resource *irq = 0; + struct resource *regs = NULL; + struct resource *irq = NULL; #if 0 /* XXX Memory Mapped I/O seems to cause problems */ type = SYS_RES_MEMORY; rid = BT_PCI_MEMADDR; regs = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE); #else type = SYS_RES_IOPORT; rid = BT_PCI_IOADDR; regs = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE); #endif if (!regs) return (ENOMEM); zero = 0; irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &zero, RF_ACTIVE | RF_SHAREABLE); if (!irq) { bus_release_resource(dev, type, rid, regs); return (ENOMEM); } bt_init_softc(dev, regs, irq, 0); return (0); } static void bt_pci_release_resources(device_t dev) { struct bt_softc *bt = device_get_softc(dev); if (bt->port) /* XXX can't cope with memory registers anyway */ bus_release_resource(dev, SYS_RES_IOPORT, BT_PCI_IOADDR, bt->port); if (bt->irq) bus_release_resource(dev, SYS_RES_IRQ, 0, bt->irq); bt_free_softc(dev); } static int bt_pci_probe(device_t dev) { switch (pci_get_devid(dev)) { case PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER: case PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC: { struct bt_softc *bt = device_get_softc(dev); pci_info_data_t pci_info; int error; error = bt_pci_alloc_resources(dev); if (error) return (error); /* * Determine if an ISA compatible I/O port has been * enabled. If so, record the port so it will not * be probed by our ISA probe. If the PCI I/O port * was not set to the compatibility port, disable it. 
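 *
 * Editor's note: concretely, BOP_INQUIRE_PCI_INFO asks the adapter which
 * ISA-compatible port (if any) it decodes; bt_mark_probed_bio() then
 * excludes that port from the ISA probe, and if the PCI I/O resource in
 * use is not that compatibility port, BOP_MODIFY_IO_ADDR writes
 * BIO_DISABLED to switch the compatibility decode off.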
*/ error = bt_cmd(bt, BOP_INQUIRE_PCI_INFO, /*param*/NULL, /*paramlen*/0, (u_int8_t*)&pci_info, sizeof(pci_info), DEFAULT_CMD_TIMEOUT); if (error == 0 && pci_info.io_port < BIO_DISABLED) { bt_mark_probed_bio(pci_info.io_port); if (rman_get_start(bt->port) != bt_iop_from_bio(pci_info.io_port)) { u_int8_t new_addr; new_addr = BIO_DISABLED; bt_cmd(bt, BOP_MODIFY_IO_ADDR, /*param*/&new_addr, /*paramlen*/1, /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); } } bt_pci_release_resources(dev); device_set_desc(dev, "Buslogic Multi-Master SCSI Host Adapter"); return (BUS_PROBE_DEFAULT); } default: break; } return (ENXIO); } static int bt_pci_attach(device_t dev) { struct bt_softc *bt = device_get_softc(dev); int error; /* Initialize softc */ error = bt_pci_alloc_resources(dev); if (error) { device_printf(dev, "can't allocate resources in bt_pci_attach\n"); return error; } /* Allocate a dmatag for our CCB DMA maps */ if (bus_dma_tag_create( /* PCI parent */ bus_get_dma_tag(dev), /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ BUS_SPACE_MAXSIZE_32BIT, /* nsegments */ ~0, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->parent_dmat) != 0) { bt_pci_release_resources(dev); return (ENOMEM); } if (bt_probe(dev) || bt_fetch_adapter_info(dev) || bt_init(dev)) { bt_pci_release_resources(dev); return (ENXIO); } error = bt_attach(dev); if (error) { bt_pci_release_resources(dev); return (error); } return (0); } static device_method_t bt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bt_pci_probe), DEVMETHOD(device_attach, bt_pci_attach), { 0, 0 } }; static driver_t bt_pci_driver = { "bt", bt_pci_methods, sizeof(struct bt_softc), }; static devclass_t bt_devclass; DRIVER_MODULE(bt, pci, bt_pci_driver, bt_devclass, 0, 0); MODULE_DEPEND(bt, pci, 1, 1, 1); Index: head/sys/dev/ce/if_ce.c =================================================================== --- head/sys/dev/ce/if_ce.c (revision 313981) +++ head/sys/dev/ce/if_ce.c (revision 313982) @@ -1,2643 +1,2643 @@ /* * Cronyx-Tau32-PCI adapter driver for FreeBSD. * * Copyright (C) 2003-2005 Cronyx Engineering. * Copyright (C) 2003-2005 Kurakin Roman, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions.
* * $Cronyx: if_ce.c,v 1.9.2.8 2005/11/21 14:17:44 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #if __FreeBSD_version >= 500000 # define NPCI 1 #else # include "pci.h" #endif #if NPCI > 0 #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 504000 #include #endif #include #include #include #include #include #include #if __FreeBSD_version > 501000 # include # include #else # include # include #endif #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # include #else # include # include # define PP_CISCO IFF_LINK2 # include #endif #include #include #include #include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #ifndef IFP2SP #define IFP2SP(ifp) ((struct sppp*)ifp) #endif #ifndef SP2IFP #define SP2IFP(sp) ((struct ifnet*)sp) #endif #ifndef PCIR_BAR #define PCIR_BAR(x) (PCIR_MAPS + (x) * 4) #endif /* define as our previous return value */ #ifndef BUS_PROBE_DEFAULT #define BUS_PROBE_DEFAULT 0 #endif #define CE_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CE_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #ifndef IF_DRAIN #define IF_DRAIN(ifq) do { \ struct mbuf *m; \ for (;;) { \ IF_DEQUEUE(ifq, m); \ if (m == NULL) \ break; \ m_freem(m); \ } \ } while (0) #endif #ifndef _IF_QLEN #define _IF_QLEN(ifq) ((ifq)->ifq_len) #endif #ifndef callout_drain #define callout_drain callout_stop #endif #define CE_LOCK_NAME "ceX" #define CE_LOCK(_bd) mtx_lock (&(_bd)->ce_mtx) #define CE_UNLOCK(_bd) mtx_unlock (&(_bd)->ce_mtx) #define CE_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->ce_mtx, MA_OWNED) #define CDEV_MAJOR 185 static int ce_probe __P((device_t)); static int ce_attach __P((device_t)); static int ce_detach __P((device_t)); static device_method_t ce_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ce_probe), DEVMETHOD(device_attach, ce_attach), DEVMETHOD(device_detach, ce_detach), DEVMETHOD_END }; typedef struct _ce_dma_mem_t { unsigned long phys; void *virt; size_t size; #if __FreeBSD_version >= 500000 bus_dma_tag_t dmat; bus_dmamap_t mapp; #endif } ce_dma_mem_t; typedef struct _drv_t { char name [8]; int running; ce_board_t *board; ce_chan_t *chan; struct ifqueue rqueue; #ifdef NETGRAPH char nodename [NG_NODESIZE]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; #if __FreeBSD_version >= 500000 struct cdev *devt; #else /* __FreeBSD_version < 500000 */ dev_t devt; #endif ce_dma_mem_t dmamem; } drv_t; typedef struct _bdrv_t { ce_board_t *board; struct resource *ce_res; struct resource *ce_irq; void *ce_intrhand; ce_dma_mem_t dmamem; drv_t channel [NCHAN]; #if __FreeBSD_version >= 504000 struct mtx ce_mtx; #endif } bdrv_t; static driver_t ce_driver = { "ce", ce_methods, sizeof(bdrv_t), }; static devclass_t ce_devclass; static void ce_receive (ce_chan_t *c, unsigned char *data, int len); static void ce_transmit (ce_chan_t *c, void *attachment, int len); static void ce_error (ce_chan_t *c, int data); static void ce_up (drv_t *d); static void ce_start (drv_t *d); static void ce_down (drv_t *d); static void ce_watchdog (drv_t *d); static void ce_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else 
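
/*
 * Editor's note: built without NETGRAPH, each channel is exposed as an
 * sppp(4) point-to-point interface; the handlers declared below implement
 * that path and are wired up in ce_attach().
 */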
static void ce_ifstart (struct ifnet *ifp); static void ce_tlf (struct sppp *sp); static void ce_tls (struct sppp *sp); static int ce_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void ce_initialize (void *softc); #endif static ce_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static struct callout led_timo [NBRD]; static struct callout timeout_handle; static int ce_destroy = 0; #if __FreeBSD_version < 500000 static int ce_open (dev_t dev, int oflags, int devtype, struct proc *p); static int ce_close (dev_t dev, int fflag, int devtype, struct proc *p); static int ce_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); #else static int ce_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int ce_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int ce_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); #endif #if __FreeBSD_version < 500000 static struct cdevsw ce_cdevsw = { ce_open, ce_close, noread, nowrite, ce_ioctl, nopoll, nommap, nostrategy, "ce", CDEV_MAJOR, nodump, nopsize, D_NAGGED, -1 }; #elif __FreeBSD_version == 500000 static struct cdevsw ce_cdevsw = { ce_open, ce_close, noread, nowrite, ce_ioctl, nopoll, nommap, nostrategy, "ce", CDEV_MAJOR, nodump, nopsize, D_NAGGED, }; #elif __FreeBSD_version <= 501000 static struct cdevsw ce_cdevsw = { .d_open = ce_open, .d_close = ce_close, .d_read = noread, .d_write = nowrite, .d_ioctl = ce_ioctl, .d_poll = nopoll, .d_mmap = nommap, .d_strategy = nostrategy, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_dump = nodump, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 502103 static struct cdevsw ce_cdevsw = { .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 600000 static struct cdevsw ce_cdevsw = { .d_version = D_VERSION, .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_flags = D_NEEDGIANT, }; #else /* __FreeBSD_version >= 600000 */ static struct cdevsw ce_cdevsw = { .d_version = D_VERSION, .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", }; #endif /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! 
m) return 0; if (!(MCLGET(m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static int ce_probe (device_t dev) { if ((pci_get_vendor (dev) == TAU32_PCI_VENDOR_ID) && (pci_get_device (dev) == TAU32_PCI_DEVICE_ID)) { device_set_desc (dev, "Cronyx-Tau32-PCI serial adapter"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void ce_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NBRD; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { s = splimp (); if (ce_destroy) { splx (s); return; } d = channel[i * NCHAN + k]; if (!d) { splx (s); continue; } CE_LOCK ((bdrv_t *)d->board->sys); switch (d->chan->type) { case T_E1: ce_e1_timer (d->chan); break; default: break; } CE_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } s = splimp (); if (!ce_destroy) callout_reset (&timeout_handle, hz, ce_timeout, 0); splx (s); } static void ce_led_off (void *arg) { ce_board_t *b = arg; bdrv_t *bd = (bdrv_t *) b->sys; int s; s = splimp (); if (ce_destroy) { splx (s); return; } CE_LOCK (bd); TAU32_LedSet (b->ddk.pControllerObject, 0); CE_UNLOCK (bd); splx (s); } static void ce_intr (void *arg) { bdrv_t *bd = arg; ce_board_t *b = bd->board; int s; int i; #if __FreeBSD_version >= 500000 && defined NETGRAPH int error; #endif s = splimp (); if (ce_destroy) { splx (s); return; } CE_LOCK (bd); /* Turn LED on. */ TAU32_LedSet (b->ddk.pControllerObject, 1); TAU32_HandleInterrupt (b->ddk.pControllerObject); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, ce_led_off, b); CE_UNLOCK (bd); splx (s); /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->rqueue)) { IF_DEQUEUE (&d->rqueue,m); if (!m) continue; #ifdef NETGRAPH if (d->hook) { #if __FreeBSD_version >= 500000 NG_SEND_DATA_ONLY (error, d->hook, m); #else ng_queue_data (d->hook, m, 0); #endif } else { IF_DRAIN (&d->rqueue); } #else sppp_input (d->ifp, m); #endif } } } #if __FreeBSD_version >= 500000 static void ce_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } #ifndef BUS_DMA_ZERO #define BUS_DMA_ZERO 0 #endif static int ce_bus_dma_mem_alloc (int bnum, int cnum, ce_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, #if __FreeBSD_version >= 502000 NULL, NULL, #endif &dmem->dmat); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, ce_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } #if __FreeBSD_version >= 502000 bzero (dmem->virt, dmem->size); #endif 
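
	/*
	 * Editor's note: the three busdma steps above (create the tag,
	 * allocate the memory, load the map so ce_bus_dmamap_addr()
	 * reports the bus address) are unwound in reverse order by
	 * ce_bus_dma_mem_free() below.
	 */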
return 1; } static void ce_bus_dma_mem_free (ce_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } #else static int ce_bus_dma_mem_alloc (int bnum, int cnum, ce_dma_mem_t *dmem) { dmem->virt = contigmalloc (dmem->size, M_DEVBUF, M_WAITOK, 0x100000, 0xffffffff, 16, 0); if (dmem->virt == NULL) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate dma memory\n"); return 0; } dmem->phys = vtophys (dmem->virt); bzero (dmem->virt, dmem->size); return 1; } static void ce_bus_dma_mem_free (ce_dma_mem_t *dmem) { contigfree (dmem->virt, dmem->size, M_DEVBUF); } #endif /* * Called if the probe succeeded. */ static int ce_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); #if __FreeBSD_version >= 504000 char *ce_ln = CE_LOCK_NAME; #endif vm_offset_t vbase; int rid, error; ce_board_t *b; ce_chan_t *c; drv_t *d; int s; b = malloc (sizeof(ce_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ce%d: couldn't allocate memory\n", unit); return (ENXIO); } bzero (b, sizeof(ce_board_t)); b->ddk.sys = &b; #if __FreeBSD_version >= 440000 pci_enable_busmaster (dev); #endif bd->dmamem.size = TAU32_ControllerObjectSize; if (! ce_bus_dma_mem_alloc (unit, -1, &bd->dmamem)) { free (b, M_DEVBUF); return (ENXIO); } b->ddk.pControllerObject = bd->dmamem.virt; bd->board = b; b->sys = bd; rid = PCIR_BAR(0); bd->ce_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->ce_res) { printf ("ce%d: cannot map memory\n", unit); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->ce_res); b->ddk.PciBar1VirtualAddress = (void *)vbase; b->ddk.ControllerObjectPhysicalAddress = bd->dmamem.phys; b->ddk.pErrorNotifyCallback = ce_error_callback; b->ddk.pStatusNotifyCallback = ce_status_callback; b->num = unit; TAU32_BeforeReset(&b->ddk); pci_write_config (dev, TAU32_PCI_RESET_ADDRESS, TAU32_PCI_RESET_ON, 4); pci_write_config (dev, TAU32_PCI_RESET_ADDRESS, TAU32_PCI_RESET_OFF, 4); if(!TAU32_Initialize(&b->ddk, 0)) { printf ("ce%d: init adapter error 0x%08x, bus dead bits 0x%08lx\n", unit, b->ddk.InitErrors, b->ddk.DeadBits); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); return (ENXIO); } s = splimp (); ce_init_board (b); rid = 0; bd->ce_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! 
bd->ce_irq) { printf ("ce%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); splx (s); return (ENXIO); } #if __FreeBSD_version >= 500000 callout_init (&led_timo[unit], 1); #else callout_init (&led_timo[unit]); #endif error = bus_setup_intr (dev, bd->ce_irq, #if __FreeBSD_version >= 500013 INTR_TYPE_NET|INTR_MPSAFE, #else INTR_TYPE_NET, #endif NULL, ce_intr, bd, &bd->ce_intrhand); if (error) { printf ("ce%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->ce_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); splx (s); return (ENXIO); } switch (b->ddk.Model) { case 1: strcpy (b->name, TAU32_BASE_NAME); break; case 2: strcpy (b->name, TAU32_LITE_NAME); break; case 3: strcpy (b->name, TAU32_ADPCM_NAME); break; default: strcpy (b->name, TAU32_UNKNOWN_NAME); break; } printf ("ce%d: %s\n", unit, b->name); for (c = b->chan; c < b->chan + NCHAN; ++c) { c->num = (c - b->chan); c->board = b; d = &bd->channel[c->num]; d->dmamem.size = sizeof(ce_buf_t); if (! ce_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; channel [b->num * NCHAN + c->num] = d; sprintf (d->name, "ce%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; } for (c = b->chan; c < b->chan + NCHAN; ++c) { if (c->sys == NULL) continue; d = c->sys; callout_init (&d->timeout_handle, 1); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } #if __FreeBSD_version >= 500000 NG_NODE_SET_PRIVATE (d->node, d); #else d->node->private = d; #endif sprintf (d->nodename, "%s%d", NG_CE_NODE_TYPE, c->board->num * NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); #if __FreeBSD_version >= 500000 NG_NODE_UNREF (d->node); #else ng_rmnode (d->node); ng_unref (d->node); #endif continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; d->rqueue.ifq_maxlen = ifqmaxlen; #if __FreeBSD_version >= 500000 mtx_init (&d->queue.ifq_mtx, "ce_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "ce_queue_hi", NULL, MTX_DEF); mtx_init (&d->rqueue.ifq_mtx, "ce_rqueue", NULL, MTX_DEF); #endif #else /*NETGRAPH*/ #if __FreeBSD_version >= 600031 d->ifp = if_alloc(IFT_PPP); #else d->ifp = malloc (sizeof(struct sppp), M_DEVBUF, M_WAITOK); bzero (d->ifp, sizeof(struct sppp)); #endif if (!d->ifp) { printf ("%s: cannot if_alloc() interface\n", d->name); continue; } d->ifp->if_softc = d; #if __FreeBSD_version > 501000 if_initname (d->ifp, "ce", b->num * NCHAN + c->num); #else d->ifp->if_unit = b->num * NCHAN + c->num; d->ifp->if_name = "ce"; #endif d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = ce_sioctl; d->ifp->if_start = ce_ifstart; d->ifp->if_init = ce_initialize; d->rqueue.ifq_maxlen = ifqmaxlen; #if __FreeBSD_version >= 500000 mtx_init (&d->rqueue.ifq_mtx, "ce_rqueue", NULL, MTX_DEF); #endif sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = ce_tlf; IFP2SP(d->ifp)->pp_tls = ce_tls; /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ ce_start_chan (c, 1, 1, d->dmamem.virt, d->dmamem.phys); /* Register callback functions. 
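 *
 * Editor's note: these callbacks run from ce_intr() via
 * TAU32_HandleInterrupt() with the board mutex held, so ce_receive() only
 * queues mbufs on d->rqueue; ce_intr() hands them to sppp/netgraph after
 * dropping the lock ("Pass packets in a lock-free state").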
*/ ce_register_transmit (c, &ce_transmit); ce_register_receive (c, &ce_receive); ce_register_error (c, &ce_error); d->devt = make_dev (&ce_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "ce%d", b->num*NCHAN+c->num); } #if __FreeBSD_version >= 504000 ce_ln[2] = '0' + unit; mtx_init (&bd->ce_mtx, ce_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); #endif CE_LOCK (bd); TAU32_EnableInterrupts(b->ddk.pControllerObject); adapter[unit] = b; CE_UNLOCK (bd); splx (s); return 0; } static int ce_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); ce_board_t *b = bd->board; ce_chan_t *c; int s; #if __FreeBSD_version >= 504000 KASSERT (mtx_initialized (&bd->ce_mtx), ("ce mutex not initialized")); #endif s = splimp (); CE_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; /* XXX Nonexistent chan! */ if (! d || ! d->chan) continue; if (d->running) { CE_UNLOCK (bd); splx (s); return EBUSY; } } /* OK, we can unload the driver */ /* First we should disable interrupts */ ce_destroy = 1; TAU32_DisableInterrupts(b->ddk.pControllerObject); callout_stop (&led_timo[b->num]); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan) continue; callout_stop (&d->timeout_handle); #ifndef NETGRAPH /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); /* Detach from the system list of interfaces. */ if_detach (d->ifp); #if __FreeBSD_version > 600031 if_free(d->ifp); #else free (d->ifp, M_DEVBUF); #endif IF_DRAIN (&d->rqueue); #if __FreeBSD_version >= 500000 mtx_destroy (&d->rqueue.ifq_mtx); #endif #else #if __FreeBSD_version >= 500000 if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } IF_DRAIN (&d->rqueue); mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); mtx_destroy (&d->rqueue.ifq_mtx); #else ng_rmnode (d->node); d->node = 0; #endif #endif destroy_dev (d->devt); } CE_UNLOCK (bd); splx (s); callout_drain (&led_timo[b->num]); /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->ce_irq, bd->ce_intrhand); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->ce_irq); TAU32_DestructiveHalt (b->ddk.pControllerObject, 0); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan) continue; callout_drain (&d->timeout_handle); - channel [b->num * NCHAN + c->num] = 0; + channel [b->num * NCHAN + c->num] = NULL; /* Deallocate buffers. */ ce_bus_dma_mem_free (&d->dmamem); } - adapter [b->num] = 0; + adapter [b->num] = NULL; ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); #if __FreeBSD_version >= 504000 mtx_destroy (&bd->ce_mtx); #endif return 0; } #ifndef NETGRAPH static void ce_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CE_LOCK (bd); ce_start (d); CE_UNLOCK (bd); } static void ce_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CE_DEBUG2 (d, ("ce_tlf\n")); sp->pp_down (sp); } static void ce_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CE_DEBUG2 (d, ("ce_tls\n")); sp->pp_up (sp); } /* * Process an ioctl request.
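 *
 * Editor's note: sppp_ioctl() handles the generic part first; the driver
 * then mirrors IFF_DEBUG into the channel debug level and, for
 * SIOCSIFFLAGS/SIOCSIFADDR, compares the RUNNING flag before and after to
 * decide whether to call ce_up()/ce_start() or ce_down().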
*/ static int ce_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; #if __FreeBSD_version >= 600034 was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; #else was_up = (ifp->if_flags & IFF_RUNNING) != 0; #endif error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CE_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CE_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CE_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CE_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CE_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CE_LOCK (bd); #if __FreeBSD_version >= 600034 should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; #else should_be_up = (ifp->if_flags & IFF_RUNNING) != 0; #endif if (! was_up && should_be_up) { /* Interface goes up -- start it. */ ce_up (d); ce_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ ce_down (d); } CE_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); CE_UNLOCK (bd); splx (s); return 0; } /* * Initialization of interface. * It seems to be never called by upper level? */ static void ce_initialize (void *softc) { drv_t *d = softc; CE_DEBUG (d, ("ce_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void ce_down (drv_t *d) { CE_DEBUG (d, ("ce_down\n")); /* Interface is going down -- stop it. */ ce_set_dtr (d->chan, 0); ce_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); } /* * Start the interface. Called on splimp(). */ static void ce_up (drv_t *d) { CE_DEBUG (d, ("ce_up\n")); ce_set_dtr (d->chan, 1); ce_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void ce_send (drv_t *d) { struct mbuf *m; u_short len; CE_DEBUG2 (d, ("ce_send\n")); /* No output if the interface is down. */ if (! d->running) return; while (ce_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH #if __FreeBSD_version >= 500000 BPF_MTAP (d->ifp, m); #else if (d->ifp->if_bpf) bpf_mtap (d->ifp, m); #endif #endif #if __FreeBSD_version >= 490000 len = m_length (m, NULL); #else len = m->m_pkthdr.len; #endif if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) ce_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { ce_buf_item_t *item = (ce_buf_item_t*)d->chan->tx_queue; m_copydata (m, 0, len, item->buf); ce_send_packet (d->chan, item->buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ d->timeout = 10; } #ifndef NETGRAPH #if __FreeBSD_version >= 600034 d->ifp->if_flags |= IFF_DRV_OACTIVE; #else d->ifp->if_flags |= IFF_OACTIVE; #endif #endif } /* * Start output on the interface. * Always called on splimp(). */ static void ce_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) ce_set_dtr (d->chan, 1); if (! 
d->chan->rts) ce_set_rts (d->chan, 1); ce_send (d); callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void ce_watchdog (drv_t *d) { CE_DEBUG (d, ("device timeout\n")); if (d->running) { ce_set_dtr (d->chan, 0); ce_set_rts (d->chan, 0); /* ce_stop_chan (d->chan);*/ /* ce_start_chan (d->chan, 1, 1, 0, 0);*/ ce_set_dtr (d->chan, 1); ce_set_rts (d->chan, 1); ce_start (d); } } static void ce_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CE_LOCK(bd); if (d->timeout == 1) ce_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); CE_UNLOCK(bd); } static void ce_transmit (ce_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); #if __FreeBSD_version >= 600034 d->ifp->if_flags &= ~IFF_DRV_OACTIVE; #else d->ifp->if_flags &= ~IFF_OACTIVE; #endif #endif ce_start (d); } static void ce_receive (ce_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; if (! d->running) return; m = makembuf (data, len); if (! m) { CE_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; IF_ENQUEUE(&d->rqueue, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ BPF_MTAP(d->ifp, m); IF_ENQUEUE(&d->rqueue, m); #endif } static void ce_error (ce_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CE_FRAME: CE_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_CRC: CE_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_OVERRUN: CE_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_OVERFLOW: CE_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_UNDERRUN: CE_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); #if __FreeBSD_version >= 600034 d->ifp->if_flags &= ~IFF_DRV_OACTIVE; #else d->ifp->if_flags &= ~IFF_OACTIVE; #endif #endif ce_start (d); break; default: CE_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ #if __FreeBSD_version < 500000 static int ce_open (dev_t dev, int oflags, int devtype, struct proc *p) #else static int ce_open (struct cdev *dev, int oflags, int devtype, struct thread *td) #endif { int unit = dev2unit (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CE_DEBUG2 (d, ("ce_open\n")); return 0; } /* * Only called on the LAST close. */ #if __FreeBSD_version < 500000 static int ce_close (dev_t dev, int fflag, int devtype, struct proc *p) #else static int ce_close (struct cdev *dev, int fflag, int devtype, struct thread *td) #endif { drv_t *d = channel [dev2unit (dev)]; CE_DEBUG2 (d, ("ce_close\n")); return 0; } static int ce_modem_status (ce_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int status, s; status = d->running ? 
TIOCM_LE : 0; s = splimp (); CE_LOCK (bd); if (ce_get_cd (c)) status |= TIOCM_CD; if (ce_get_cts (c)) status |= TIOCM_CTS; if (ce_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CE_UNLOCK (bd); splx (s); return status; } #if __FreeBSD_version < 500000 static int ce_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) #else static int ce_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) #endif { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd = d->board->sys; ce_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CE_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; s<NBRD*NCHAN; ++s) if (channel [s]) mask [s/8] |= 1 << (s & 7); bcopy (mask, data, sizeof (mask)); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: CE_DEBUG2 (d, ("ioctl: getproto\n")); strcpy ((char*)data, (IFP2SP(d->ifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CE_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; #if __FreeBSD_version >= 600034 if (d->ifp->if_flags & IFF_DRV_RUNNING) #else if (d->ifp->if_flags & IFF_RUNNING) #endif return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; #if PP_FR != 0 } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; #endif } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~PP_FR; IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CE_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CE_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splimp (); CE_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CE_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CE_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser!
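 * (The #if ladder below tracks the privilege-check API: suser(9) took
 * a struct proc before FreeBSD 5.0 and a struct thread until the
 * priv(9) framework replaced it in 7.0 with priv_check(td,
 * PRIV_DRIVER). The same pattern repeats for every privileged ioctl
 * in this switch.)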
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CE_DEBUG2 (d, ("ioctl: getcfg\n")); *(char*)data = 'c'; return 0; case SERIAL_SETCFG: CE_DEBUG2 (d, ("ioctl: setcfg\n")); #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (*((char*)data) != 'c') return EINVAL; return 0; case SERIAL_GETSTAT: CE_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CE_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = 0; if (c->status & ESTS_NOALARM) opte1->status |= E1_NOALARM; if (c->status & ESTS_LOS) opte1->status |= E1_LOS; if (c->status & ESTS_LOF) opte1->status |= E1_LOF; if (c->status & ESTS_AIS) opte1->status |= E1_AIS; if (c->status & ESTS_LOMF) opte1->status |= E1_LOMF; if (c->status & ESTS_AIS16) opte1->status |= E1_AIS16; if (c->status & ESTS_FARLOF) opte1->status |= E1_FARLOF; if (c->status & ESTS_FARLOMF) opte1->status |= E1_FARLOMF; if (c->status & ESTS_TSTREQ) opte1->status |= E1_TSTREQ; if (c->status & ESTS_TSTERR) opte1->status |= E1_TSTERR; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_CLRSTAT: CE_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! 
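 * (CLRSTAT zeroes the running counters as well as the whole 48-entry
 * interval history that GETESTAT reports.)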
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); return 0; case SERIAL_GETLOOP: CE_DEBUG2 (d, ("ioctl: getloop\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CE_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_lloop (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETRLOOP: CE_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->rloop; return 0; case SERIAL_SETRLOOP: CE_DEBUG2 (d, ("ioctl: setrloop\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_rloop (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CE_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CE_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always kept greater than zero, to keep the * logic simple; actually switching debugging off is the job of * IFF_DEBUG. */ d->chan->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) d->chan->debug = d->chan->debug_shadow; #else d->chan->debug = *(int*)data; #endif return 0; case SERIAL_GETBAUD: CE_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CE_DEBUG2 (d, ("ioctl: setbaud\n")); if (c->type != T_E1 || !c->unfram) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_baud (c, *(long*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: CE_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CE_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser!
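 * (The argument is a 32-bit mask with bit N selecting E1 timeslot N;
 * slot 0 carries framing and slot 16 normally carries CAS signalling.
 * From userland this would look roughly like the following sketch,
 * where fd is a hypothetical descriptor for the channel device:
 *
 *	u_long ts = 0xfffefffe;		timeslots 1-15 and 17-31
 *	ioctl (fd, SERIAL_SETTIMESLOTS, &ts);
 * )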
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_ts (c, *(u_long*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETHIGAIN: CE_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CE_DEBUG2 (d, ("ioctl: sethigain\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_higain (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CE_DEBUG2 (d, ("ioctl: getphony\n")); *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CE_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_phony (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUNFRAM: CE_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1 || c->num != 0) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CE_DEBUG2 (d, ("ioctl: setunfram\n")); if (c->type != T_E1 || c->num != 0) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_unfram (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSCRAMBLER: CE_DEBUG2 (d, ("ioctl: getscrambler\n")); if (!c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CE_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (!c->unfram) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_scrambler (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETMONITOR: CE_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CE_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_monitor (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUSE16: CE_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CE_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! 
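 * (use16 lets timeslot 16 carry payload data instead of CAS
 * signalling, raising a framed E1 link from 30 to 31 usable data
 * slots; in unframed mode the setting is meaningless, which is why
 * GETUSE16 above rejects it there.)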
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_use16 (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCRC4: CE_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CE_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1 || c->unfram) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_crc4 (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: CE_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; } return 0; case SERIAL_SETCLK: CE_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); switch (*(int*)data) { default: ce_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: ce_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: ce_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: ce_set_gsyn (c, GSYN_RCV1); break; } CE_UNLOCK (bd); splx (s); return 0; #if 0 case SERIAL_RESET: CE_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); /* ce_reset (c->board, 0, 0);*/ CE_UNLOCK (bd); splx (s); return 0; case SERIAL_HARDRESET: CE_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); /* hard_reset (c->board); */ CE_UNLOCK (bd); splx (s); return 0; #endif case SERIAL_GETCABLE: CE_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); *(int*)data = CABLE_TP; CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDIR: CE_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CE_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_dir (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, 1); CE_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 
1 : 0); ce_set_rts (c, (*(int*)data & TIOCM_RTS) ? 1 : 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CE_LOCK (bd); if (*(int*)data & TIOCM_DTR) ce_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) ce_set_rts (c, 1); CE_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CE_LOCK (bd); if (*(int*)data & TIOCM_DTR) ce_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) ce_set_rts (c, 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = ce_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH #if __FreeBSD_version >= 500000 static int ng_ce_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); #else static int ng_ce_constructor (node_p *node) { drv_t *d = (*node)->private; #endif CE_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_ce_newhook (node_p node, hook_p hook, const char *name) { int s; #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); #else drv_t *d = node->private; #endif bdrv_t *bd = d->board->sys; CE_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CE_HOOK_DEBUG) == 0) { #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, NULL); #else hook->private = 0; #endif d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CE_HOOK_RAW) != 0) return EINVAL; #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, d); #else hook->private = d; #endif d->hook = hook; s = splimp (); CE_LOCK (bd); ce_up (d); CE_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, ce_chan_t *c, int need_header) { int status = ce_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? 
"On" : "-"); return length; } static int print_stats (char *s, ce_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? '/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, ce_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ 
length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. */ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, ce_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "ce%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); length += sprintf (s + length, "\n"); return length; } #if __FreeBSD_version >= 500000 static int ng_ce_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; #else static int ng_ce_rcvmsg (node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { drv_t *d = node->private; #endif struct ng_mesg *resp = NULL; int error = 0; CE_DEBUG (d, ("Rcvmsg\n")); #if __FreeBSD_version >= 500000 NGI_GET_MSG (item, msg); #endif switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CE_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; #if __FreeBSD_version >= 500000 NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #else resp = malloc (dl, M_NETGRAPH, M_NOWAIT); if (!
resp) { error = ENOMEM; break; } bzero (resp, dl); #endif s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connect to channel"); #if __FreeBSD_version < 500000 (resp)->header.version = NG_VERSION; (resp)->header.arglen = strlen (s) + 1; (resp)->header.token = msg->header.token; (resp)->header.typecookie = NGM_CE_COOKIE; (resp)->header.cmd = msg->header.cmd; #endif strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } #if __FreeBSD_version >= 500000 NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); #else *rptr = resp; free (msg, M_NETGRAPH); #endif return error; } #if __FreeBSD_version >= 500000 static int ng_ce_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; #if __FreeBSD_version < 502120 meta_p meta; #else struct ng_tag_prio *ptag; #endif #else static int ng_ce_rcvdata (hook_p hook, struct mbuf *m, meta_p meta) { drv_t *d = hook->node->private; #endif bdrv_t *bd = d->board->sys; struct ifqueue *q; int s; CE_DEBUG2 (d, ("Rcvdata\n")); #if __FreeBSD_version >= 500000 NGI_GET_M (item, m); #if __FreeBSD_version < 502120 NGI_GET_META (item, meta); #endif NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); #if __FreeBSD_version < 502120 NG_FREE_META (meta); #endif #else if (! hook->private || ! d) { NG_FREE_DATA (m,meta); #endif return ENETDOWN; } #if __FreeBSD_version >= 502120 /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; #else q = (meta && meta->priority > 0) ? 
&d->hi_queue : &d->queue; #endif s = splimp (); CE_LOCK (bd); #if __FreeBSD_version >= 500000 IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CE_UNLOCK (bd); splx (s); NG_FREE_M (m); #if __FreeBSD_version < 502120 NG_FREE_META (meta); #endif return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); #else if (IF_QFULL (q)) { IF_DROP (q); CE_UNLOCK (bd); splx (s); NG_FREE_DATA (m, meta); return ENOBUFS; } IF_ENQUEUE (q, m); #endif ce_start (d); CE_UNLOCK (bd); splx (s); return 0; } static int ng_ce_rmnode (node_p node) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); CE_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE #if __FreeBSD_version >= 502120 if (node->nd_flags & NGF_REALLY_DIE) { #else if (node->nd_flags & NG_REALLY_DIE) { #endif NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } #if __FreeBSD_version >= 502120 NG_NODE_REVIVE(node); /* Persistent node */ #else node->nd_flags &= ~NG_INVALID; #endif #endif #else /* __FreeBSD_version < 500000 */ drv_t *d = node->private; if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } node->flags |= NG_INVALID; ng_cutlinks (node); #ifdef KLD_MODULE ng_unname (node); ng_unref (node); #endif #endif return 0; } static int ng_ce_connect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CE_DEBUG (d, ("Connect\n")); callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); } return 0; } static int ng_ce_disconnect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CE_DEBUG (d, ("Disconnect\n")); #if __FreeBSD_version >= 500000 if (NG_HOOK_PRIVATE (hook)) #else if (hook->private) #endif { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } /* If it was rearmed while we waited for it, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); } return 0; } #endif static int ce_modevent (module_t mod, int type, void *unused) { #if __FreeBSD_version < 500000 dev_t dev; struct cdevsw *cdsw; #endif static int load_count = 0; #if __FreeBSD_version < 500000 dev = makedev (CDEV_MAJOR, 0); #endif switch (type) { case MOD_LOAD: #if __FreeBSD_version < 500000 if (dev != NODEV && (cdsw = devsw (dev)) && cdsw->d_maj == CDEV_MAJOR) { printf ("Tau32-PCI driver is already in the system\n"); return (ENXIO); } #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_ce\n"); #endif ++load_count; #if __FreeBSD_version <= 500000 cdevsw_add (&ce_cdevsw); #endif #if __FreeBSD_version >= 500000 callout_init (&timeout_handle, 1); #else callout_init (&timeout_handle); #endif callout_reset (&timeout_handle, hz*5, ce_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau32-PCI\n"); #if __FreeBSD_version <= 500000 cdevsw_remove (&ce_cdevsw); #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH ng_rmtype (&typestruct); #endif } /* If it was rearmed while we waited for it, just stop it. * We should never actually hit this condition, but the code could * change in the future, so just be a little paranoid.
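 * (callout_drain(9), unlike callout_stop(9), also sleeps until a
 * handler that is already running has finished, which is what makes
 * it safe to free the timer's resources afterwards.)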
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH #if __FreeBSD_version >= 502100 static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CE_NODE_TYPE, .constructor = ng_ce_constructor, .rcvmsg = ng_ce_rcvmsg, .shutdown = ng_ce_rmnode, .newhook = ng_ce_newhook, .connect = ng_ce_connect, .rcvdata = ng_ce_rcvdata, .disconnect = ng_ce_disconnect, }; #else /* __FreeBSD_version < 502100 */ static struct ng_type typestruct = { #if __FreeBSD_version >= 500000 NG_ABI_VERSION, #else NG_VERSION, #endif NG_CE_NODE_TYPE, ce_modevent, ng_ce_constructor, ng_ce_rcvmsg, ng_ce_rmnode, ng_ce_newhook, NULL, ng_ce_connect, ng_ce_rcvdata, #if __FreeBSD_version < 500000 NULL, #endif ng_ce_disconnect, NULL }; #endif /* __FreeBSD_version < 502100 */ #endif /*NETGRAPH*/ #if __FreeBSD_version >= 500000 #ifdef NETGRAPH MODULE_DEPEND (ng_ce, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (ce, sppp, 1, 1, 1); #endif #ifdef KLD_MODULE DRIVER_MODULE (cemod, pci, ce_driver, ce_devclass, ce_modevent, NULL); #else DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ce_modevent, NULL); #endif #else /* if __FreeBSD_version < 500000*/ #ifdef NETGRAPH DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ng_mod_event, &typestruct); #else DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ce_modevent, NULL); #endif #endif /* __FreeBSD_version < 500000 */ #endif /* NPCI */ Index: head/sys/dev/cm/smc90cx6.c =================================================================== --- head/sys/dev/cm/smc90cx6.c (revision 313981) +++ head/sys/dev/cm/smc90cx6.c (revision 313982) @@ -1,924 +1,924 @@ /* $NetBSD: smc90cx6.c,v 1.38 2001/07/07 15:57:53 thorpej Exp $ */ #include __FBSDID("$FreeBSD$"); /*- * Copyright (c) 1994, 1995, 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Ignatios Souvatzis. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Chip core driver for the SMC90c26 / SMC90c56 (and SMC90c66 in '56 * compatibility mode) boards */ /* #define CMSOFTCOPY */ #define CMRETRANSMIT /**/ /* #define CM_DEBUG */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(if_cm, arcnet, 1, 1, 1); /* these should be elsewhere */ #define ARC_MIN_LEN 1 #define ARC_MIN_FORBID_LEN 254 #define ARC_MAX_FORBID_LEN 256 #define ARC_MAX_LEN 508 #define ARC_ADDR_LEN 1 /* for watchdog timer. This should be more than enough. */ #define ARCTIMEOUT (5*IFNET_SLOWHZ) devclass_t cm_devclass; /* * This currently uses 2 bufs for tx, 2 for rx * * New rx protocol: * * rx has a fillcount variable. If fillcount > (NRXBUF-1), * rx can be switched off from rx hard int. * Else rx is restarted on the other receiver. * rx soft int counts down. if it is == (NRXBUF-1), it restarts * the receiver. * To ensure packet ordering (we need that for 1201 later), we have a counter * which is incremented modulo 256 on each receive and a per buffer * variable, which is set to the counter on filling. The soft int can * compare both values to determine the older packet. * * Transmit direction: * * cm_start checks tx_fillcount * case 2: return * * else fill tx_act ^ 1 && inc tx_fillcount * * check tx_fillcount again. * case 2: set IFF_DRV_OACTIVE to stop arc_output from filling us. * case 1: start tx * * tint clears IFF_OACTIVE, decrements and checks tx_fillcount * case 1: start tx on tx_act ^ 1, softcall cm_start * case 0: softcall cm_start * * #define fill(i) get mbuf && copy mbuf to chip(i) */ void cm_init(void *); static void cm_init_locked(struct cm_softc *); static void cm_reset_locked(struct cm_softc *); void cm_start(struct ifnet *); void cm_start_locked(struct ifnet *); int cm_ioctl(struct ifnet *, unsigned long, caddr_t); void cm_watchdog(void *); void cm_srint_locked(void *vsc); static void cm_tint_locked(struct cm_softc *, int); void cm_reconwatch_locked(void *); /* * Release all resources */ void cm_release_resources(dev) device_t dev; { struct cm_softc *sc = device_get_softc(dev); if (sc->port_res != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port_res); sc->port_res = NULL; } if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); sc->mem_res = NULL; } if (sc->irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); sc->irq_res = NULL; } } int cm_attach(dev) device_t dev; { struct cm_softc *sc = device_get_softc(dev); struct ifnet *ifp; u_int8_t linkaddress; ifp = sc->sc_ifp = if_alloc(IFT_ARCNET); if (ifp == NULL) return (ENOSPC); /* * read the arcnet address from the board */ GETREG(CMRESET); do { DELAY(200); } while (!(GETREG(CMSTAT) & CM_POR)); linkaddress = GETMEM(CMMACOFF); /* clear the int mask... 
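 * (on this board the CMSTAT write doubles as the interrupt enable
 * mask, so writing 0 masks everything until cm_reset_locked()
 * re-enables the bits it needs)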
*/ sc->sc_intmask = 0; PUTREG(CMSTAT, 0); PUTREG(CMCMD, CM_CONF(CONF_LONG)); PUTREG(CMCMD, CM_CLR(CLR_POR|CLR_RECONFIG)); sc->sc_recontime = sc->sc_reconcount = 0; /* * set interface to stopped condition (reset) */ cm_stop_locked(sc); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_output = arc_output; ifp->if_start = cm_start; ifp->if_ioctl = cm_ioctl; ifp->if_init = cm_init; /* XXX IFQ_SET_READY(&ifp->if_snd); */ ifp->if_snd.ifq_maxlen = ifqmaxlen; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; arc_ifattach(ifp, linkaddress); #ifdef CMSOFTCOPY sc->sc_rxcookie = softintr_establish(IPL_SOFTNET, cm_srint, sc); sc->sc_txcookie = softintr_establish(IPL_SOFTNET, (void (*)(void *))cm_start, ifp); #endif callout_init_mtx(&sc->sc_recon_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_timer, &sc->sc_mtx, 0); if_printf(ifp, "link addr 0x%02x (%d)\n", linkaddress, linkaddress); return 0; } /* * Initialize device * */ void cm_init(xsc) void *xsc; { struct cm_softc *sc = (struct cm_softc *)xsc; CM_LOCK(sc); cm_init_locked(sc); CM_UNLOCK(sc); } static void cm_init_locked(struct cm_softc *sc) { struct ifnet *ifp = sc->sc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ifp->if_drv_flags |= IFF_DRV_RUNNING; cm_reset_locked(sc); } } /* * Reset the interface... * * Assumes that it is called with sc_mtx held */ void cm_reset_locked(sc) struct cm_softc *sc; { struct ifnet *ifp; int linkaddress; ifp = sc->sc_ifp; #ifdef CM_DEBUG if_printf(ifp, "reset\n"); #endif /* stop and restart hardware */ GETREG(CMRESET); do { DELAY(200); } while (!(GETREG(CMSTAT) & CM_POR)); linkaddress = GETMEM(CMMACOFF); #if defined(CM_DEBUG) && (CM_DEBUG > 2) if_printf(ifp, "reset: card reset, link addr = 0x%02x (%d)\n", linkaddress, linkaddress); #endif /* tell the routing level about the (possibly changed) link address */ arc_storelladdr(ifp, linkaddress); arc_frag_init(ifp); /* POR is NMI, but we need it below: */ sc->sc_intmask = CM_RECON|CM_POR; PUTREG(CMSTAT, sc->sc_intmask); PUTREG(CMCMD, CM_CONF(CONF_LONG)); #ifdef CM_DEBUG if_printf(ifp, "reset: chip configured, status=0x%02x\n", GETREG(CMSTAT)); #endif PUTREG(CMCMD, CM_CLR(CLR_POR|CLR_RECONFIG)); #ifdef CM_DEBUG if_printf(ifp, "reset: bits cleared, status=0x%02x\n", GETREG(CMSTAT)); #endif sc->sc_reconcount_excessive = ARC_EXCESSIVE_RECONS; /* start receiver */ sc->sc_intmask |= CM_RI; sc->sc_rx_fillcount = 0; sc->sc_rx_act = 2; PUTREG(CMCMD, CM_RXBC(2)); PUTREG(CMSTAT, sc->sc_intmask); #ifdef CM_DEBUG if_printf(ifp, "reset: started receiver, status=0x%02x\n", GETREG(CMSTAT)); #endif /* and init transmitter status */ sc->sc_tx_act = 0; sc->sc_tx_fillcount = 0; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->sc_watchdog_timer, hz, cm_watchdog, sc); cm_start_locked(ifp); } /* * Take interface offline */ void cm_stop_locked(sc) struct cm_softc *sc; { /* Stop the interrupts */ PUTREG(CMSTAT, 0); /* Stop the interface */ GETREG(CMRESET); /* Stop watchdog timer */ callout_stop(&sc->sc_watchdog_timer); sc->sc_timer = 0; } void cm_start(struct ifnet *ifp) { struct cm_softc *sc = ifp->if_softc; CM_LOCK(sc); cm_start_locked(ifp); CM_UNLOCK(sc); } /* * Start output on interface. 
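 * (At most two frames are ever in flight: sc_tx_fillcount counts the
 * filled buffers and sc_tx_act names the one being transmitted, as
 * the protocol description at the top of this file explains.)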
Get another datagram to send * off the interface queue, and copy it to the * interface before starting the output * * Assumes that sc_mtx is held */ void cm_start_locked(ifp) struct ifnet *ifp; { struct cm_softc *sc = ifp->if_softc; struct mbuf *m, *mp; int cm_ram_ptr; int len, tlen, offset, buffer; #ifdef CMTIMINGS u_long copystart, lencopy, perbyte; #endif #if defined(CM_DEBUG) && (CM_DEBUG > 3) if_printf(ifp, "start(%p)\n", ifp); #endif if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; if (sc->sc_tx_fillcount >= 2) return; m = arc_frag_next(ifp); buffer = sc->sc_tx_act ^ 1; - if (m == 0) + if (m == NULL) return; #ifdef CM_DEBUG if (m->m_len < ARC_HDRLEN) m = m_pullup(m, ARC_HDRLEN);/* gcc does structure padding */ if_printf(ifp, "start: filling %d from %d to %d type %d\n", buffer, mtod(m, u_char *)[0], mtod(m, u_char *)[1], mtod(m, u_char *)[2]); #else if (m->m_len < 2) m = m_pullup(m, 2); #endif cm_ram_ptr = buffer * 512; - if (m == 0) + if (m == NULL) return; /* write the addresses to RAM and throw them away */ /* * Hardware does this: Yet Another Microsecond Saved. * (btw, timing code says usually 2 microseconds) * PUTMEM(cm_ram_ptr + 0, mtod(m, u_char *)[0]); */ PUTMEM(cm_ram_ptr + 1, mtod(m, u_char *)[1]); m_adj(m, 2); /* get total length left at this point */ tlen = m->m_pkthdr.len; if (tlen < ARC_MIN_FORBID_LEN) { offset = 256 - tlen; PUTMEM(cm_ram_ptr + 2, offset); } else { PUTMEM(cm_ram_ptr + 2, 0); if (tlen <= ARC_MAX_FORBID_LEN) offset = 255; /* !!! */ else { if (tlen > ARC_MAX_LEN) tlen = ARC_MAX_LEN; offset = 512 - tlen; } PUTMEM(cm_ram_ptr + 3, offset); } cm_ram_ptr += offset; /* let's loop through the mbuf chain */ for (mp = m; mp; mp = mp->m_next) { if ((len = mp->m_len)) { /* YAMS */ bus_space_write_region_1( rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), cm_ram_ptr, mtod(mp, caddr_t), len); cm_ram_ptr += len; } } sc->sc_broadcast[buffer] = (m->m_flags & M_BCAST) != 0; sc->sc_retransmits[buffer] = (m->m_flags & M_BCAST) ? 1 : 5; if (++sc->sc_tx_fillcount > 1) { /* * We are filled up to the rim. No more bufs for the moment, * please. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; } else { #ifdef CM_DEBUG if_printf(ifp, "start: starting transmitter on buffer %d\n", buffer); #endif /* Transmitter was off, start it */ sc->sc_tx_act = buffer; /* * We still can accept another buf, so don't: * ifp->if_drv_flags |= IFF_DRV_OACTIVE; */ sc->sc_intmask |= CM_TA; PUTREG(CMCMD, CM_TX(buffer)); PUTREG(CMSTAT, sc->sc_intmask); sc->sc_timer = ARCTIMEOUT; } m_freem(m); /* * After 10 times reading the docs, I realized * that in the case the receiver NAKs the buffer request, * the hardware retries till shutdown. * This is integrated now in the code above. */ } #ifdef CMSOFTCOPY void cm_srint(void *vsc) { struct cm_softc *sc = (struct cm_softc *)vsc; CM_LOCK(sc); cm_srint_locked(vsc); CM_UNLOCK(sc); } #endif /* * Arcnet interface receiver soft interrupt: * get the stuff out of any filled buffer we find. */ void cm_srint_locked(vsc) void *vsc; { struct cm_softc *sc = (struct cm_softc *)vsc; int buffer, len, offset, type; int cm_ram_ptr; struct mbuf *m; struct arc_header *ah; struct ifnet *ifp; ifp = sc->sc_ifp; buffer = sc->sc_rx_act ^ 1; /* Allocate header mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); - if (m == 0) { + if (m == NULL) { /* * in case s.th.
goes wrong with mem, drop it * to make sure the receiver can be started again; * count it as an input error (we don't have any other * detectable kind) */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto cleanup; } m->m_pkthdr.rcvif = ifp; /* * Align so that IP packet will be longword aligned. Here we * assume that m_data of new packet is longword aligned. * When implementing PHDS, we might have to change it to 2, * (2*sizeof(ulong) - CM_HDRNEWLEN)), packet type dependent. */ cm_ram_ptr = buffer * 512; offset = GETMEM(cm_ram_ptr + 2); if (offset) len = 256 - offset; else { offset = GETMEM(cm_ram_ptr + 3); len = 512 - offset; } /* * first +2 bytes for align fixup below * second +2 bytes are for src/dst addresses */ if ((len + 2 + 2) > MHLEN) { /* attach an mbuf cluster */ if (!(MCLGET(m, M_NOWAIT))) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto cleanup; } } - if (m == 0) { + if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto cleanup; } type = GETMEM(cm_ram_ptr + offset); m->m_data += 1 + arc_isphds(type); /* mbuf filled with ARCnet addresses */ m->m_pkthdr.len = m->m_len = len + 2; ah = mtod(m, struct arc_header *); ah->arc_shost = GETMEM(cm_ram_ptr + 0); ah->arc_dhost = GETMEM(cm_ram_ptr + 1); bus_space_read_region_1( rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), cm_ram_ptr + offset, mtod(m, u_char *) + 2, len); CM_UNLOCK(sc); arc_input(ifp, m); CM_LOCK(sc); m = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); cleanup: if (m != NULL) m_freem(m); /* mark buffer as invalid by source id 0 */ PUTMEM(buffer << 9, 0); if (--sc->sc_rx_fillcount == 2 - 1) { /* was off, restart it on buffer just emptied */ sc->sc_rx_act = buffer; sc->sc_intmask |= CM_RI; /* this also clears the RI flag interrupt: */ PUTREG(CMCMD, CM_RXBC(buffer)); PUTREG(CMSTAT, sc->sc_intmask); #ifdef CM_DEBUG if_printf(ifp, "srint: restarted rx on buf %d\n", buffer); #endif } } static inline void cm_tint_locked(sc, isr) struct cm_softc *sc; int isr; { struct ifnet *ifp; int buffer; #ifdef CMTIMINGS int clknow; #endif ifp = sc->sc_ifp; buffer = sc->sc_tx_act; /* * retransmit code: * Normal situations first for fast path: * If acknowledgement received ok or broadcast, we're ok. * else if */ if (isr & CM_TMA || sc->sc_broadcast[buffer]) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); #ifdef CMRETRANSMIT else if (ifp->if_flags & IFF_LINK2 && sc->sc_timer > 0 && --sc->sc_retransmits[buffer] > 0) { /* retransmit same buffer */ PUTREG(CMCMD, CM_TX(buffer)); return; } #endif else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* We know we can accept another buffer at this point. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (--sc->sc_tx_fillcount > 0) { /* * start tx on other buffer. * This also clears the int flag */ buffer ^= 1; sc->sc_tx_act = buffer; /* * already given: * sc->sc_intmask |= CM_TA; * PUTREG(CMSTAT, sc->sc_intmask); */ PUTREG(CMCMD, CM_TX(buffer)); /* init watchdog timer */ sc->sc_timer = ARCTIMEOUT; #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "tint: starting tx on buffer %d, status 0x%02x\n", buffer, GETREG(CMSTAT)); #endif } else { /* have to disable TX interrupt */ sc->sc_intmask &= ~CM_TA; PUTREG(CMSTAT, sc->sc_intmask); /* ...
and watchdog timer */ sc->sc_timer = 0; #ifdef CM_DEBUG if_printf(ifp, "tint: no more buffers to send, status 0x%02x\n", GETREG(CMSTAT)); #endif } /* XXXX TODO */ #ifdef CMSOFTCOPY /* schedule soft int to fill a new buffer for us */ softintr_schedule(sc->sc_txcookie); #else /* call it directly */ cm_start_locked(ifp); #endif } /* * Our interrupt routine */ void cmintr(arg) void *arg; { struct cm_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; u_char isr, maskedisr; int buffer; u_long newsec; CM_LOCK(sc); isr = GETREG(CMSTAT); maskedisr = isr & sc->sc_intmask; if (!maskedisr || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { CM_UNLOCK(sc); return; } do { #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr: status 0x%02x, intmask 0x%02x\n", isr, sc->sc_intmask); #endif if (maskedisr & CM_POR) { /* * XXX We should never see this. Don't bother to store * the address. * sc->sc_ifp->if_l2com->ac_anaddr = GETMEM(CMMACOFF); */ PUTREG(CMCMD, CM_CLR(CLR_POR)); log(LOG_WARNING, "%s: intr: got spurious power on reset int\n", ifp->if_xname); } if (maskedisr & CM_RECON) { /* * we don't need to: * PUTREG(CMCMD, CM_CONF(CONF_LONG)); */ PUTREG(CMCMD, CM_CLR(CLR_RECONFIG)); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); /* * If less than 2 seconds per reconfig: * If ARC_EXCESSIVE_RECONFIGS * since last burst, complain and set threshold for * warnings to ARC_EXCESSIVE_RECONS_REWARN. * * This allows for, e.g., new stations on the cable, or * cable switching as long as it is over after * (normally) 16 seconds. * * XXX TODO: check timeout bits in status word and * double time if necessary. */ callout_stop(&sc->sc_recon_ch); newsec = time_second; if ((newsec - sc->sc_recontime <= 2) && (++sc->sc_reconcount == ARC_EXCESSIVE_RECONS)) { log(LOG_WARNING, "%s: excessive token losses, " "cable problem?\n", ifp->if_xname); } sc->sc_recontime = newsec; callout_reset(&sc->sc_recon_ch, 15 * hz, cm_reconwatch_locked, (void *)sc); } if (maskedisr & CM_RI) { #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr: hard rint, act %d\n", sc->sc_rx_act); #endif buffer = sc->sc_rx_act; /* look if buffer is marked invalid: */ if (GETMEM(buffer * 512) == 0) { /* * invalid marked buffer (or illegally * configured sender) */ log(LOG_WARNING, "%s: spurious RX interrupt or sender 0 " " (ignored)\n", ifp->if_xname); /* * restart receiver on same buffer. * XXX maybe better reset interface? */ PUTREG(CMCMD, CM_RXBC(buffer)); } else { if (++sc->sc_rx_fillcount > 1) { sc->sc_intmask &= ~CM_RI; PUTREG(CMSTAT, sc->sc_intmask); } else { buffer ^= 1; sc->sc_rx_act = buffer; /* * Start receiver on other receive * buffer. This also clears the RI * interrupt flag.
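 * (At this point the fill count is 1: software drains one buffer
 * while the hardware receives into the other.)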
*/ PUTREG(CMCMD, CM_RXBC(buffer)); /* in RX intr, so mask is ok for RX */ #ifdef CM_DEBUG if_printf(ifp, "strt rx for buf %d, " "stat 0x%02x\n", sc->sc_rx_act, GETREG(CMSTAT)); #endif } #ifdef CMSOFTCOPY /* * this one starts a soft int to copy out * of the hw */ softintr_schedule(sc->sc_rxcookie); #else /* this one does the copy here */ cm_srint_locked(sc); #endif } } if (maskedisr & CM_TA) { cm_tint_locked(sc, isr); } isr = GETREG(CMSTAT); maskedisr = isr & sc->sc_intmask; } while (maskedisr); #if defined(CM_DEBUG) && (CM_DEBUG > 1) if_printf(ifp, "intr (exit): status 0x%02x, intmask 0x%02x\n", isr, sc->sc_intmask); #endif CM_UNLOCK(sc); } void cm_reconwatch_locked(arg) void *arg; { struct cm_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->sc_reconcount >= ARC_EXCESSIVE_RECONS) { sc->sc_reconcount = 0; log(LOG_WARNING, "%s: token valid again.\n", ifp->if_xname); } sc->sc_reconcount = 0; } /* * Process an ioctl request. * This code needs some work - it looks pretty ugly. */ int cm_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct cm_softc *sc; int error; error = 0; sc = ifp->if_softc; #if defined(CM_DEBUG) && (CM_DEBUG > 2) if_printf(ifp, "ioctl() called, cmd = 0x%lx\n", command); #endif switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFMTU: error = arc_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: CM_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ cm_stop_locked(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* * If interface is marked up and it is stopped, then * start it. */ cm_init_locked(sc); } CM_UNLOCK(sc); break; default: error = EINVAL; break; } return (error); } /* * watchdog routine for transmitter. * * We need this because otherwise a receiver whose hardware is alive, but whose * software has not enabled the Receiver, would make our hardware wait forever. * Discovered this after 20 times reading the docs. * * The only thing we do is disable the transmitter. We'll get a transmit * timeout, and the int handler will have to decide not to retransmit (in case * retransmission is implemented). */ void cm_watchdog(void *arg) { struct cm_softc *sc; sc = arg; callout_reset(&sc->sc_watchdog_timer, hz, cm_watchdog, sc); if (sc->sc_timer == 0 || --sc->sc_timer > 0) return; PUTREG(CMCMD, CM_TXDIS); } Index: head/sys/dev/cp/if_cp.c =================================================================== --- head/sys/dev/cp/if_cp.c (revision 313981) +++ head/sys/dev/cp/if_cp.c (revision 313982) @@ -1,2269 +1,2269 @@ /*- * Cronyx-Tau-PCI adapter driver for FreeBSD. * Supports PPP/HDLC, Cisco/HDLC and FrameRelay protocol in synchronous mode, * and asynchronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Kurakin Roman, * * Copyright (C) 1999-2002 Cronyx Engineering. * Author: Serge Vakulenko, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions.
* * Cronyx Id: if_cp.c,v 1.1.2.41 2004/06/23 17:09:13 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # include #else # include # include #include # define PP_CISCO IFF_LINK2 # include #endif #include #include #include #include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CP_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CP_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CP_LOCK_NAME "cpX" #define CP_LOCK(_bd) mtx_lock (&(_bd)->cp_mtx) #define CP_UNLOCK(_bd) mtx_unlock (&(_bd)->cp_mtx) #define CP_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->cp_mtx, MA_OWNED) static int cp_probe __P((device_t)); static int cp_attach __P((device_t)); static int cp_detach __P((device_t)); static device_method_t cp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cp_probe), DEVMETHOD(device_attach, cp_attach), DEVMETHOD(device_detach, cp_detach), DEVMETHOD_END }; typedef struct _cp_dma_mem_t { unsigned long phys; void *virt; size_t size; bus_dma_tag_t dmat; bus_dmamap_t mapp; } cp_dma_mem_t; typedef struct _drv_t { char name [8]; int running; cp_chan_t *chan; cp_board_t *board; cp_dma_mem_t dmamem; #ifdef NETGRAPH char nodename [NG_NODESIZE]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifqueue queue; struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; struct cdev *devt; } drv_t; typedef struct _bdrv_t { cp_board_t *board; struct resource *cp_res; struct resource *cp_irq; void *cp_intrhand; cp_dma_mem_t dmamem; drv_t channel [NCHAN]; struct mtx cp_mtx; } bdrv_t; static driver_t cp_driver = { "cp", cp_methods, sizeof(bdrv_t), }; static devclass_t cp_devclass; static void cp_receive (cp_chan_t *c, unsigned char *data, int len); static void cp_transmit (cp_chan_t *c, void *attachment, int len); static void cp_error (cp_chan_t *c, int data); static void cp_up (drv_t *d); static void cp_start (drv_t *d); static void cp_down (drv_t *d); static void cp_watchdog (drv_t *d); static void cp_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void cp_ifstart (struct ifnet *ifp); static void cp_tlf (struct sppp *sp); static void cp_tls (struct sppp *sp); static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void cp_initialize (void *softc); #endif static cp_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static struct callout led_timo [NBRD]; static struct callout timeout_handle; static int cp_destroy = 0; static int cp_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int cp_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int cp_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); static struct cdevsw cp_cdevsw = { .d_version = D_VERSION, .d_open = cp_open, .d_close = cp_close, .d_ioctl = cp_ioctl, .d_name = "cp", }; /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! 
m) return 0; if (!(MCLGET (m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static int cp_probe (device_t dev) { if ((pci_get_vendor (dev) == cp_vendor_id) && (pci_get_device (dev) == cp_device_id)) { device_set_desc (dev, "Cronyx-Tau-PCI serial adapter"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void cp_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NBRD; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { s = splimp (); if (cp_destroy) { splx (s); return; } d = channel[i * NCHAN + k]; if (!d) { splx (s); continue; } CP_LOCK ((bdrv_t *)d->board->sys); switch (d->chan->type) { case T_G703: cp_g703_timer (d->chan); break; case T_E1: cp_e1_timer (d->chan); break; case T_E3: case T_T3: case T_STS1: cp_e3_timer (d->chan); break; default: break; } CP_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } s = splimp (); if (!cp_destroy) callout_reset (&timeout_handle, hz, cp_timeout, 0); splx (s); } static void cp_led_off (void *arg) { cp_board_t *b = arg; bdrv_t *bd = (bdrv_t *) b->sys; int s; s = splimp (); if (cp_destroy) { splx (s); return; } CP_LOCK (bd); cp_led (b, 0); CP_UNLOCK (bd); splx (s); } static void cp_intr (void *arg) { bdrv_t *bd = arg; cp_board_t *b = bd->board; #ifndef NETGRAPH int i; #endif int s = splimp (); if (cp_destroy) { splx (s); return; } CP_LOCK (bd); /* Check if we are ready */ if (b->sys == NULL) { /* No, we are not; just clean up. */ cp_interrupt_poll (b, 1); CP_UNLOCK (bd); return; } /* Turn LED on. */ cp_led (b, 1); cp_interrupt (b); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, cp_led_off, b); CP_UNLOCK (bd); splx (s); #ifndef NETGRAPH /* Pass received packets up without holding the lock. */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->queue)) { IF_DEQUEUE (&d->queue, m); if (!m) continue; sppp_input (d->ifp, m); } } #endif } static void cp_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int cp_bus_dma_mem_alloc (int bnum, int cnum, cp_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, NULL, NULL, &dmem->dmat); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, cp_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } return 1; } static void cp_bus_dma_mem_free (cp_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } /* * Called if the probe succeeded.
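* The attach path below, in order: allocate and zero the board structure, map PCIR_BAR(0), initialize the per-board mutex, let cp_init() probe the channels, hand the firmware a DMA-able request queue via cp_bus_dma_mem_alloc() and cp_reset(), hook the shared IRQ, and finally bring up each present channel with its own DMA buffers, sppp ifnet or netgraph node, callbacks and /dev/cpN node.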
*/ static int cp_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); char *cp_ln = CP_LOCK_NAME; unsigned short res; vm_offset_t vbase; int rid, error; cp_board_t *b; cp_chan_t *c; drv_t *d; int s = splimp (); b = malloc (sizeof(cp_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cp%d: couldn't allocate memory\n", unit); splx (s); return (ENXIO); } bzero (b, sizeof(cp_board_t)); bd->board = b; rid = PCIR_BAR(0); bd->cp_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->cp_res) { printf ("cp%d: cannot map memory\n", unit); free (b, M_DEVBUF); splx (s); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->cp_res); cp_ln[2] = '0' + unit; mtx_init (&bd->cp_mtx, cp_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); res = cp_init (b, unit, (u_char*) vbase); if (res) { printf ("cp%d: can't init, error code:%x\n", unit, res); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); free (b, M_DEVBUF); splx (s); return (ENXIO); } bd->dmamem.size = sizeof(cp_qbuf_t); if (! cp_bus_dma_mem_alloc (unit, -1, &bd->dmamem)) { free (b, M_DEVBUF); splx (s); return (ENXIO); } CP_LOCK (bd); cp_reset (b, bd->dmamem.virt, bd->dmamem.phys); CP_UNLOCK (bd); rid = 0; bd->cp_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! bd->cp_irq) { cp_destroy = 1; printf ("cp%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); mtx_destroy (&bd->cp_mtx); free (b, M_DEVBUF); splx (s); return (ENXIO); } callout_init (&led_timo[unit], 1); error = bus_setup_intr (dev, bd->cp_irq, INTR_TYPE_NET|INTR_MPSAFE, NULL, cp_intr, bd, &bd->cp_intrhand); if (error) { cp_destroy = 1; printf ("cp%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); mtx_destroy (&bd->cp_mtx); free (b, M_DEVBUF); splx (s); return (ENXIO); } printf ("cp%d: %s, clock %ld MHz\n", unit, b->name, b->osc / 1000000); for (c = b->chan; c < b->chan + NCHAN; ++c) { if (! c->type) continue; d = &bd->channel[c->num]; d->dmamem.size = sizeof(cp_buf_t); if (! 
cp_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; channel [b->num*NCHAN + c->num] = d; sprintf (d->name, "cp%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; callout_init (&d->timeout_handle, 1); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } NG_NODE_SET_PRIVATE (d->node, d); sprintf (d->nodename, "%s%d", NG_CP_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); NG_NODE_UNREF (d->node); continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "cp_queue_hi", NULL, MTX_DEF); #else /*NETGRAPH*/ d->ifp = if_alloc(IFT_PPP); if (d->ifp == NULL) { printf ("%s: cannot if_alloc() interface\n", d->name); continue; } d->ifp->if_softc = d; if_initname (d->ifp, "cp", b->num * NCHAN + c->num); d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = cp_sioctl; d->ifp->if_start = cp_ifstart; d->ifp->if_init = cp_initialize; d->queue.ifq_maxlen = NRBUF; mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = cp_tlf; IFP2SP(d->ifp)->pp_tls = cp_tls; /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ cp_start_e1 (c); cp_start_chan (c, 1, 1, d->dmamem.virt, d->dmamem.phys); /* Register callback functions. */ cp_register_transmit (c, &cp_transmit); cp_register_receive (c, &cp_receive); cp_register_error (c, &cp_error); d->devt = make_dev (&cp_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "cp%d", b->num*NCHAN+c->num); } CP_LOCK (bd); b->sys = bd; adapter[unit] = b; CP_UNLOCK (bd); splx (s); return 0; } static int cp_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); cp_board_t *b = bd->board; cp_chan_t *c; int s; KASSERT (mtx_initialized (&bd->cp_mtx), ("cp mutex not initialized")); s = splimp (); CP_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; if (d->running) { CP_UNLOCK (bd); splx (s); return EBUSY; } } /* OK, we can unload the driver. */ /* First, stop all channels. */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; cp_stop_chan (c); cp_stop_e1 (c); cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); } /* Reset the adapter. */ cp_destroy = 1; cp_interrupt_poll (b, 1); cp_led_off (b); cp_reset (b, 0, 0); callout_stop (&led_timo[b->num]); /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->cp_irq, bd->cp_intrhand); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; callout_stop (&d->timeout_handle); #ifndef NETGRAPH /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); /* Detach from the system list of interfaces.
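* Teardown runs in the reverse of attach order: bpfdetach() and sppp_detach() come before if_detach()/if_free(), so nothing can observe a half-destroyed ifnet.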
*/ if_detach (d->ifp); if_free (d->ifp); IF_DRAIN (&d->queue); mtx_destroy (&d->queue.ifq_mtx); #else if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #endif destroy_dev (d->devt); } b->sys = NULL; CP_UNLOCK (bd); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); CP_LOCK (bd); cp_led_off (b); CP_UNLOCK (bd); callout_drain (&led_timo[b->num]); splx (s); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; callout_drain (&d->timeout_handle); - channel [b->num*NCHAN + c->num] = 0; + channel [b->num*NCHAN + c->num] = NULL; /* Deallocate buffers. */ cp_bus_dma_mem_free (&d->dmamem); } - adapter [b->num] = 0; + adapter [b->num] = NULL; cp_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); mtx_destroy (&bd->cp_mtx); return 0; } #ifndef NETGRAPH static void cp_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CP_LOCK (bd); cp_start (d); CP_UNLOCK (bd); } static void cp_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CP_DEBUG2 (d, ("cp_tlf\n")); /* XXXRIK: Don't forget to protect them by LOCK, or kill them. */ /* cp_set_dtr (d->chan, 0);*/ /* cp_set_rts (d->chan, 0);*/ if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_down (sp); } static void cp_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CP_DEBUG2 (d, ("cp_tls\n")); if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_up (sp); } /* * Process an ioctl request. */ static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CP_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CP_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CP_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CP_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CP_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CP_LOCK (bd); should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ cp_up (d); cp_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ cp_down (d); } CP_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); CP_UNLOCK (bd); splx (s); return 0; } /* * Initialization of interface. * It seems to be never called by upper level? */ static void cp_initialize (void *softc) { drv_t *d = softc; CP_DEBUG (d, ("cp_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void cp_down (drv_t *d) { CP_DEBUG (d, ("cp_down\n")); /* Interface is going down -- stop it. */ cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); } /* * Start the interface. Called on splimp(). */ static void cp_up (drv_t *d) { CP_DEBUG (d, ("cp_up\n")); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. 
Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void cp_send (drv_t *d) { struct mbuf *m; u_short len; CP_DEBUG2 (d, ("cp_send, tn=%d te=%d\n", d->chan->tn, d->chan->te)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! (d->chan->lloop || d->chan->type != T_SERIAL || cp_get_dsr (d->chan))) return; while (cp_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH BPF_MTAP (d->ifp, m); #endif len = m_length (m, NULL); if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) cp_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { u_char *buf = d->chan->tbuf[d->chan->te]; m_copydata (m, 0, len, buf); cp_send_packet (d->chan, buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ d->timeout = 10; } #ifndef NETGRAPH d->ifp->if_drv_flags |= IFF_DRV_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void cp_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) cp_set_dtr (d->chan, 1); if (! d->chan->rts) cp_set_rts (d->chan, 1); cp_send (d); callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void cp_watchdog (drv_t *d) { CP_DEBUG (d, ("device timeout\n")); if (d->running) { cp_stop_chan (d->chan); cp_stop_e1 (d->chan); cp_start_e1 (d->chan); cp_start_chan (d->chan, 1, 1, 0, 0); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); cp_start (d); } } static void cp_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CP_LOCK (bd); if (d->timeout == 1) cp_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); CP_UNLOCK (bd); } static void cp_transmit (cp_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cp_start (d); } static void cp_receive (cp_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #ifdef NETGRAPH int error; #endif if (! d->running) return; m = makembuf (data, len); if (! m) { CP_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; NG_SEND_DATA_ONLY (error, d->hook, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. 
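* BPF_MTAP() is cheap when nobody is listening: the macro first checks whether any BPF peers are attached to the interface and only then calls into bpf_mtap().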
*/ BPF_MTAP(d->ifp, m); IF_ENQUEUE (&d->queue, m); #endif } static void cp_error (cp_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CP_FRAME: CP_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_CRC: CP_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_OVERRUN: CP_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_OVERFLOW: CP_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_UNDERRUN: CP_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cp_start (d); break; default: CP_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ static int cp_open (struct cdev *dev, int oflags, int devtype, struct thread *td) { int unit = dev2unit (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CP_DEBUG2 (d, ("cp_open\n")); return 0; } /* * Only called on the LAST close. */ static int cp_close (struct cdev *dev, int fflag, int devtype, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; CP_DEBUG2 (d, ("cp_close\n")); return 0; } static int cp_modem_status (cp_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int status, s; status = d->running ? TIOCM_LE : 0; s = splimp (); CP_LOCK (bd); if (cp_get_cd (c)) status |= TIOCM_CD; if (cp_get_cts (c)) status |= TIOCM_CTS; if (cp_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CP_UNLOCK (bd); splx (s); return status; } static int cp_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd = d->board->sys; cp_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; struct e3_statistics *opte3; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CP_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; s<NBRD*NCHAN; ++s) if (channel [s]) mask [s/8] |= 1 << (s & 7); bcopy (mask, data, sizeof (mask)); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: CP_DEBUG2 (d, ("ioctl: getproto\n")); strcpy ((char*)data, (IFP2SP(d->ifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CP_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (d->ifp->if_drv_flags & IFF_DRV_RUNNING) return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; #if PP_FR != 0 } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; #endif } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~PP_FR; IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser!
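* Every "set" ioctl in this switch follows the same pattern: gate on priv_check(td, PRIV_DRIVER), validate the channel type, then apply the change with interrupts blocked by splimp() and the board mutex held via CP_LOCK().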
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splimp (); CP_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CP_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CP_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CP_DEBUG2 (d, ("ioctl: getcfg\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(char*)data = c->board->mux ? 'c' : 'a'; return 0; case SERIAL_SETCFG: CP_DEBUG2 (d, ("ioctl: setcfg\n")); error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_mux (c->board, *((char*)data) == 'c'); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSTAT: CP_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CP_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1 && c->type != T_G703) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_GETE3STAT: CP_DEBUG2 (d, ("ioctl: gete3stat\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; opte3 = (struct e3_statistics*) data; opte3->status = c->e3status; opte3->cursec = (c->e3csec_5 * 2 + 1) / 10; opte3->totsec = c->e3tsec + opte3->cursec; opte3->ccv = c->e3ccv; opte3->tcv = c->e3tcv + opte3->ccv; 
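/* The counters follow a "running interval plus accumulated total" convention: c->total and c->e3tcv hold everything up to the last interval rotation, so the values reported to userland add in the interval still in progress, as in tcv = e3tcv + ccv just above. */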
for (s = 0; s < 48; ++s) { opte3->icv[s] = c->e3icv[s]; } return 0; case SERIAL_CLRSTAT: CP_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); c->e3ccv = 0; c->e3tcv = 0; bzero (c->e3icv, sizeof (c->e3icv)); return 0; case SERIAL_GETBAUD: CP_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CP_DEBUG2 (d, ("ioctl: setbaud\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_baud (c, *(long*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLOOP: CP_DEBUG2 (d, ("ioctl: getloop\n")); *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CP_DEBUG2 (d, ("ioctl: setloop\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_lloop (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDPLL: CP_DEBUG2 (d, ("ioctl: getdpll\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->dpll; return 0; case SERIAL_SETDPLL: CP_DEBUG2 (d, ("ioctl: setdpll\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_dpll (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETNRZI: CP_DEBUG2 (d, ("ioctl: getnrzi\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->nrzi; return 0; case SERIAL_SETNRZI: CP_DEBUG2 (d, ("ioctl: setnrzi\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_nrzi (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CP_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CP_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero for logic * simplicity. For switching debug off the IFF_DEBUG is * responsible. */ d->chan->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) d->chan->debug = d->chan->debug_shadow; #else d->chan->debug = *(int*)data; #endif return 0; case SERIAL_GETHIGAIN: CP_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CP_DEBUG2 (d, ("ioctl: sethigain\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_higain (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CP_DEBUG2 (d, ("ioctl: getphony\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CP_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_phony (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUNFRAM: CP_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CP_DEBUG2 (d, ("ioctl: setunfram\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_unfram (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: getscrambler\n")); if (c->type != T_G703 && !c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_G703 && !c->unfram) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_scrambler (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETMONITOR: CP_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CP_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_monitor (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUSE16: CP_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CP_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_use16 (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCRC4: CP_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CP_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_crc4 (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: CP_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; case GSYN_RCV2: *(int*)data = E1CLK_RECEIVE_CHAN2; break; case GSYN_RCV3: *(int*)data = E1CLK_RECEIVE_CHAN3; break; } return 0; case SERIAL_SETCLK: CP_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; s = splimp (); CP_LOCK (bd); switch (*(int*)data) { default: cp_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: cp_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: cp_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: cp_set_gsyn (c, GSYN_RCV1); break; case E1CLK_RECEIVE_CHAN2: cp_set_gsyn (c, GSYN_RCV2); break; case E1CLK_RECEIVE_CHAN3: cp_set_gsyn (c, GSYN_RCV3); break; } CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_ts (c, *(u_long*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVCLK: CP_DEBUG2 (d, ("ioctl: getinvclk\n")); #if 1 return EINVAL; #else if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; #endif case SERIAL_SETINVCLK: CP_DEBUG2 (d, ("ioctl: setinvclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invtxc (c, *(int*)data); cp_set_invrxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVTCLK: CP_DEBUG2 (d, ("ioctl: getinvtclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; case SERIAL_SETINVTCLK: CP_DEBUG2 (d, ("ioctl: setinvtclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invtxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVRCLK: CP_DEBUG2 (d, ("ioctl: getinvrclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invrxc; return 0; case SERIAL_SETINVRCLK: CP_DEBUG2 (d, ("ioctl: setinvrclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invrxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLEVEL: CP_DEBUG2 (d, ("ioctl: getlevel\n")); if (c->type != T_G703) return EINVAL; s = splimp (); CP_LOCK (bd); *(int*)data = cp_get_lq (c); CP_UNLOCK (bd); splx (s); return 0; #if 0 case SERIAL_RESET: CP_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_reset (c->board, 0, 0); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_HARDRESET: CP_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); /* hard_reset (c->board); */ CP_UNLOCK (bd); splx (s); return 0; #endif case SERIAL_GETCABLE: CP_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); *(int*)data = cp_get_cable (c); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDIR: CP_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CP_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_dir (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETRLOOP: CP_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = cp_get_rloop (c); return 0; case SERIAL_SETRLOOP: CP_DEBUG2 (d, ("ioctl: setrloop\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_rloop (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCABLEN: CP_DEBUG2 (d, ("ioctl: getcablen\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->cablen; return 0; case SERIAL_SETCABLEN: CP_DEBUG2 (d, ("ioctl: setcablen\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_cablen (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, 1); CP_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); cp_set_rts (c, (*(int*)data & TIOCM_RTS) ?
1 : 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CP_LOCK (bd); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 1); CP_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CP_LOCK (bd); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = cp_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH static int ng_cp_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_cp_newhook (node_p node, hook_p hook, const char *name) { int s; drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd = d->board->sys; CP_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CP_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE (hook, NULL); d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CP_HOOK_RAW) != 0) return EINVAL; NG_HOOK_SET_PRIVATE (hook, d); d->hook = hook; s = splimp (); CP_LOCK (bd); cp_up (d); CP_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, cp_chan_t *c, int need_header) { int status = cp_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? "On" : "-"); return length; } static int print_stats (char *s, cp_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? 
'/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += sprintf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, cp_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics.
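* The total row repeats the column layout of the current-interval row above, but is computed over the whole measurement window (totsec, with the running interval folded into each counter) and ends with "-- Total" in place of the status field.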
*/ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, cp_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int length = 0; length += sprintf (s + length, "cp%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1 || c->type == T_G703) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; case GSYN_RCV2 : length += sprintf (s + length, " syn=rcv2"); break; case GSYN_RCV3 : length += sprintf (s + length, " syn=rcv3"); break; } if (c->type == T_SERIAL) { length += sprintf (s + length, " dpll=%s", c->dpll ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", c->nrzi ? "on" : "off"); length += sprintf (s + length, " invclk=%s", c->invtxc ? "on" : "off"); } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); if (c->type == T_G703) { int lq, x; x = splimp (); CP_LOCK (bd); lq = cp_get_lq (c); CP_UNLOCK (bd); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } static int ng_cp_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; struct ng_mesg *resp = NULL; int error = 0; CP_DEBUG (d, ("Rcvmsg\n")); NGI_GET_MSG (item, msg); switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CP_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connected to channel"); strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); return error; } static int ng_cp_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_tag_prio *ptag; bdrv_t *bd = d->board->sys; struct ifqueue *q; int s; CP_DEBUG2 (d, ("Rcvdata\n")); NGI_GET_M (item, m); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); return ENETDOWN; } /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; s = splimp (); CP_LOCK (bd); IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CP_UNLOCK (bd); splx (s); NG_FREE_M (m); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); cp_start (d); CP_UNLOCK (bd); splx (s); return 0; } static int ng_cp_rmnode (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CP_LOCK (bd); cp_down (d); CP_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NGF_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } NG_NODE_REVIVE(node); /* Persistent node */ #endif return 0; } static int ng_cp_connect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (d) { CP_DEBUG (d, ("Connect\n")); callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); } return 0; } static int ng_cp_disconnect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (d) { CP_DEBUG (d, ("Disconnect\n")); if (NG_HOOK_PRIVATE (hook)) { bdrv_t *bd = d->board->sys; int s = splimp (); CP_LOCK (bd); cp_down (d); CP_UNLOCK (bd); splx (s); } /* If we had to wait for it, it may have re-armed itself by now; just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); } return 0; } #endif static int cp_modevent (module_t mod, int type, void *unused) { static int load_count = 0; switch (type) { case MOD_LOAD: #ifdef NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_cp\n"); #endif ++load_count; callout_init (&timeout_handle, 1); callout_reset (&timeout_handle, hz*5, cp_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-PCI\n"); #ifdef NETGRAPH ng_rmtype (&typestruct); #endif } /* If we had to wait for it, it may have re-armed itself by now; * just stop it. Actually we shouldn't get this condition. But * code could be changed in the future, so just be a little paranoid.
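* Unlike callout_stop(9), callout_drain(9) also waits for a handler that is already running to complete, which is what makes the subsequent unload safe.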
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CP_NODE_TYPE, .constructor = ng_cp_constructor, .rcvmsg = ng_cp_rcvmsg, .shutdown = ng_cp_rmnode, .newhook = ng_cp_newhook, .connect = ng_cp_connect, .rcvdata = ng_cp_rcvdata, .disconnect = ng_cp_disconnect, }; #endif /*NETGRAPH*/ #ifdef NETGRAPH MODULE_DEPEND (ng_cp, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (cp, sppp, 1, 1, 1); #endif DRIVER_MODULE (cp, pci, cp_driver, cp_devclass, cp_modevent, NULL); MODULE_VERSION (cp, 1); Index: head/sys/dev/ctau/ctddk.c =================================================================== --- head/sys/dev/ctau/ctddk.c (revision 313981) +++ head/sys/dev/ctau/ctddk.c (revision 313982) @@ -1,1162 +1,1162 @@ /*- * DDK library for Cronyx-Tau adapters. * * Copyright (C) 1998-1999 Cronyx Engineering. * Author: Alexander Kvitchenko, * * Copyright (C) 1999-2003 Cronyx Engineering. * Author: Roman Kurakin, * * This source is derived from * Diagnose utility for Cronyx-Tau adapter: * by Serge Vakulenko, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations permission to use * or modify this software as long as this message is kept with the software, * all derivative works or modified versions. * * Cronyx Id: ctddk.c,v 1.1.2.3 2003/11/14 16:55:36 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #ifndef CT_DDK_NO_G703 #include #endif #ifndef CT_DDK_NO_E1 #include #endif static void ct_hdlc_interrupt (ct_chan_t *c, int imvr); static void ct_e1_interrupt (ct_board_t *b); static void ct_scc_interrupt (ct_board_t *b); static void ct_e1timer_interrupt (ct_chan_t *c); static short porttab [] = { /* standard base port set */ 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; int ct_find (port_t *board_ports) { int i, n; for (i=0, n=0; porttab[i] && n= NBRD || ! ct_probe_board (port, irq, dma)) return 0; /* init callback pointers */ for (c=b->chan; c<b->chan+NCHAN; ++c) { c->call_on_tx = 0; c->call_on_rx = 0; c->call_on_msig = 0; c->call_on_scc = 0; c->call_on_err = 0; } /* init DDK channel variables */ for (c=b->chan; c<b->chan+NCHAN; ++c) { c->sccrx_empty = c->scctx_empty = 1; c->sccrx_b = c->sccrx_e = 0; c->scctx_b = c->scctx_e = 0; c->e1_first_int = 1; } /* init board structure */ ct_init (b, num, port, irq, dma, ctau_fw_data, ctau_fw_len, ctau_fw_tvec, ctau2_fw_data); /* determine which firmware should be loaded */ fw = ctau_fw_data; flen = ctau_fw_len; ft = ctau_fw_tvec; switch (b->type) { case B_TAU2: case B_TAU2_G703: case B_TAU2_E1: case B_TAU2_E1D: fw = ctau2_fw_data; flen = 0; - ft = 0; + ft = NULL; break; #ifndef CT_DDK_NO_G703 case B_TAU_G703: fw = ctaug703_fw_data; flen = ctaug703_fw_len; ft = ctaug703_fw_tvec; break; #endif #ifndef CT_DDK_NO_E1 case B_TAU_E1: fw = ctaue1_fw_data; flen = ctaue1_fw_len; ft = ctaue1_fw_tvec; break; #endif } /* Load firmware and set up board */ return ct_setup_board (b, fw, flen, ft); } /* * Must be called on exit. */ void ct_close_board (ct_board_t *b) { ct_setup_board (b, 0, 0, 0); /* Reset the controller. */ outb (BCR0(b->port), 0); /* Disable DMA channel.
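* (The ct_setup_board (b, 0, 0, 0) call above re-ran board setup with no firmware to load, and the BCR0 write holds the controller in reset; all that remains is to stop the DMA channel and turn the LED off.)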
*/ ct_disable_dma (b); ct_led (b, 0); } static void ct_g703_rate (ct_chan_t *c, unsigned long rate) { c->gopt.rate = rate; ct_setup_g703 (c->board); } /* * Set up baud rate. */ static void ct_chan_baud (ct_chan_t *c, unsigned long baud) { c->baud = baud; if (baud) { c->hopt.txs = CLK_INT; } else { ct_set_dpll (c, 0); c->hopt.txs = CLK_LINE; } ct_update_chan (c); } void ct_set_baud (ct_chan_t *c, unsigned long baud) { unsigned long r; if (c->mode == M_E1) return; if (c->mode == M_G703) { if (baud >= 2048000) r = 2048; else if (baud >= 1024000) r = 1024; else if (baud >= 512000) r = 512; else if (baud >= 256000) r = 256; else if (baud >= 128000) r = 128; else r = 64; ct_g703_rate (c, r); } else ct_chan_baud (c, baud); } /* * Configure Tau/E1 board. */ static void ct_e1_config (ct_board_t *b, unsigned char cfg) { if (cfg == b->opt.cfg) return; if (cfg == CFG_B) b->chan[1].mode = M_HDLC; else b->chan[1].mode = M_E1; /* Recovering synchronization */ if (b->opt.cfg == CFG_B) { ct_chan_baud (b->chan+1, 0); ct_set_invtxc (b->chan+1, 0); ct_set_invrxc (b->chan+1, 0); ct_set_nrzi (b->chan+1, 0); } b->opt.cfg = cfg; ct_setup_e1 (b); } /* * Configure Tau/G.703 board. */ static void ct_g703_config (ct_board_t *b, unsigned char cfg) { if (cfg == b->opt.cfg) return; if (cfg == CFG_B) b->chan[1].mode = M_HDLC; else b->chan[1].mode = M_G703; /* Recovering synchronization */ if (b->opt.cfg == CFG_B) { ct_chan_baud (b->chan+1, 0); ct_set_invtxc (b->chan+1, 0); ct_set_invrxc (b->chan+1, 0); ct_set_nrzi (b->chan+1, 0); } b->opt.cfg = cfg; ct_setup_g703 (b); } int ct_set_clk (ct_chan_t *c, int clk) { if (c->num) c->board->opt.clk1 = clk; else c->board->opt.clk0 = clk; if (c->mode == M_E1) { ct_setup_e1 (c->board); return 0; } if (c->mode == M_G703) { ct_setup_g703 (c->board); return 0; } else return -1; } int ct_get_clk (ct_chan_t *c) { return c->num ? c->board->opt.clk1 : c->board->opt.clk0; } int ct_set_ts (ct_chan_t *c, unsigned long ts) { if (! (c->mode == M_E1)) return -1; if (c->num) c->board->opt.s1 = ts; else c->board->opt.s0 = ts; ct_setup_e1 (c->board); return 0; } int ct_set_subchan (ct_board_t *b, unsigned long ts) { if (b->chan[0].mode != M_E1) return -1; b->opt.s2 = ts; ct_setup_e1 (b); return 0; } int ct_set_higain (ct_chan_t *c, int on) { if (! (c->mode == M_E1)) return -1; c->gopt.higain = on ? 1 : 0; ct_setup_e1 (c->board); return 0; } /* * Start service channel. */ void ct_start_scc (ct_chan_t *c, char *rxbuf, char *txbuf) { c->sccrx = rxbuf; c->scctx = txbuf; /* Enable interrupts from service channel. */ if (c->board->type != B_TAU_E1 && c->board->type != B_TAU_E1C && c->board->type != B_TAU2_E1) return; cte_out2 (c->board->port, c->num ? AM_IMR : AM_IMR | AM_A, IMR_TX | IMR_RX_ALL); cte_out2 (c->board->port, AM_MICR, MICR_MIE); } /* * Start HDLC channel. */ void ct_start_chan (ct_chan_t *c, ct_buf_t *cb, unsigned long phys) { int i, ier0; unsigned long bound; if (cb) { /* Set up descriptors, aligned to a 64k boundary. * If a 64k boundary would fall inside the descriptor * area, the descriptors are moved up to begin exactly * on that boundary (additional space was allocated * for this). */ c->tdesc = cb->descbuf; c->tdphys[0] = phys + ((char*)c->tdesc - (char*)cb); bound = ((c->tdphys[0] + 0xffff) & ~(0xffffUL)); if (bound < c->tdphys[0] + 2*NBUF*sizeof(ct_desc_t)) { c->tdesc = (ct_desc_t*) ((char*) c->tdesc + (bound - c->tdphys[0])); c->tdphys[0] = bound; } c->rdesc = c->tdesc + NBUF; /* Set buffers.
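* A worked example of the alignment above: with tdphys[0] = 0x2f000, bound = (0x2f000 + 0xffff) & ~0xffff = 0x30000; if the 2*NBUF descriptors would straddle 0x30000 they are moved up to start exactly there. The chain links written below keep only the low 16 bits of each address (note the & 0xffff), so a whole descriptor chain must sit inside a single 64k segment.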
*/ for (i=0; i<NBUF; ++i) { c->rbuf[i] = cb->rbuffer[i]; c->tbuf[i] = cb->tbuffer[i]; } /* Set buffer physical addresses */ for (i=0; i<NBUF; ++i) { c->rphys[i] = phys + ((char*)c->rbuf[i] - (char*)cb); c->tphys[i] = phys + ((char*)c->tbuf[i] - (char*)cb); c->rdphys[i] = phys + ((char*)(c->rdesc+i) - (char*)cb); c->tdphys[i] = phys + ((char*)(c->tdesc+i) - (char*)cb); } } /* Set up block chains. */ /* receive buffers */ for (i=0; i<NBUF; ++i) { B_NEXT (c->rdesc[i]) = c->rdphys[(i+1) % NBUF] & 0xffff; B_PTR (c->rdesc[i]) = c->rphys[i]; B_LEN (c->rdesc[i]) = DMABUFSZ; B_STATUS (c->rdesc[i]) = 0; } /* transmit buffers */ for (i=0; i<NBUF; ++i) { B_NEXT (c->tdesc[i]) = c->tdphys[(i+1) % NBUF] & 0xffff; B_PTR (c->tdesc[i]) = c->tphys[i]; B_LEN (c->tdesc[i]) = DMABUFSZ; B_STATUS (c->tdesc[i]) = FST_EOM; c->attach[i] = 0; } if (c->type & T_E1) { c->mode = M_E1; if (c->num && c->board->opt.cfg == CFG_B) c->mode = M_HDLC; } if (c->type & T_G703) { c->mode = M_G703; if (c->num && c->board->opt.cfg == CFG_B) c->mode = M_HDLC; } ct_update_chan (c); /* enable receiver */ c->rn = 0; ct_start_receiver (c, 1, c->rphys[0], DMABUFSZ, c->rdphys[0], c->rdphys[NBUF-1]); outb (c->IE1, inb (c->IE1) | IE1_CDCDE); outb (c->IE0, inb (c->IE0) | IE0_RX_INTE); ier0 = inb (IER0(c->board->port)); ier0 |= c->num ? IER0_RX_INTE_1 : IER0_RX_INTE_0; outb (IER0(c->board->port), ier0); /* Enable transmitter */ c->tn = 0; c->te = 0; ct_start_transmitter (c, 1, c->tphys[0], DMABUFSZ, c->tdphys[0], c->tdphys[0]); outb (c->TX.DIR, DIR_CHAIN_EOME | DIR_CHAIN_BOFE | DIR_CHAIN_COFE); /* Clear DTR and RTS */ ct_set_dtr (c, 0); ct_set_rts (c, 0); } /* * Turn receiver on/off */ void ct_enable_receive (ct_chan_t *c, int on) { unsigned char st3, ier0, ier1; st3 = inb (c->ST3); /* enable or disable receiver */ if (on && ! (st3 & ST3_RX_ENABLED)) { c->rn = 0; ct_start_receiver (c, 1, c->rphys[0], DMABUFSZ, c->rdphys[0], c->rdphys[NBUF-1]); /* enable status interrupt */ outb (c->IE1, inb (c->IE1) | IE1_CDCDE); outb (c->IE0, inb (c->IE0) | IE0_RX_INTE); ier0 = inb (IER0(c->board->port)); ier0 |= c->num ? IER0_RX_INTE_1 : IER0_RX_INTE_0; outb (IER0(c->board->port), ier0); ct_set_rts (c, 1); } else if (! on && (st3 & ST3_RX_ENABLED)) { ct_set_rts (c, 0); outb (c->CMD, CMD_RX_DISABLE); ier0 = inb (IER0(c->board->port)); ier0 &= c->num ? ~(IER0_RX_INTE_1 | IER0_RX_RDYE_1) : ~(IER0_RX_INTE_0 | IER0_RX_RDYE_0); outb (IER0(c->board->port), ier0); ier1 = inb (IER1(c->board->port)); ier1 &= c->num ? ~(IER1_RX_DMERE_1 | IER1_RX_DME_1) : ~(IER1_RX_DMERE_0 | IER1_RX_DME_0); outb (IER1(c->board->port), ier1); } } /* * Turn transmitter on/off */ void ct_enable_transmit (ct_chan_t *c, int on) { unsigned char st3, ier0, ier1; st3 = inb (c->ST3); /* enable or disable transmitter */ if (on && ! (st3 & ST3_TX_ENABLED)) { c->tn = 0; c->te = 0; ct_start_transmitter (c, 1, c->tphys[0], DMABUFSZ, c->tdphys[0], c->tdphys[0]); outb (c->TX.DIR, DIR_CHAIN_EOME | DIR_CHAIN_BOFE | DIR_CHAIN_COFE); } else if (! on && (st3 & ST3_TX_ENABLED)) { outb (c->CMD, CMD_TX_DISABLE); ier0 = inb (IER0(c->board->port)); ier0 &= c->num ? ~(IER0_TX_INTE_1 | IER0_TX_RDYE_1) : ~(IER0_TX_INTE_0 | IER0_TX_RDYE_0); outb (IER0(c->board->port), ier0); ier1 = inb (IER1(c->board->port)); ier1 &= c->num ?
~(IER1_TX_DMERE_1 | IER1_TX_DME_1) : ~(IER1_TX_DMERE_0 | IER1_TX_DME_0); outb (IER1(c->board->port), ier1); } } int ct_set_config (ct_board_t *b, int cfg) { if (b->opt.cfg == cfg) return 0; switch (b->type) { case B_TAU_G703: case B_TAU_G703C: case B_TAU2_G703: if (cfg == CFG_C) return -1; ct_g703_config (b, cfg); return 0; case B_TAU_E1: case B_TAU_E1C: case B_TAU_E1D: case B_TAU2_E1: case B_TAU2_E1D: ct_e1_config (b, cfg); return 0; default: return cfg == CFG_A ? 0 : -1; } } int ct_get_dpll (ct_chan_t *c) { return (c->hopt.rxs == CLK_RXS_DPLL_INT); } void ct_set_dpll (ct_chan_t *c, int on) { if (on && ct_get_baud (c)) c->hopt.rxs = CLK_RXS_DPLL_INT; else c->hopt.rxs = CLK_LINE; ct_update_chan (c); } int ct_get_nrzi (ct_chan_t *c) { return (c->opt.md2.encod == MD2_ENCOD_NRZI); } /* * Change line encoding to NRZI, default is NRZ */ void ct_set_nrzi (ct_chan_t *c, int on) { c->opt.md2.encod = on ? MD2_ENCOD_NRZI : MD2_ENCOD_NRZ; outb (c->MD2, *(unsigned char*)&c->opt.md2); } /* * Transmit clock inversion */ void ct_set_invtxc (ct_chan_t *c, int on) { if (on) c->board->bcr2 |= (c->num ? BCR2_INVTXC1 : BCR2_INVTXC0); else c->board->bcr2 &= ~(c->num ? BCR2_INVTXC1 : BCR2_INVTXC0); outb (BCR2(c->board->port), c->board->bcr2); } int ct_get_invtxc (ct_chan_t *c) { return (c->board->bcr2 & (c->num ? BCR2_INVTXC1 : BCR2_INVTXC0)) != 0; } /* * Receive clock inversion */ void ct_set_invrxc (ct_chan_t *c, int on) { if (on) c->board->bcr2 |= (c->num ? BCR2_INVRXC1 : BCR2_INVRXC0); else c->board->bcr2 &= ~(c->num ? BCR2_INVRXC1 : BCR2_INVRXC0); outb (BCR2(c->board->port), c->board->bcr2); } int ct_get_invrxc (ct_chan_t *c) { return (c->board->bcr2 & (c->num ? BCR2_INVRXC1 : BCR2_INVRXC0)) != 0; } /* * Main interrupt handler */ void ct_int_handler (ct_board_t *b) { unsigned char bsr0, imvr; ct_chan_t *c; while ((bsr0 = inb (BSR0(b->port))) & BSR0_INTR) { if (bsr0 & BSR0_RDYERR) { outb (BCR1(b->port), b->bcr1); } else if (bsr0 & BSR0_GINT) { if (b->type == B_TAU_E1 || b->type == B_TAU_E1C || b->type == B_TAU_E1D || b->type == B_TAU2_E1 || b->type == B_TAU2_E1D) ct_e1_interrupt (b); } else if (bsr0 & BSR0_HDINT) { /* Read the interrupt modified vector register. */ imvr = inb (IACK(b->port)); c = b->chan + (imvr & IMVR_CHAN1 ? 1 : 0); ct_hdlc_interrupt (c, imvr); } } } static void ct_e1_interrupt (ct_board_t *b) { unsigned char sr; sr = inb (E1SR(b->port)); if (sr & E1SR_SCC_IRQ) ct_scc_interrupt (b); if (sr & E1SR_E0_IRQ1) ct_e1timer_interrupt (b->chan + 0); if (sr & E1SR_E1_IRQ1) ct_e1timer_interrupt (b->chan + 1); } static void ct_scc_interrupt (ct_board_t *b) { unsigned char rsr; unsigned char ivr, a = AM_A; /* assume channel A */ ct_chan_t *c = b->chan; ivr = cte_in2 (b->port, AM_IVR); if (! (ivr & IVR_A)) ++c, a = 0; /* really channel B */ switch (ivr & IVR_REASON) { case IVR_TXRDY: /* transmitter empty */ c->scctx_b = (c->scctx_b + 1) % SCCBUFSZ; if (c->scctx_b == c->scctx_e) { c->scctx_empty = 1; cte_out2c (c, AM_CR | CR_RST_TXINT); } else cte_out2d (c, c->scctx[c->scctx_b]); break; case IVR_RXERR: /* receive error */ case IVR_RX: /* receive character available */ rsr = cte_in2 (b->port, a|AM_RSR); if (rsr & RSR_RXOVRN) { /* rx overrun */ if (c->call_on_err) c->call_on_err (c, CT_SCC_OVERRUN); } else if (rsr & RSR_FRME) { /* frame error */ if (c->call_on_err) c->call_on_err (c, CT_SCC_FRAME); } else { c->sccrx[c->sccrx_e] = cte_in2d (c); c->sccrx_e = (c->sccrx_e + 1) % SCCBUFSZ; c->sccrx_empty &= 0; if (c->call_on_scc) c->call_on_scc (c); if (c->sccrx_e == c->sccrx_b && ! 
c->sccrx_empty) if (c->call_on_err) c->call_on_err (c, CT_SCC_OVERFLOW); } if (rsr) cte_out2c (c, CR_RST_ERROR); break; case IVR_STATUS: /* external status interrupt */ /* Unexpected SCC status interrupt. */ cte_out2c (c, CR_RST_EXTINT); break; } } /* * G.703 mode channel: process 1-second timer interrupts. * Read error and request registers, and fill the status field. */ void ct_g703_timer (ct_chan_t *c) { int bpv, cd, tsterr, tstreq; /* Count seconds. * During the first second after the channel startup * the status registers are not stable yet, * so we will skip the first second. */ ++c->cursec; if (c->mode != M_G703) return; if (c->totsec + c->cursec <= 1) return; c->status = 0; cd = ct_get_cd (c); bpv = inb (GERR (c->board->port)) & (c->num ? GERR_BPV1 : GERR_BPV0); outb (GERR (c->board->port), bpv); tsterr = inb (GERR (c->board->port)) & (c->num ? GERR_ERR1 : GERR_ERR0); outb (GERR (c->board->port), tsterr); tstreq = inb (GLDR (c->board->port)) & (c->num ? GLDR_LREQ1 : GLDR_LREQ0); outb (GLDR (c->board->port), tstreq); /* Compute the SNMP-compatible channel status. */ if (bpv) ++c->currnt.bpv; /* bipolar violation */ if (! cd) c->status |= ESTS_LOS; /* loss of signal */ if (tsterr) c->status |= ESTS_TSTERR; /* test error */ if (tstreq) c->status |= ESTS_TSTREQ; /* test code detected */ if (! c->status) c->status = ESTS_NOALARM; /* Unavailable second -- loss of carrier, or receiving test code. */ if ((! cd) || tstreq) /* Unavailable second -- no other counters. */ ++c->currnt.uas; else { /* Line errored second -- any BPV. */ if (bpv) ++c->currnt.les; /* Collect data for computing * degraded minutes. */ ++c->degsec; if (cd && bpv) ++c->degerr; } /* Degraded minutes -- having more than 50% error intervals. */ if (c->cursec / 60 == 0) { if (c->degerr*2 > c->degsec) ++c->currnt.dm; c->degsec = 0; c->degerr = 0; } /* Rotate statistics every 15 minutes. */ if (c->cursec > 15*60) { int i; for (i=47; i>0; --i) c->interval[i] = c->interval[i-1]; c->interval[0] = c->currnt; /* Accumulate total statistics. */ c->total.bpv += c->currnt.bpv; c->total.fse += c->currnt.fse; c->total.crce += c->currnt.crce; c->total.rcrce += c->currnt.rcrce; c->total.uas += c->currnt.uas; c->total.les += c->currnt.les; c->total.es += c->currnt.es; c->total.bes += c->currnt.bes; c->total.ses += c->currnt.ses; c->total.oofs += c->currnt.oofs; c->total.css += c->currnt.css; c->total.dm += c->currnt.dm; memset (&c->currnt, 0, sizeof (c->currnt)); c->totsec += c->cursec; c->cursec = 0; } }
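/*
 * A minimal standalone sketch of the per-second classification used
 * by ct_g703_timer() above: an unavailable second (no carrier, or a
 * test request) suppresses every other counter, and degraded minutes
 * are derived from the error density of the remaining seconds. The
 * struct and names below are illustrative only, not part of the
 * driver.
 */
#include <stdio.h>

struct g703_stats {
	unsigned uas;		/* unavailable seconds */
	unsigned les;		/* line errored seconds */
	unsigned dm;		/* degraded minutes */
};

static void
account_second (struct g703_stats *st, int cd, int bpv, int tstreq,
	unsigned *degsec, unsigned *degerr)
{
	if (! cd || tstreq) {
		++st->uas;	/* unavailable -- nothing else counts */
		return;
	}
	if (bpv)
		++st->les;	/* line errored second */
	++*degsec;		/* counts toward degraded minutes */
	if (bpv)
		++*degerr;
}

int
main (void)
{
	struct g703_stats st = { 0, 0, 0 };
	unsigned degsec = 0, degerr = 0;
	int sec;

	/* One minute with carrier up and BPVs in 40 of 60 seconds. */
	for (sec = 0; sec < 60; ++sec)
		account_second (&st, 1, sec % 3 != 0, 0, &degsec, &degerr);
	/* More than 50% errored intervals -> one degraded minute. */
	if (degerr * 2 > degsec)
		++st.dm;
	printf ("uas=%u les=%u dm=%u\n", st.uas, st.les, st.dm);
	return 0;
}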
static void ct_e1timer_interrupt (ct_chan_t *c) { unsigned short port; unsigned char sr1, sr2, ssr; unsigned long bpv, fas, crc4, ebit, pcv, oof; port = c->num ? E1CS1(c->board->port) : E1CS0(c->board->port); sr2 = cte_ins (port, DS_SR2, 0xff); /* is it timer interrupt ? */ if (! (sr2 & SR2_SEC)) return; /* first interrupts should be ignored */ if (c->e1_first_int > 0) { c->e1_first_int --; return; } ++c->cursec; c->status = 0; /* Compute the SNMP-compatible channel status. */ sr1 = cte_ins (port, DS_SR1, 0xff); ssr = cte_in (port, DS_SSR); oof = 0; if (sr1 & (SR1_RCL | SR1_RLOS)) c->status |= ESTS_LOS; /* loss of signal */ if (sr1 & SR1_RUA1) c->status |= ESTS_AIS; /* receiving all ones */ if (c->gopt.cas && (sr1 & SR1_RSA1)) c->status |= ESTS_AIS16; /* signaling all ones */ if (c->gopt.cas && (sr1 & SR1_RDMA)) c->status |= ESTS_FARLOMF; /* alarm in timeslot 16 */ if (sr1 & SR1_RRA) c->status |= ESTS_FARLOF; /* far loss of framing */ /* Controlled slip second -- any slip event. */ if (sr1 & SR1_RSLIP) { ++c->currnt.css; } if (ssr & SSR_SYNC) { c->status |= ESTS_LOF; /* loss of framing */ ++oof; /* out of framing */ } if ((c->gopt.cas && (ssr & SSR_SYNC_CAS)) || (c->gopt.crc4 && (ssr & SSR_SYNC_CRC4))) { c->status |= ESTS_LOMF; /* loss of multiframing */ ++oof; /* out of framing */ } if (! c->status) c->status = ESTS_NOALARM; /* Get error counters. */ bpv = VCR (cte_in (port, DS_VCR1), cte_in (port, DS_VCR2)); fas = FASCR (cte_in (port, DS_FASCR1), cte_in (port, DS_FASCR2)); crc4 = CRCCR (cte_in (port, DS_CRCCR1), cte_in (port, DS_CRCCR2)); ebit = EBCR (cte_in (port, DS_EBCR1), cte_in (port, DS_EBCR2)); c->currnt.bpv += bpv; c->currnt.fse += fas; if (c->gopt.crc4) { c->currnt.crce += crc4; c->currnt.rcrce += ebit; } /* Path code violation is frame sync error if CRC4 disabled, * or CRC error if CRC4 enabled. */ pcv = fas; if (c->gopt.crc4) pcv += crc4; /* Unavailable second -- receiving all ones, or * loss of carrier, or loss of signal. */ if (sr1 & (SR1_RUA1 | SR1_RCL | SR1_RLOS)) /* Unavailable second -- no other counters. */ ++c->currnt.uas; else { /* Line errored second -- any BPV. */ if (bpv) ++c->currnt.les; /* Errored second -- any PCV, or out of frame sync, * or any slip events. */ if (pcv || oof || (sr1 & SR1_RSLIP)) ++c->currnt.es; /* Severely errored framing second -- out of frame sync. */ if (oof) ++c->currnt.oofs; /* Severely errored seconds -- * 832 or more PCVs, or 2048 or more BPVs. */ if (bpv >= 2048 || pcv >= 832) ++c->currnt.ses; else { /* Bursty errored seconds -- * no SES and more than 1 PCV. */ if (pcv > 1) ++c->currnt.bes; /* Collect data for computing * degraded minutes. */ ++c->degsec; c->degerr += bpv + pcv; } } /* Degraded minutes -- having error rate more than 10e-6, * not counting unavailable and severely errored seconds. */ if (c->cursec / 60 == 0) { if (c->degerr > c->degsec * 2048 / 1000) ++c->currnt.dm; c->degsec = 0; c->degerr = 0; } /* Rotate statistics every 15 minutes. */ if (c->cursec > 15*60) { int i; for (i=47; i>0; --i) c->interval[i] = c->interval[i-1]; c->interval[0] = c->currnt; /* Accumulate total statistics. */ c->total.bpv += c->currnt.bpv; c->total.fse += c->currnt.fse; c->total.crce += c->currnt.crce; c->total.rcrce += c->currnt.rcrce; c->total.uas += c->currnt.uas; c->total.les += c->currnt.les; c->total.es += c->currnt.es; c->total.bes += c->currnt.bes; c->total.ses += c->currnt.ses; c->total.oofs += c->currnt.oofs; c->total.css += c->currnt.css; c->total.dm += c->currnt.dm; for (i=0; i<sizeof (c->currnt); ++i) *(((char *)(&c->currnt))+i)=0; c->totsec += c->cursec; c->cursec = 0; } } static void ct_hdlc_interrupt (ct_chan_t *c, int imvr) { int i, dsr, st1, st2, cda; switch (imvr & IMVR_VECT_MASK) { case IMVR_RX_DMOK: /* receive DMA normal end */ dsr = inb (c->RX.DSR); cda = inw (c->RX.CDA); for (i=0; i<NBUF; ++i) if (cda == (unsigned short) c->rdphys[i]) break; if (i >= NBUF) i = c->rn; /* cannot happen */ while (c->rn != i) { int cst = B_STATUS (c->rdesc[c->rn]); if (cst == FST_EOM) { /* process data */ if (c->call_on_rx) c->call_on_rx (c, c->rbuf[c->rn], B_LEN(c->rdesc[c->rn])); ++c->ipkts; c->ibytes += B_LEN(c->rdesc[c->rn]); } else if (cst & ST2_OVRN) { /* Receive overrun error */ if (c->call_on_err) c->call_on_err (c, CT_OVERRUN); ++c->ierrs; } else if (cst & (ST2_HDLC_RBIT | ST2_HDLC_ABT | ST2_HDLC_SHRT)) { /* Receive frame error */ if (c->call_on_err) c->call_on_err (c, CT_FRAME); ++c->ierrs; } else if ((cst & ST2_HDLC_EOM) && (cst & ST2_HDLC_CRCE)) { /* Receive CRC error */ if (c->call_on_err) c->call_on_err (c, CT_CRC); ++c->ierrs; } else if (!
(cst & ST2_HDLC_EOM)) { /* Frame does not fit in the buffer. */ if (c->call_on_err) c->call_on_err (c, CT_OVERFLOW); ++c->ierrs; } B_NEXT (c->rdesc[c->rn]) = c->rdphys[(c->rn+1) % NBUF] & 0xffff; B_PTR (c->rdesc[c->rn]) = c->rphys[c->rn]; B_LEN (c->rdesc[c->rn]) = DMABUFSZ; B_STATUS (c->rdesc[c->rn]) = 0; c->rn = (c->rn + 1) % NBUF; } outw (c->RX.EDA, (unsigned short) c->rdphys[(i+NBUF-1)%NBUF]); /* Clear DMA interrupt. */ if (inb (c->RX.DSR) & DSR_DMA_ENABLE) { outb (c->RX.DSR, dsr); } else { outb (c->RX.DSR, (dsr & 0xfc) | DSR_DMA_ENABLE); } ++c->rintr; break; case IMVR_RX_INT: /* receive status */ st1 = inb (c->ST1); st2 = inb (c->ST2); if (st1 & ST1_CDCD){ if (c->call_on_msig) c->call_on_msig (c); ++c->mintr; } /* Clear interrupt. */ outb (c->ST1, st1); outb (c->ST2, st2); ++c->rintr; break; case IMVR_RX_DMERR: /* receive DMA error */ dsr = inb (c->RX.DSR); if (dsr & (DSR_CHAIN_BOF | DSR_CHAIN_COF)) { if (c->call_on_err) c->call_on_err (c, CT_OVERFLOW); ++c->ierrs; for (i=0; i<NBUF; ++i) { B_LEN (c->rdesc[i]) = DMABUFSZ; B_STATUS (c->rdesc[i]) = 0; } ct_start_receiver (c, 1, c->rphys[0], DMABUFSZ, c->rdphys[0], c->rdphys[NBUF-1]); c->rn = 0; } /* Clear DMA interrupt. */ outb (c->RX.DSR, dsr); ++c->rintr; break; case IMVR_TX_DMOK: /* transmit DMA normal end */ case IMVR_TX_DMERR: /* transmit DMA error */ dsr = inb (c->TX.DSR); cda = inw (c->TX.CDA); for (i=0; i<NBUF && cda != (unsigned short) c->tdphys[i]; ++i) continue; if (i >= NBUF) i = 1; /* cannot happen */ if (dsr & DSR_CHAIN_COF) { if (c->call_on_err) c->call_on_err (c, CT_UNDERRUN); ++c->oerrs; } while (c->tn != i) { if (c->call_on_tx) c->call_on_tx (c, c->attach[c->tn], B_LEN(c->tdesc[c->tn])); ++c->opkts; c->obytes += B_LEN(c->tdesc[c->tn]); c->tn = (c->tn + 1) % NBUF; /* Clear DMA interrupt. */ outb (c->TX.DSR, DSR_CHAIN_EOM | DSR_DMA_CONTINUE); } outb (c->TX.DSR, dsr & ~DSR_CHAIN_EOM); ++c->tintr; break; case IMVR_TX_INT: /* transmit error, HDLC only */ st1 = inb (c->ST1); if (st1 & ST1_HDLC_UDRN) { if (c->call_on_err) c->call_on_err (c, CT_UNDERRUN); ++c->oerrs; } outb (c->ST1, st1); ++c->tintr; break; default: /* Unknown interrupt - cannot happen. */ break; } } int ct_receive_enabled (ct_chan_t *c) { int st3; st3 = inb (c->ST3); return (st3 & ST3_RX_ENABLED) ? 1 : 0; } int ct_transmit_enabled (ct_chan_t *c) { int st3; st3 = inb (c->ST3); return (st3 & ST3_TX_ENABLED) ? 1 : 0; } int ct_buf_free (ct_chan_t *c) { return (NBUF + c->tn - c->te - 1) % NBUF; } int ct_send_packet (ct_chan_t *c, unsigned char *data, int len, void *attachment) { int dsr, ne; if (len > DMABUFSZ) return -2; /* Is it really free? */ ne = (c->te+1) % NBUF; if (ne == c->tn) return -1; /* Set up the tx descriptor. */ B_LEN (c->tdesc[c->te]) = len; B_STATUS (c->tdesc[c->te]) = FST_EOM; c->attach[c->te] = attachment; if (c->tbuf[c->te] != data) memcpy (c->tbuf[c->te], data, len); /* Start the transmitter. */ c->te = ne; outw (c->TX.EDA, (unsigned short) c->tdphys[ne]); dsr = inb (c->TX.DSR); if (! (dsr & DSR_DMA_ENABLE)) outb (c->TX.DSR, DSR_DMA_ENABLE); return 0; }
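/*
 * The service-channel routines that follow (scc_write, scc_read,
 * sccrx_check) share one ring-buffer idiom: a begin index, an end
 * index, and an explicit "empty" flag, because begin == end alone
 * cannot distinguish a completely full ring from an empty one. A
 * self-contained userland sketch of the same occupancy arithmetic;
 * RINGSZ is a stand-in for SCCBUFSZ.
 */
#include <stdio.h>

#define RINGSZ 8

struct ring {
	int b, e;		/* begin (read) and end (write) indices */
	int empty;		/* disambiguates b == e */
};

/* Bytes queued -- the computation used by scc_read()/sccrx_check(). */
static int
ring_used (const struct ring *r)
{
	if (r->empty)
		return 0;
	return (RINGSZ + r->e - 1 - r->b) % RINGSZ + 1;
}

/* Free space -- the computation used by scc_write(). */
static int
ring_free (const struct ring *r)
{
	if (r->empty)
		return RINGSZ;
	return (RINGSZ + r->b - r->e) % RINGSZ;
}

int
main (void)
{
	struct ring r = { 0, 0, 1 };

	printf ("empty: used=%d free=%d\n", ring_used (&r), ring_free (&r));
	r.empty = 0;		/* same indices, now completely full */
	printf ("full:  used=%d free=%d\n", ring_used (&r), ring_free (&r));
	r.e = 3;		/* three bytes queued */
	printf ("part:  used=%d free=%d\n", ring_used (&r), ring_free (&r));
	return 0;
}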
int scc_write (ct_chan_t *c, unsigned char *d, int len) { int i, free; /* determine free space in the buffer */ if (c->scctx_empty) free = SCCBUFSZ; else free = (SCCBUFSZ + c->scctx_b - c->scctx_e) % SCCBUFSZ; if (len > free) return -1; for (i=0; i<len; ++i) { c->scctx[c->scctx_e] = d[i]; c->scctx_e = (c->scctx_e+1) % SCCBUFSZ; } if (c->scctx_empty && len) { cte_out2d (c, c->scctx[c->scctx_b]); c->scctx_empty = 0; } return 0; } int scc_read (ct_chan_t *c, unsigned char *d, int len) { int i, bytes; if (c->sccrx_empty) bytes = 0; else bytes = (SCCBUFSZ + c->sccrx_e - 1 - c->sccrx_b) % SCCBUFSZ + 1; if (len > bytes) return -1; for (i=0; i<len; ++i) { d[i] = c->sccrx[c->sccrx_b]; c->sccrx_b = (c->sccrx_b+1) % SCCBUFSZ; } if (c->sccrx_b==c->sccrx_e) c->sccrx_empty = 1; return 0; } int sccrx_check (ct_chan_t *c) { int bytes; if (c->sccrx_empty) bytes = 0; else bytes = (SCCBUFSZ + c->sccrx_e - 1 - c->sccrx_b) % SCCBUFSZ + 1; return bytes; } int scc_read_byte (ct_chan_t *c) { unsigned char a; if (scc_read (c, &a, 1) < 0) return -1; return a; } int scc_write_byte (ct_chan_t *c, unsigned char b) { if (scc_write (c, &b, 1) < 0) return -1; return b; } /* * Register event processing functions */ void ct_register_transmit (ct_chan_t *c, void (*func) (ct_chan_t*, void*, int)) { c->call_on_tx = func; } void ct_register_receive (ct_chan_t *c, void (*func) (ct_chan_t*, char*, int)) { c->call_on_rx = func; } void ct_register_error (ct_chan_t *c, void (*func) (ct_chan_t*, int)) { c->call_on_err = func; } void ct_register_scc (ct_chan_t *c, void (*func) (ct_chan_t*)) { c->call_on_scc = func; } void ct_register_modem (ct_chan_t *c, void (*func) (ct_chan_t*)) { c->call_on_msig = func; } Index: head/sys/dev/ctau/if_ct.c =================================================================== --- head/sys/dev/ctau/if_ct.c (revision 313981) +++ head/sys/dev/ctau/if_ct.c (revision 313982) @@ -1,2206 +1,2206 @@ /*- * Cronyx-Tau adapter driver for FreeBSD. * Supports PPP/HDLC and Cisco/HDLC protocol in synchronous mode, * and asynchronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1994-2002 Cronyx Engineering. * Author: Serge Vakulenko, <vak@cronyx.ru> * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Roman Kurakin, <rik@cronyx.ru> * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions.
* * Cronyx Id: if_ct.c,v 1.1.2.31 2004/06/23 17:09:13 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # include # include # include #else # include # include # define PP_CISCO IFF_LINK2 # include #endif #define NCTAU 1 /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CT_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CT_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CT_LOCK_NAME "ctX" #define CT_LOCK(_bd) mtx_lock (&(_bd)->ct_mtx) #define CT_UNLOCK(_bd) mtx_unlock (&(_bd)->ct_mtx) #define CT_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->ct_mtx, MA_OWNED) static void ct_identify __P((driver_t *, device_t)); static int ct_probe __P((device_t)); static int ct_attach __P((device_t)); static int ct_detach __P((device_t)); static device_method_t ct_isa_methods [] = { DEVMETHOD(device_identify, ct_identify), DEVMETHOD(device_probe, ct_probe), DEVMETHOD(device_attach, ct_attach), DEVMETHOD(device_detach, ct_detach), DEVMETHOD_END }; typedef struct _ct_dma_mem_t { unsigned long phys; void *virt; size_t size; bus_dma_tag_t dmat; bus_dmamap_t mapp; } ct_dma_mem_t; typedef struct _drv_t { char name [8]; ct_chan_t *chan; ct_board_t *board; struct _bdrv_t *bd; ct_dma_mem_t dmamem; int running; #ifdef NETGRAPH char nodename [NG_NODESIZ]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifqueue queue; struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; struct cdev *devt; } drv_t; typedef struct _bdrv_t { ct_board_t *board; struct resource *base_res; struct resource *drq_res; struct resource *irq_res; int base_rid; int drq_rid; int irq_rid; void *intrhand; drv_t channel [NCHAN]; struct mtx ct_mtx; } bdrv_t; static driver_t ct_isa_driver = { "ct", ct_isa_methods, sizeof (bdrv_t), }; static devclass_t ct_devclass; static void ct_receive (ct_chan_t *c, char *data, int len); static void ct_transmit (ct_chan_t *c, void *attachment, int len); static void ct_error (ct_chan_t *c, int data); static void ct_up (drv_t *d); static void ct_start (drv_t *d); static void ct_down (drv_t *d); static void ct_watchdog (drv_t *d); static void ct_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void ct_ifstart (struct ifnet *ifp); static void ct_tlf (struct sppp *sp); static void ct_tls (struct sppp *sp); static int ct_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void ct_initialize (void *softc); #endif static ct_board_t *adapter [NCTAU]; static drv_t *channel [NCTAU*NCHAN]; static struct callout led_timo [NCTAU]; static struct callout timeout_handle; static int ct_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int ct_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int ct_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); static struct cdevsw ct_cdevsw = { .d_version = D_VERSION, .d_open = ct_open, .d_close = ct_close, .d_ioctl = ct_ioctl, .d_name = "ct", }; /* * Make an mbuf from data. 
*/ static struct mbuf *makembuf (void *buf, u_int len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! m) return 0; if (!(MCLGET(m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static void ct_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NCTAU; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; k++) { d = channel[i * NCHAN + k]; if (! d) continue; if (d->chan->mode != M_G703) continue; s = splimp (); CT_LOCK ((bdrv_t *)d->bd); ct_g703_timer (d->chan); CT_UNLOCK ((bdrv_t *)d->bd); splx (s); } } callout_reset (&timeout_handle, hz, ct_timeout, 0); } static void ct_led_off (void *arg) { ct_board_t *b = arg; bdrv_t *bd = ((drv_t *)b->chan->sys)->bd; int s = splimp (); CT_LOCK (bd); ct_led (b, 0); CT_UNLOCK (bd); splx (s); } /* * Activate interrupt handler from DDK. */ static void ct_intr (void *arg) { bdrv_t *bd = arg; ct_board_t *b = bd->board; #ifndef NETGRAPH int i; #endif int s = splimp (); CT_LOCK (bd); /* Turn LED on. */ ct_led (b, 1); ct_int_handler (b); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, ct_led_off, b); CT_UNLOCK (bd); splx (s); #ifndef NETGRAPH /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->queue)) { IF_DEQUEUE (&d->queue,m); if (!m) continue; sppp_input (d->ifp, m); } } #endif } static int probe_irq (ct_board_t *b, int irq) { int mask, busy, cnt; /* Clear pending irq, if any. */ ct_probe_irq (b, -irq); DELAY (100); for (cnt=0; cnt<5; ++cnt) { /* Get the mask of pending irqs, assuming they are busy. * Activate the adapter on given irq. */ busy = ct_probe_irq (b, irq); DELAY (1000); /* Get the mask of active irqs. * Deactivate our irq. */ mask = ct_probe_irq (b, -irq); DELAY (100); if ((mask & ~busy) == 1 << irq) { ct_probe_irq (b, 0); /* printf ("ct%d: irq %d ok, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ return 1; } } /* printf ("ct%d: irq %d not functional, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ ct_probe_irq (b, 0); return 0; } static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; static char dmatab [] = { 7, 6, 5, 0 }; static char irqtab [] = { 5, 10, 11, 7, 3, 15, 12, 0 }; static int ct_is_free_res (device_t dev, int rid, int type, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource *res; if (!(res = bus_alloc_resource (dev, type, &rid, start, end, count, 0))) return 0; bus_release_resource (dev, type, rid, res); return 1; } static void ct_identify (driver_t *driver, device_t dev) { rman_res_t iobase, rescount; int devcount; device_t *devices; device_t child; devclass_t my_devclass; int i, k; if ((my_devclass = devclass_find ("ct")) == NULL) return; devclass_get_devices (my_devclass, &devices, &devcount); if (devcount == 0) { /* We should find all devices by ourselves.
We could alter other * devices, but we don't have a choice */ for (i = 0; (iobase = porttab [i]) != 0; i++) { if (!ct_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; devcount++; child = BUS_ADD_CHILD (dev, ISA_ORDER_SPECULATIVE, "ct", -1); if (child == NULL) return; device_set_desc_copy (child, "Cronyx Tau-ISA"); device_set_driver (child, driver); bus_set_resource (child, SYS_RES_IOPORT, 0, iobase, NPORT); if (devcount >= NCTAU) break; } } else { static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; /* Let's check the user's choice. */ for (k = 0; k < devcount; k++) { if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) continue; for (i = 0; porttab [i] != 0; i++) { if (porttab [i] != iobase) continue; if (!ct_is_free_res (devices[k], 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Tau-ISA"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); devices[k] = 0; continue; } } for (k = 0; k < devcount; k++) { if (devices[k] == 0) continue; if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) == 0) continue; for (i = 0; (iobase = porttab [i]) != 0; i++) { if (porttab [i] == -1) continue; if (!ct_is_free_res (devices[k], 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (ct_probe_board (iobase, -1, -1) == 0) continue; bus_set_resource (devices[k], SYS_RES_IOPORT, 0, iobase, NPORT); porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Tau-ISA"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); } } free (devices, M_TEMP); } return; } static int ct_probe (device_t dev) { int unit = device_get_unit (dev); rman_res_t iobase, rescount; if (!device_get_desc (dev) || strcmp (device_get_desc (dev), "Cronyx Tau-ISA")) return ENXIO; /* KASSERT ((bd != NULL), ("ct%d: NULL device softc\n", unit));*/ if (bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) { printf ("ct%d: Couldn't get IOPORT\n", unit); return ENXIO; } if (!ct_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) { printf ("ct%d: Resource IOPORT isn't free\n", unit); return ENXIO; } if (!ct_probe_board (iobase, -1, -1)) { printf ("ct%d: probing for Tau-ISA at %jx failed\n", unit, iobase); return ENXIO; } return 0; } static void ct_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int ct_bus_dma_mem_alloc (int bnum, int cnum, ct_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, NULL, NULL, &dmem->dmat); if (error) { if (cnum >= 0) printf ("ct%d-%d: ", bnum, cnum); else printf ("ct%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("ct%d-%d: ", bnum, cnum); else printf ("ct%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat,
dmem->mapp, dmem->virt, dmem->size, ct_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("ct%d-%d: ", bnum, cnum); else printf ("ct%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } return 1; } static void ct_bus_dma_mem_free (ct_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } /* * The adapter is present, initialize the driver structures. */ static int ct_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); rman_res_t iobase, drq, irq, rescount; int unit = device_get_unit (dev); char *ct_ln = CT_LOCK_NAME; ct_board_t *b; ct_chan_t *c; drv_t *d; int i; int s; KASSERT ((bd != NULL), ("ct%d: NULL device softc\n", unit)); bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount); bd->base_rid = 0; bd->base_res = bus_alloc_resource (dev, SYS_RES_IOPORT, &bd->base_rid, iobase, iobase + NPORT, NPORT, RF_ACTIVE); if (! bd->base_res) { printf ("ct%d: cannot alloc base address\n", unit); return ENXIO; } if (bus_get_resource (dev, SYS_RES_DRQ, 0, &drq, &rescount) != 0) { for (i = 0; (drq = dmatab [i]) != 0; i++) { if (!ct_is_free_res (dev, 0, SYS_RES_DRQ, drq, drq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_DRQ, 0, drq, 1); break; } if (dmatab[i] == 0) { bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("ct%d: Couldn't get DRQ\n", unit); return ENXIO; } } bd->drq_rid = 0; bd->drq_res = bus_alloc_resource (dev, SYS_RES_DRQ, &bd->drq_rid, drq, drq + 1, 1, RF_ACTIVE); if (! bd->drq_res) { printf ("ct%d: cannot allocate drq\n", unit); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } if (bus_get_resource (dev, SYS_RES_IRQ, 0, &irq, &rescount) != 0) { for (i = 0; (irq = irqtab [i]) != 0; i++) { if (!ct_is_free_res (dev, 0, SYS_RES_IRQ, irq, irq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_IRQ, 0, irq, 1); break; } if (irqtab[i] == 0) { bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("ct%d: Couldn't get IRQ\n", unit); return ENXIO; } } bd->irq_rid = 0; bd->irq_res = bus_alloc_resource (dev, SYS_RES_IRQ, &bd->irq_rid, irq, irq + 1, 1, RF_ACTIVE); if (! bd->irq_res) { printf ("ct%d: Couldn't allocate irq\n", unit); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } b = malloc (sizeof (ct_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ct%d: Couldn't allocate memory\n", unit); return (ENXIO); } adapter[unit] = b; bzero (b, sizeof(ct_board_t)); if (! ct_open_board (b, unit, iobase, irq, drq)) { printf ("ct%d: error loading firmware\n", unit); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } bd->board = b; ct_ln[2] = '0' + unit; mtx_init (&bd->ct_mtx, ct_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); if (!
probe_irq (b, irq)) { printf ("ct%d: irq %jd not functional\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); mtx_destroy (&bd->ct_mtx); return ENXIO; } callout_init (&led_timo[unit], 1); s = splimp (); if (bus_setup_intr (dev, bd->irq_res, INTR_TYPE_NET|INTR_MPSAFE, NULL, ct_intr, bd, &bd->intrhand)) { printf ("ct%d: Can't setup irq %jd\n", unit, irq); bd->board = 0; adapter [unit] = 0; free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); mtx_destroy (&bd->ct_mtx); splx (s); return ENXIO; } CT_LOCK (bd); ct_init_board (b, b->num, b->port, irq, drq, b->type, b->osc); ct_setup_board (b, 0, 0, 0); CT_UNLOCK (bd); printf ("ct%d: <%s>, clock %s MHz\n", b->num, b->name, b->osc == 20000000 ? "20" : "16.384"); for (c = b->chan; c < b->chan + NCHAN; ++c) { d = &bd->channel[c->num]; d->dmamem.size = sizeof(ct_buf_t); if (! ct_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; d->board = b; d->chan = c; d->bd = bd; c->sys = d; channel [b->num*NCHAN + c->num] = d; sprintf (d->name, "ct%d.%d", b->num, c->num); callout_init (&d->timeout_handle, 1); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; ct_bus_dma_mem_free (&d->dmamem); continue; } NG_NODE_SET_PRIVATE (d->node, d); sprintf (d->nodename, "%s%d", NG_CT_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); NG_NODE_UNREF (d->node); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; ct_bus_dma_mem_free (&d->dmamem); continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; mtx_init (&d->queue.ifq_mtx, "ct_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "ct_queue_hi", NULL, MTX_DEF); #else /*NETGRAPH*/ d->ifp = if_alloc(IFT_PPP); if (d->ifp == NULL) { printf ("%s: cannot if_alloc common interface\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; ct_bus_dma_mem_free (&d->dmamem); continue; } d->ifp->if_softc = d; if_initname (d->ifp, "ct", b->num * NCHAN + c->num); d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = ct_sioctl; d->ifp->if_start = ct_ifstart; d->ifp->if_init = ct_initialize; d->queue.ifq_maxlen = NBUF; mtx_init (&d->queue.ifq_mtx, "ct_queue", NULL, MTX_DEF); sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = ct_tlf; IFP2SP(d->ifp)->pp_tls = ct_tls; /* If BPF is in the kernel, call the attach for it. * Header size is 4 bytes.
*/ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ CT_LOCK (bd); ct_start_chan (c, d->dmamem.virt, d->dmamem.phys); ct_register_receive (c, &ct_receive); ct_register_transmit (c, &ct_transmit); ct_register_error (c, &ct_error); CT_UNLOCK (bd); d->devt = make_dev (&ct_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "ct%d", b->num*NCHAN+c->num); } splx (s); return 0; } static int ct_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); ct_board_t *b = bd->board; ct_chan_t *c; int s; KASSERT (mtx_initialized (&bd->ct_mtx), ("ct mutex not initialized")); s = splimp (); CT_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; if (d->running) { CT_UNLOCK (bd); splx (s); return EBUSY; } } /* Deactivate the timeout routine. */ callout_stop (&led_timo[b->num]); CT_UNLOCK (bd); bus_teardown_intr (dev, bd->irq_res, bd->intrhand); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); CT_LOCK (bd); ct_close_board (b); CT_UNLOCK (bd); /* Detach the interfaces, free buffer memory. */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; callout_stop (&d->timeout_handle); #ifdef NETGRAPH if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #else /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); if_detach (d->ifp); if_free (d->ifp); IF_DRAIN (&d->queue); mtx_destroy (&d->queue.ifq_mtx); #endif destroy_dev (d->devt); } CT_LOCK (bd); ct_led_off (b); CT_UNLOCK (bd); callout_drain (&led_timo[b->num]); splx (s); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || !d->chan->type) continue; callout_drain(&d->timeout_handle); /* Deallocate buffers. */ ct_bus_dma_mem_free (&d->dmamem); } - bd->board = 0; - adapter [b->num] = 0; + bd->board = NULL; + adapter [b->num] = NULL; free (b, M_DEVBUF); mtx_destroy (&bd->ct_mtx); return 0; } #ifndef NETGRAPH static void ct_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->bd; CT_LOCK (bd); ct_start (d); CT_UNLOCK (bd); } static void ct_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CT_DEBUG (d, ("ct_tlf\n")); /* ct_set_dtr (d->chan, 0);*/ /* ct_set_rts (d->chan, 0);*/ if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_down (sp); } static void ct_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CT_DEBUG (d, ("ct_tls\n")); if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_up (sp); } /* * Initialization of interface. * It seems never to be called by the upper level. */ static void ct_initialize (void *softc) { drv_t *d = softc; CT_DEBUG (d, ("ct_initialize\n")); } /* * Process an ioctl request. */ static int ct_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->bd; int error, s, was_up, should_be_up; was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (!
(ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CT_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CT_DEBUG2 (d, ("SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CT_DEBUG2 (d, ("SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CT_DEBUG2 (d, ("SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CT_DEBUG2 (d, ("SIOCSIFADDR\n")); break; } /* We get here only in the case of SIOCSIFFLAGS or SIOCSIFADDR. */ s = splimp (); CT_LOCK (bd); should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ ct_up (d); ct_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ ct_down (d); } CT_UNLOCK (bd); splx (s); return 0; } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void ct_down (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("ct_down\n")); ct_set_dtr (d->chan, 0); ct_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); splx (s); } /* * Start the interface. Called on splimp(). */ static void ct_up (drv_t *d) { int s = splimp (); CT_DEBUG (d, ("ct_up\n")); ct_set_dtr (d->chan, 1); ct_set_rts (d->chan, 1); d->running = 1; splx (s); } /* * Start output on the (slave) interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void ct_send (drv_t *d) { struct mbuf *m; u_short len; CT_DEBUG2 (d, ("ct_send, tn=%d\n", d->chan->tn)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! ct_get_dsr (d->chan) && !ct_get_loop (d->chan)) return; while (ct_buf_free (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH BPF_MTAP (d->ifp, m); #endif len = m_length (m, NULL); if (! m->m_next) ct_send_packet (d->chan, (u_char*)mtod (m, caddr_t), len, 0); else { m_copydata (m, 0, len, d->chan->tbuf[d->chan->te]); ct_send_packet (d->chan, d->chan->tbuf[d->chan->te], len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty. * Transmit timeout is 10 seconds. */ d->timeout = 10; } #ifndef NETGRAPH d->ifp->if_drv_flags |= IFF_DRV_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void ct_start (drv_t *d) { int s = splimp (); if (d->running) { if (! d->chan->dtr) ct_set_dtr (d->chan, 1); if (! d->chan->rts) ct_set_rts (d->chan, 1); ct_send (d); callout_reset (&d->timeout_handle, hz, ct_watchdog_timer, d); } splx (s); } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void ct_watchdog (drv_t *d) { CT_DEBUG (d, ("device timeout\n")); if (d->running) { ct_setup_chan (d->chan); ct_start_chan (d->chan, 0, 0); ct_set_dtr (d->chan, 1); ct_set_rts (d->chan, 1); ct_start (d); } } static void ct_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->bd; CT_LOCK (bd); if (d->timeout == 1) ct_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, ct_watchdog_timer, d); CT_UNLOCK (bd); }
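/*
 * ct_send() above arms a ten-second countdown (d->timeout = 10)
 * whenever data is queued, ct_transmit() below clears it on
 * completion, and ct_watchdog_timer() decrements it once per second,
 * calling ct_watchdog() when it reaches 1 -- so a lost transmit
 * interrupt recovers after ten ticks instead of wedging the queue.
 * A small standalone sketch of the countdown idiom, assuming a 1 Hz
 * tick; the names below are illustrative only.
 */
#include <stdio.h>

static int timeout;			/* 0 = disarmed */

static void tx_start (void)    { timeout = 10; }	/* arm on send */
static void tx_complete (void) { timeout = 0; }		/* disarm on irq */

/* Called once per second, like ct_watchdog_timer(). */
static void
tick (void)
{
	if (timeout == 1)
		printf ("watchdog: recovering lost interrupt\n");
	if (timeout)
		timeout--;
}

int
main (void)
{
	int i;

	tx_start ();			/* interrupt never arrives */
	for (i = 0; i < 12; ++i)
		tick ();		/* watchdog fires on the 10th tick */
	tx_start ();
	tx_complete ();			/* interrupt arrived in time */
	for (i = 0; i < 12; ++i)
		tick ();		/* watchdog stays quiet */
	return 0;
}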
/* * Transmit callback function. */ static void ct_transmit (ct_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; if (!d) return; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif ct_start (d); } /* * Process the received packet. */ static void ct_receive (ct_chan_t *c, char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #ifdef NETGRAPH int error; #endif if (!d || !d->running) return; m = makembuf (data, len); if (! m) { CT_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; NG_SEND_DATA_ONLY (error, d->hook, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ BPF_MTAP(d->ifp, m); IF_ENQUEUE (&d->queue, m); #endif } /* * Error callback function. */ static void ct_error (ct_chan_t *c, int data) { drv_t *d = c->sys; if (!d) return; switch (data) { case CT_FRAME: CT_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_CRC: CT_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_OVERRUN: CT_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_OVERFLOW: CT_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CT_UNDERRUN: CT_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif ct_start (d); break; default: CT_DEBUG (d, ("error #%d\n", data)); } } static int ct_open (struct cdev *dev, int oflags, int devtype, struct thread *td) { drv_t *d; if (dev2unit(dev) >= NCTAU*NCHAN || ! (d = channel[dev2unit(dev)])) return ENXIO; CT_DEBUG2 (d, ("ct_open\n")); return 0; } static int ct_close (struct cdev *dev, int fflag, int devtype, struct thread *td) { drv_t *d = channel [dev2unit(dev)]; if (!d) return 0; CT_DEBUG2 (d, ("ct_close\n")); return 0; } static int ct_modem_status (ct_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd; int status, s; if (!d) return 0; bd = d->bd; status = d->running ? TIOCM_LE : 0; s = splimp (); CT_LOCK (bd); if (ct_get_cd (c)) status |= TIOCM_CD; if (ct_get_cts (c)) status |= TIOCM_CTS; if (ct_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CT_UNLOCK (bd); splx (s); return status; } /* * Process an ioctl request on /dev/cronyx/ctauN. */ static int ct_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd; ct_chan_t *c; struct serial_statistics *st; struct e1_statistics *opte1; int error, s; char mask[16]; if (!d || !d->chan) return 0; bd = d->bd; c = d->chan; switch (cmd) { case SERIAL_GETREGISTERED: bzero (mask, sizeof(mask)); for (s=0; s<NCTAU*NCHAN; ++s) if (channel [s]) mask [s/8] |= 1 << (s & 7); bcopy (mask, data, sizeof (mask)); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: strcpy ((char*)data, (IFP2SP(d->ifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (d->ifp->if_drv_flags & IFF_DRV_RUNNING) return EBUSY; if (!
strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR | PP_KEEPALIVE); d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: *(int*)data = SERIAL_HDLC; return 0; case SERIAL_GETCFG: if (c->mode == M_HDLC) return EINVAL; switch (ct_get_config (c->board)) { default: *(char*)data = 'a'; break; case CFG_B: *(char*)data = 'b'; break; case CFG_C: *(char*)data = 'c'; break; } return 0; case SERIAL_SETCFG: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_HDLC) return EINVAL; s = splimp (); CT_LOCK (bd); switch (*(char*)data) { case 'a': ct_set_config (c->board, CFG_A); break; case 'b': ct_set_config (c->board, CFG_B); break; case 'c': ct_set_config (c->board, CFG_C); break; } CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSTAT: st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = c->mintr; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->ierrs = c->ierrs; st->obytes = c->obytes; st->opkts = c->opkts; st->oerrs = c->oerrs; return 0; case SERIAL_GETESTAT: opte1 = (struct e1_statistics*)data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; 
opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_CLRSTAT: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; c->rintr = 0; c->tintr = 0; c->mintr = 0; c->ibytes = 0; c->ipkts = 0; c->ierrs = 0; c->obytes = 0; c->opkts = 0; c->oerrs = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); return 0; case SERIAL_GETBAUD: *(long*)data = ct_get_baud(c); return 0; case SERIAL_SETBAUD: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_baud (c, *(long*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLOOP: *(int*)data = ct_get_loop (c); return 0; case SERIAL_SETLOOP: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_loop (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDPLL: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_dpll (c); return 0; case SERIAL_SETDPLL: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_dpll (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETNRZI: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_nrzi (c); return 0; case SERIAL_SETNRZI: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_nrzi (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: *(int*)data = c->debug; return 0; case SERIAL_SETDEBUG: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero to keep the * logic simple; switching debug off is handled through IFF_DEBUG. */ c->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) c->debug = c->debug_shadow; #else c->debug = *(int*)data; #endif return 0; case SERIAL_GETHIGAIN: if (c->mode != M_E1) return EINVAL; *(int*)data = ct_get_higain (c); return 0; case SERIAL_SETHIGAIN: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_higain (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CT_DEBUG2 (d, ("ioctl: getphony\n")); if (c->mode != M_E1) return EINVAL; *(int*)data = c->gopt.phony; return 0; case SERIAL_SETPHONY: CT_DEBUG2 (d, ("ioctl: setphony\n")); if (c->mode != M_E1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_phony (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: if (c->mode != M_E1 && c->mode != M_G703) return EINVAL; switch (ct_get_clk(c)) { default: *(int*)data = E1CLK_INTERNAL; break; case GCLK_RCV: *(int*)data = E1CLK_RECEIVE; break; case GCLK_RCLKO: *(int*)data = c->num ? E1CLK_RECEIVE_CHAN0 : E1CLK_RECEIVE_CHAN1; break; } return 0; case SERIAL_SETCLK: /* Only for superuser!
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); switch (*(int*)data) { default: ct_set_clk (c, GCLK_INT); break; case E1CLK_RECEIVE: ct_set_clk (c, GCLK_RCV); break; case E1CLK_RECEIVE_CHAN0: case E1CLK_RECEIVE_CHAN1: ct_set_clk (c, GCLK_RCLKO); break; } CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: if (c->mode != M_E1) return EINVAL; *(long*)data = ct_get_ts (c); return 0; case SERIAL_SETTIMESLOTS: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_ts (c, *(long*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSUBCHAN: if (c->mode != M_E1) return EINVAL; *(long*)data = ct_get_subchan (c->board); return 0; case SERIAL_SETSUBCHAN: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CT_LOCK (bd); ct_set_subchan (c->board, *(long*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVCLK: case SERIAL_GETINVTCLK: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_invtxc (c); return 0; case SERIAL_GETINVRCLK: if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; *(int*)data = ct_get_invrxc (c); return 0; case SERIAL_SETINVCLK: case SERIAL_SETINVTCLK: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_invtxc (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_SETINVRCLK: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_E1 || c->mode == M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); ct_set_invrxc (c, *(int*)data); CT_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLEVEL: if (c->mode != M_G703) return EINVAL; s = splimp (); CT_LOCK (bd); *(int*)data = ct_get_lq (c); CT_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CT_LOCK (bd); ct_set_dtr (c, 1); CT_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CT_LOCK (bd); ct_set_dtr (c, 0); CT_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CT_LOCK (bd); ct_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); ct_set_rts (c, (*(int*)data & TIOCM_RTS) ? 
1 : 0); CT_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CT_LOCK (bd); if (*(int*)data & TIOCM_DTR) ct_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) ct_set_rts (c, 1); CT_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CT_LOCK (bd); if (*(int*)data & TIOCM_DTR) ct_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) ct_set_rts (c, 0); CT_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = ct_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH static int ng_ct_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CT_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_ct_newhook (node_p node, hook_p hook, const char *name) { int s; drv_t *d = NG_NODE_PRIVATE (node); if (!d) return EINVAL; bdrv_t *bd = d->bd; /* Attach debug hook */ if (strcmp (name, NG_CT_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE (hook, NULL); d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CT_HOOK_RAW) != 0) return EINVAL; NG_HOOK_SET_PRIVATE (hook, d); d->hook = hook; s = splimp (); CT_LOCK (bd); ct_up (d); CT_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; }
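/*
 * format_timeslots() above compresses a 32-bit E1 timeslot mask into
 * a human-readable range list; slot 0 (the framing slot) is never
 * printed. A standalone userland copy with a usage example showing
 * the expected output for two sample masks.
 */
#include <stdio.h>

static char *
format_timeslots_copy (unsigned long s)
{
	static char buf [100];
	char *p = buf;
	int i;

	for (i = 1; i < 32; ++i)
		if ((s >> i) & 1) {
			int prev = (i > 1)  & (s >> (i-1));
			int next = (i < 31) & (s >> (i+1));

			if (prev) {
				if (next)
					continue;	/* interior of a run */
				*p++ = '-';		/* close the run */
			} else if (p > buf)
				*p++ = ',';
			if (i >= 10)
				*p++ = '0' + i / 10;
			*p++ = '0' + i % 10;
		}
	*p = 0;
	return buf;
}

int
main (void)
{
	printf ("%s\n", format_timeslots_copy (0x0000002eUL)); /* 1-3,5 */
	printf ("%s\n", format_timeslots_copy (0xfffffffeUL)); /* 1-31 */
	return 0;
}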
"/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? '/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, ct_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. 
*/ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, ct_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->bd; int length = 0; length += sprintf (s + length, "ct%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); switch (ct_get_config (c->board)) { case CFG_A: length += sprintf (s + length, " cfg=A"); break; case CFG_B: length += sprintf (s + length, " cfg=B"); break; case CFG_C: length += sprintf (s + length, " cfg=C"); break; default: length += sprintf (s + length, " cfg=unknown"); break; } if (ct_get_baud (c)) length += sprintf (s + length, " %ld", ct_get_baud (c)); else length += sprintf (s + length, " extclock"); if (c->mode == M_E1 || c->mode == M_G703) switch (ct_get_clk(c)) { case GCLK_INT : length += sprintf (s + length, " syn=int"); break; case GCLK_RCV : length += sprintf (s + length, " syn=rcv"); break; case GCLK_RCLKO : length += sprintf (s + length, " syn=xrcv"); break; } if (c->mode == M_HDLC) { length += sprintf (s + length, " dpll=%s", ct_get_dpll (c) ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", ct_get_nrzi (c) ? "on" : "off"); length += sprintf (s + length, " invtclk=%s", ct_get_invtxc (c) ? "on" : "off"); length += sprintf (s + length, " invrclk=%s", ct_get_invrxc (c) ? "on" : "off"); } if (c->mode == M_E1) length += sprintf (s + length, " higain=%s", ct_get_higain (c)? "on" : "off"); length += sprintf (s + length, " loop=%s", ct_get_loop (c) ? "on" : "off"); if (c->mode == M_E1) length += sprintf (s + length, " ts=%s", format_timeslots (ct_get_ts(c))); if (c->mode == M_E1 && ct_get_config (c->board) != CFG_A) length += sprintf (s + length, " pass=%s", format_timeslots (ct_get_subchan(c->board))); if (c->mode == M_G703) { int lq, x; x = splimp (); CT_LOCK (bd); lq = ct_get_lq (c); CT_UNLOCK (bd); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } static int ng_ct_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; struct ng_mesg *resp = NULL; int error = 0; if (!d) return EINVAL; CT_DEBUG (d, ("Rcvmsg\n")); NGI_GET_MSG (item, msg); switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CT_COOKIE: printf ("Don't forget to implement\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } s = (resp)->data; l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); return error; } static int ng_ct_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_tag_prio *ptag; bdrv_t *bd; struct ifqueue *q; int s; if (!d) return ENETDOWN; bd = d->bd; NGI_GET_M (item, m); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); return ENETDOWN; } /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; s = splimp (); CT_LOCK (bd); IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CT_UNLOCK (bd); splx (s); NG_FREE_M (m); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); ct_start (d); CT_UNLOCK (bd); splx (s); return 0; } static int ng_ct_rmnode (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd; CT_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bd = d->bd; int s = splimp (); CT_LOCK (bd); ct_down (d); CT_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NGF_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } NG_NODE_REVIVE(node); /* Persistant node */ #endif return 0; } static int ng_ct_connect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (!d) return 0; callout_reset (&d->timeout_handle, hz, ct_watchdog_timer, d); return 0; } static int ng_ct_disconnect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); bdrv_t *bd; if (!d) return 0; bd = d->bd; CT_LOCK (bd); if (NG_HOOK_PRIVATE (hook)) ct_down (d); CT_UNLOCK (bd); /* If we were wait it than it reasserted now, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); return 0; } #endif static int ct_modevent (module_t mod, int type, void *unused) { static int load_count = 0; switch (type) { case MOD_LOAD: #ifdef NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_ct\n"); #endif ++load_count; callout_init (&timeout_handle, 1); callout_reset (&timeout_handle, hz*5, ct_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-ISA\n"); #ifdef NETGRAPH ng_rmtype (&typestruct); #endif } /* If we were wait it than it reasserted now, just stop it. */ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CT_NODE_TYPE, .constructor = ng_ct_constructor, .rcvmsg = ng_ct_rcvmsg, .shutdown = ng_ct_rmnode, .newhook = ng_ct_newhook, .connect = ng_ct_connect, .rcvdata = ng_ct_rcvdata, .disconnect = ng_ct_disconnect, }; #endif /*NETGRAPH*/ #ifdef NETGRAPH MODULE_DEPEND (ng_ct, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (ct, sppp, 1, 1, 1); #endif DRIVER_MODULE (ct, isa, ct_isa_driver, ct_devclass, ct_modevent, NULL); MODULE_VERSION (ct, 1); Index: head/sys/dev/cx/cxddk.c =================================================================== --- head/sys/dev/cx/cxddk.c (revision 313981) +++ head/sys/dev/cx/cxddk.c (revision 313982) @@ -1,905 +1,905 @@ /*- * Cronyx-Sigma Driver Development Kit. 
* * Copyright (C) 1998 Cronyx Engineering. * Author: Pavel Novikov, * * Copyright (C) 1998-2003 Cronyx Engineering. * Author: Roman Kurakin, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations permission to use * or modify this software as long as this message is kept with the software, * all derivative works or modified versions. * * Cronyx Id: cxddk.c,v 1.1.2.2 2003/11/27 14:24:50 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define BYTE *(unsigned char*)& /* standard base port set */ static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; /* * Compute the optimal size of the receive buffer. */ static int cx_compute_buf_len (cx_chan_t *c) { int rbsz; if (c->mode == M_ASYNC) { rbsz = (c->rxbaud + 800 - 1) / 800 * 2; if (rbsz < 4) rbsz = 4; else if (rbsz > DMABUFSZ) rbsz = DMABUFSZ; } else rbsz = DMABUFSZ; return rbsz; } /* * Auto-detect the installed adapters. */ int cx_find (port_t *board_ports) { int i, n; for (i=0, n=0; porttab[i] && n<NBRD; i++) if (cx_probe_board (porttab[i], -1, -1)) board_ports[n++] = porttab[i]; return n; } /* * Initialize the adapter. */ int cx_open_board (cx_board_t *b, int num, port_t port, int irq, int dma) { cx_chan_t *c; if (num >= NBRD || ! cx_probe_board (port, irq, dma)) return 0; /* init callback pointers */ for (c=b->chan; c<b->chan+NCHAN; ++c) { c->call_on_tx = 0; c->call_on_rx = 0; c->call_on_msig = 0; c->call_on_err = 0; } cx_init (b, num, port, irq, dma); /* Loading firmware */ if (! cx_setup_board (b, csigma_fw_data, csigma_fw_len, csigma_fw_tvec)) return 0; return 1; } /* * Shutdown the adapter. */ void cx_close_board (cx_board_t *b) { cx_setup_board (b, 0, 0, 0); /* Reset the controller. */ outb (BCR0(b->port), 0); if (b->chan[8].type || b->chan[12].type) outb (BCR0(b->port+0x10), 0); } /* * Start the channel. */ void cx_start_chan (cx_chan_t *c, cx_buf_t *cb, unsigned long phys) { int command = 0; int mode = 0; int ier = 0; int rbsz; c->overflow = 0; /* Setting up buffers */ if (cb) { c->arbuf = cb->rbuffer[0]; c->brbuf = cb->rbuffer[1]; c->atbuf = cb->tbuffer[0]; c->btbuf = cb->tbuffer[1]; c->arphys = phys + ((char*)c->arbuf - (char*)cb); c->brphys = phys + ((char*)c->brbuf - (char*)cb); c->atphys = phys + ((char*)c->atbuf - (char*)cb); c->btphys = phys + ((char*)c->btbuf - (char*)cb); } /* Set current channel number */ outb (CAR(c->port), c->num & 3); /* set receiver A buffer physical address */ outw (ARBADRU(c->port), (unsigned short) (c->arphys>>16)); outw (ARBADRL(c->port), (unsigned short) c->arphys); /* set receiver B buffer physical address */ outw (BRBADRU(c->port), (unsigned short) (c->brphys>>16)); outw (BRBADRL(c->port), (unsigned short) c->brphys); /* set transmitter A buffer physical address */ outw (ATBADRU(c->port), (unsigned short) (c->atphys>>16)); outw (ATBADRL(c->port), (unsigned short) c->atphys); /* set transmitter B buffer physical address */ outw (BTBADRU(c->port), (unsigned short) (c->btphys>>16)); outw (BTBADRL(c->port), (unsigned short) c->btphys); /* rx */ command |= CCR_ENRX; ier |= IER_RXD; if (c->board->dma) { mode |= CMR_RXDMA; if (c->mode == M_ASYNC) ier |= IER_RET; } /* tx */ command |= CCR_ENTX; ier |= (c->mode == M_ASYNC) ? IER_TXD : (IER_TXD | IER_TXMPTY); if (c->board->dma) mode |= CMR_TXDMA; /* Set mode */ outb (CMR(c->port), mode | (c->mode == M_ASYNC ?
CMR_ASYNC : CMR_HDLC)); /* Clear and initialize channel */ cx_cmd (c->port, CCR_CLRCH); cx_cmd (c->port, CCR_INITCH | command); if (c->mode == M_ASYNC) cx_cmd (c->port, CCR_ENTX); /* Start receiver */ rbsz = cx_compute_buf_len(c); outw (ARBCNT(c->port), rbsz); outw (BRBCNT(c->port), rbsz); outw (ARBSTS(c->port), BSTS_OWN24); outw (BRBSTS(c->port), BSTS_OWN24); if (c->mode == M_ASYNC) ier |= IER_MDM; /* Enable interrupts */ outb (IER(c->port), ier); /* Clear DTR and RTS */ cx_set_dtr (c, 0); cx_set_rts (c, 0); } /* * Turn the receiver on/off. */ void cx_enable_receive (cx_chan_t *c, int on) { unsigned char ier; if (cx_receive_enabled(c) && ! on) { outb (CAR(c->port), c->num & 3); if (c->mode == M_ASYNC) { ier = inb (IER(c->port)); outb (IER(c->port), ier & ~ (IER_RXD | IER_RET)); } cx_cmd (c->port, CCR_DISRX); } else if (! cx_receive_enabled(c) && on) { outb (CAR(c->port), c->num & 3); ier = inb (IER(c->port)); if (c->mode == M_ASYNC) outb (IER(c->port), ier | (IER_RXD | IER_RET)); else outb (IER(c->port), ier | IER_RXD); cx_cmd (c->port, CCR_ENRX); } } /* * Turn the transmiter on/off. */ void cx_enable_transmit (cx_chan_t *c, int on) { if (cx_transmit_enabled(c) && ! on) { outb (CAR(c->port), c->num & 3); if (c->mode != M_ASYNC) outb (STCR(c->port), STC_ABORTTX | STC_SNDSPC); cx_cmd (c->port, CCR_DISTX); } else if (! cx_transmit_enabled(c) && on) { outb (CAR(c->port), c->num & 3); cx_cmd (c->port, CCR_ENTX); } } /* * Get channel status. */ int cx_receive_enabled (cx_chan_t *c) { outb (CAR(c->port), c->num & 3); return (inb (CSR(c->port)) & CSRA_RXEN) != 0; } int cx_transmit_enabled (cx_chan_t *c) { outb (CAR(c->port), c->num & 3); return (inb (CSR(c->port)) & CSRA_TXEN) != 0; } unsigned long cx_get_baud (cx_chan_t *c) { return (c->opt.tcor.clk == CLK_EXT) ? 0 : c->txbaud; } int cx_get_loop (cx_chan_t *c) { return c->opt.tcor.llm ? 1 : 0; } int cx_get_nrzi (cx_chan_t *c) { return c->opt.rcor.encod == ENCOD_NRZI; } int cx_get_dpll (cx_chan_t *c) { return c->opt.rcor.dpll ? 1 : 0; } void cx_set_baud (cx_chan_t *c, unsigned long bps) { int clock, period; c->txbaud = c->rxbaud = bps; /* Set current channel number */ outb (CAR(c->port), c->num & 3); if (bps) { if (c->mode == M_ASYNC || c->opt.rcor.dpll || c->opt.tcor.llm) { /* Receive baud - internal */ cx_clock (c->oscfreq, c->rxbaud, &clock, &period); c->opt.rcor.clk = clock; outb (RCOR(c->port), BYTE c->opt.rcor); outb (RBPR(c->port), period); } else { /* Receive baud - external */ c->opt.rcor.clk = CLK_EXT; outb (RCOR(c->port), BYTE c->opt.rcor); outb (RBPR(c->port), 1); } /* Transmit baud - internal */ cx_clock (c->oscfreq, c->txbaud, &clock, &period); c->opt.tcor.clk = clock; c->opt.tcor.ext1x = 0; outb (TBPR(c->port), period); } else if (c->mode != M_ASYNC) { /* External clock - disable local loopback and DPLL */ c->opt.tcor.llm = 0; c->opt.rcor.dpll = 0; /* Transmit baud - external */ c->opt.tcor.ext1x = 1; c->opt.tcor.clk = CLK_EXT; outb (TBPR(c->port), 1); /* Receive baud - external */ c->opt.rcor.clk = CLK_EXT; outb (RCOR(c->port), BYTE c->opt.rcor); outb (RBPR(c->port), 1); } if (c->opt.tcor.llm) outb (COR2(c->port), (BYTE c->hopt.cor2) & ~3); else outb (COR2(c->port), BYTE c->hopt.cor2); outb (TCOR(c->port), BYTE c->opt.tcor); } void cx_set_loop (cx_chan_t *c, int on) { if (! c->txbaud) return; c->opt.tcor.llm = on ? 1 : 0; cx_set_baud (c, c->txbaud); } void cx_set_dpll (cx_chan_t *c, int on) { if (! c->txbaud) return; c->opt.rcor.dpll = on ? 
1 : 0; cx_set_baud (c, c->txbaud); } void cx_set_nrzi (cx_chan_t *c, int nrzi) { c->opt.rcor.encod = (nrzi ? ENCOD_NRZI : ENCOD_NRZ); outb (CAR(c->port), c->num & 3); outb (RCOR(c->port), BYTE c->opt.rcor); } static int cx_send (cx_chan_t *c, char *data, int len, void *attachment) { unsigned char *buf; port_t cnt_port, sts_port; void **attp; /* Set the current channel number. */ outb (CAR(c->port), c->num & 3); /* Determine the buffer order. */ if (inb (DMABSTS(c->port)) & DMABSTS_NTBUF) { if (inb (BTBSTS(c->port)) & BSTS_OWN24) { buf = c->atbuf; cnt_port = ATBCNT(c->port); sts_port = ATBSTS(c->port); attp = &c->attach[0]; } else { buf = c->btbuf; cnt_port = BTBCNT(c->port); sts_port = BTBSTS(c->port); attp = &c->attach[1]; } } else { if (inb (ATBSTS(c->port)) & BSTS_OWN24) { buf = c->btbuf; cnt_port = BTBCNT(c->port); sts_port = BTBSTS(c->port); attp = &c->attach[1]; } else { buf = c->atbuf; cnt_port = ATBCNT(c->port); sts_port = ATBSTS(c->port); attp = &c->attach[0]; } } /* Is it busy? */ if (inb (sts_port) & BSTS_OWN24) return -1; memcpy (buf, data, len); *attp = attachment; /* Start transmitter. */ outw (cnt_port, len); outb (sts_port, BSTS_EOFR | BSTS_INTR | BSTS_OWN24); /* Enable TXMPTY interrupt, * to catch the case when the second buffer is empty. */ if (c->mode != M_ASYNC) { if ((inb(ATBSTS(c->port)) & BSTS_OWN24) && (inb(BTBSTS(c->port)) & BSTS_OWN24)) { outb (IER(c->port), IER_RXD | IER_TXD | IER_TXMPTY); } else outb (IER(c->port), IER_RXD | IER_TXD); } return 0; } /* * Number of free buffs */ int cx_buf_free (cx_chan_t *c) { return ! (inb (ATBSTS(c->port)) & BSTS_OWN24) + ! (inb (BTBSTS(c->port)) & BSTS_OWN24); } /* * Send the data packet. */ int cx_send_packet (cx_chan_t *c, char *data, int len, void *attachment) { if (len >= DMABUFSZ) return -2; if (c->mode == M_ASYNC) { static char buf [DMABUFSZ]; char *p, *t = buf; /* Async -- double all nulls. */ for (p=data; p < data+len && t < buf+DMABUFSZ-1; ++p) if ((*t++ = *p) == 0) *t++ = 0; return cx_send (c, buf, t-buf, attachment); } return cx_send (c, data, len, attachment); } static int cx_receive_interrupt (cx_chan_t *c) { unsigned short risr; int len = 0, rbsz; ++c->rintr; risr = inw (RISR(c->port)); /* Compute optimal receiver buffer length */ rbsz = cx_compute_buf_len(c); if (c->mode == M_ASYNC && (risr & RISA_TIMEOUT)) { unsigned long rcbadr = (unsigned short) inw (RCBADRL(c->port)) | (long) inw (RCBADRU(c->port)) << 16; - unsigned char *buf = 0; + unsigned char *buf = NULL; port_t cnt_port = 0, sts_port = 0; if (rcbadr >= c->brphys && rcbadr < c->brphys+DMABUFSZ) { buf = c->brbuf; len = rcbadr - c->brphys; cnt_port = BRBCNT(c->port); sts_port = BRBSTS(c->port); } else if (rcbadr >= c->arphys && rcbadr < c->arphys+DMABUFSZ) { buf = c->arbuf; len = rcbadr - c->arphys; cnt_port = ARBCNT(c->port); sts_port = ARBSTS(c->port); } if (len) { c->ibytes += len; c->received_data = buf; c->received_len = len; /* Restart receiver. */ outw (cnt_port, rbsz); outb (sts_port, BSTS_OWN24); } return (REOI_TERMBUFF); } /* Receive errors. 
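*/
/*
 * Illustrative sketch (not part of the driver): in the async timeout
 * path above, the byte count is recovered from the controller's current
 * receive DMA address -- reassembled from the 16-bit RCBADRL/RCBADRU
 * halves -- by subtracting the physical base of whichever buffer the
 * address points into:
 */
#if 0
unsigned long rcbadr = (unsigned short) inw (RCBADRL(c->port)) |
	(unsigned long) inw (RCBADRU(c->port)) << 16;
int fill = rcbadr - c->brphys;	/* bytes landed in buffer B so far */
#endif
/* Classify the receive error.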
*/ if (risr & RIS_OVERRUN) { ++c->ierrs; if (c->call_on_err) c->call_on_err (c, CX_OVERRUN); } else if (c->mode != M_ASYNC && (risr & RISH_CRCERR)) { ++c->ierrs; if (c->call_on_err) c->call_on_err (c, CX_CRC); } else if (c->mode != M_ASYNC && (risr & (RISH_RXABORT | RISH_RESIND))) { ++c->ierrs; if (c->call_on_err) c->call_on_err (c, CX_FRAME); } else if (c->mode == M_ASYNC && (risr & RISA_PARERR)) { ++c->ierrs; if (c->call_on_err) c->call_on_err (c, CX_CRC); } else if (c->mode == M_ASYNC && (risr & RISA_FRERR)) { ++c->ierrs; if (c->call_on_err) c->call_on_err (c, CX_FRAME); } else if (c->mode == M_ASYNC && (risr & RISA_BREAK)) { if (c->call_on_err) c->call_on_err (c, CX_BREAK); } else if (! (risr & RIS_EOBUF)) { ++c->ierrs; } else { /* Handle received data. */ len = (risr & RIS_BB) ? inw(BRBCNT(c->port)) : inw(ARBCNT(c->port)); if (len > DMABUFSZ) { /* Fatal error: actual DMA transfer size * exceeds our buffer size. It could be caused * by incorrectly programmed DMA register or * hardware fault. Possibly, should panic here. */ len = DMABUFSZ; } else if (c->mode != M_ASYNC && ! (risr & RIS_EOFR)) { /* The received frame does not fit in the DMA buffer. * It could be caused by serial lie noise, * or if the peer has too big MTU. */ if (! c->overflow) { if (c->call_on_err) c->call_on_err (c, CX_OVERFLOW); c->overflow = 1; ++c->ierrs; } } else if (! c->overflow) { if (risr & RIS_BB) { c->received_data = c->brbuf; c->received_len = len; } else { c->received_data = c->arbuf; c->received_len = len; } if (c->mode != M_ASYNC) ++c->ipkts; c->ibytes += len; } else c->overflow = 0; } /* Restart receiver. */ if (! (inb (ARBSTS(c->port)) & BSTS_OWN24)) { outw (ARBCNT(c->port), rbsz); outb (ARBSTS(c->port), BSTS_OWN24); } if (! (inb (BRBSTS(c->port)) & BSTS_OWN24)) { outw (BRBCNT(c->port), rbsz); outb (BRBSTS(c->port), BSTS_OWN24); } /* Discard exception characters. */ if ((risr & RISA_SCMASK) && c->aopt.cor2.ixon) return (REOI_DISCEXC); else return (0); } static void cx_transmit_interrupt (cx_chan_t *c) { unsigned char tisr; int len = 0; ++c->tintr; tisr = inb (TISR(c->port)); if (tisr & TIS_UNDERRUN) { /* Transmit underrun error */ if (c->call_on_err) c->call_on_err (c, CX_UNDERRUN); ++c->oerrs; } else if (tisr & (TIS_EOBUF | TIS_TXEMPTY | TIS_TXDATA)) { /* Call processing function */ if (tisr & TIS_BB) { len = inw(BTBCNT(c->port)); if (c->call_on_tx) c->call_on_tx (c, c->attach[1], len); } else { len = inw(ATBCNT(c->port)); if (c->call_on_tx) c->call_on_tx (c, c->attach[0], len); } if (c->mode != M_ASYNC && len != 0) ++c->opkts; c->obytes += len; } /* Enable TXMPTY interrupt, * to catch the case when the second buffer is empty. */ if (c->mode != M_ASYNC) { if ((inb (ATBSTS(c->port)) & BSTS_OWN24) && (inb (BTBSTS(c->port)) & BSTS_OWN24)) { outb (IER(c->port), IER_RXD | IER_TXD | IER_TXMPTY); } else outb (IER(c->port), IER_RXD | IER_TXD); } } void cx_int_handler (cx_board_t *b) { unsigned char livr; cx_chan_t *c; while (! (inw (BSR(b->port)) & BSR_NOINTR)) { /* Enter the interrupt context, using IACK bus cycle. Read the local interrupt vector register. 
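*/
/*
 * Illustrative sketch: the vector returned by the IACK cycle encodes
 * both the channel and the interrupt class; bits 2..5 select the
 * channel, bits 0..1 the reason, matching the switch below:
 */
#if 0
int chan = livr >> 2 & 0xf;	/* channel index, 0..15 */
int reason = livr & 3;		/* LIV_MODEM, LIV_EXCEP/LIV_RXDATA or LIV_TXDATA */
#endif
/* Fetch and decode the vector.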
*/ livr = inb (IACK(b->port, BRD_INTR_LEVEL)); c = b->chan + (livr>>2 & 0xf); if (c->type == T_NONE) continue; switch (livr & 3) { case LIV_MODEM: /* modem interrupt */ ++c->mintr; if (c->call_on_msig) c->call_on_msig (c); outb (MEOIR(c->port), 0); break; case LIV_EXCEP: /* receive exception */ case LIV_RXDATA: /* receive interrupt */ outb (REOIR(c->port), cx_receive_interrupt (c)); if (c->call_on_rx && c->received_data) { c->call_on_rx (c, c->received_data, c->received_len); c->received_data = 0; } break; case LIV_TXDATA: /* transmit interrupt */ cx_transmit_interrupt (c); outb (TEOIR(c->port), 0); break; } } } /* * Register event processing functions */ void cx_register_transmit (cx_chan_t *c, void (*func) (cx_chan_t *c, void *attachment, int len)) { c->call_on_tx = func; } void cx_register_receive (cx_chan_t *c, void (*func) (cx_chan_t *c, char *data, int len)) { c->call_on_rx = func; } void cx_register_modem (cx_chan_t *c, void (*func) (cx_chan_t *c)) { c->call_on_msig = func; } void cx_register_error (cx_chan_t *c, void (*func) (cx_chan_t *c, int data)) { c->call_on_err = func; } /* * Async protocol functions. */ /* * Enable/disable transmitter. */ void cx_transmitter_ctl (cx_chan_t *c,int start) { outb (CAR(c->port), c->num & 3); cx_cmd (c->port, start ? CCR_ENTX : CCR_DISTX); } /* * Discard all data queued in transmitter. */ void cx_flush_transmit (cx_chan_t *c) { outb (CAR(c->port), c->num & 3); cx_cmd (c->port, CCR_CLRTX); } /* * Send the XON/XOFF flow control symbol. */ void cx_xflow_ctl (cx_chan_t *c, int on) { outb (CAR(c->port), c->num & 3); outb (STCR(c->port), STC_SNDSPC | (on ? STC_SSPC_1 : STC_SSPC_2)); } /* * Send the break signal for a given number of milliseconds. */ void cx_send_break (cx_chan_t *c, int msec) { static unsigned char buf [128]; unsigned char *p; p = buf; *p++ = 0; /* extended transmit command */ *p++ = 0x81; /* send break */ if (msec > 10000) /* max 10 seconds */ msec = 10000; if (msec < 10) /* min 10 msec */ msec = 10; while (msec > 0) { int ms = 250; /* 250 msec */ if (ms > msec) ms = msec; msec -= ms; *p++ = 0; /* extended transmit command */ *p++ = 0x82; /* insert delay */ *p++ = ms; } *p++ = 0; /* extended transmit command */ *p++ = 0x83; /* stop break */ cx_send (c, buf, p-buf, 0); } /* * Set async parameters. */ void cx_set_async_param (cx_chan_t *c, int baud, int bits, int parity, int stop2, int ignpar, int rtscts, int ixon, int ixany, int symstart, int symstop) { int clock, period; cx_cor1_async_t cor1; /* Set character length and parity mode. */ BYTE cor1 = 0; cor1.charlen = bits - 1; cor1.parmode = parity ? PARM_NORMAL : PARM_NOPAR; cor1.parity = parity==1 ? PAR_ODD : PAR_EVEN; cor1.ignpar = ignpar ? 1 : 0; /* Enable/disable hardware CTS. */ c->aopt.cor2.ctsae = rtscts ? 1 : 0; /* Enable extended transmit command mode. * Unfortunately, there is no other method for sending break. */ c->aopt.cor2.etc = 1; /* Enable/disable hardware XON/XOFF. */ c->aopt.cor2.ixon = ixon ? 1 : 0; c->aopt.cor2.ixany = ixany ? 1 : 0; /* Set the number of stop bits. */ if (stop2) c->aopt.cor3.stopb = STOPB_2; else c->aopt.cor3.stopb = STOPB_1; /* Disable/enable passing XON/XOFF chars to the host. */ c->aopt.cor3.scde = ixon ? 1 : 0; c->aopt.cor3.flowct = ixon ? FLOWCC_NOTPASS : FLOWCC_PASS; c->aopt.schr1 = symstart; /* XON */ c->aopt.schr2 = symstop; /* XOFF */ /* Set current channel number. */ outb (CAR(c->port), c->num & 3); /* Set up clock values. */ if (baud) { c->rxbaud = c->txbaud = baud; /* Receiver. 
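*/
/*
 * Illustrative sketch of what cx_clock() computes (assumption: a
 * CD2400-style clock ladder where CLK0..CLK4 divide the oscillator by
 * 8, 32, 128, 512 and 2048, with the 8-bit baud-period register
 * dividing further; the DDK's cx_clock() is authoritative):
 */
#if 0
long div = 8; int clk = 0;
while (clk < 4 && c->oscfreq / div / baud > 255) { div *= 4; ++clk; }
period = c->oscfreq / div / baud;	/* candidate RBPR/TBPR value */
#endif
/* Receiver clocking.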
*/ cx_clock (c->oscfreq, c->rxbaud, &clock, &period); c->opt.rcor.clk = clock; outb (RCOR(c->port), BYTE c->opt.rcor); outb (RBPR(c->port), period); /* Transmitter. */ cx_clock (c->oscfreq, c->txbaud, &clock, &period); c->opt.tcor.clk = clock; c->opt.tcor.ext1x = 0; outb (TCOR(c->port), BYTE c->opt.tcor); outb (TBPR(c->port), period); } outb (COR2(c->port), BYTE c->aopt.cor2); outb (COR3(c->port), BYTE c->aopt.cor3); outb (SCHR1(c->port), c->aopt.schr1); outb (SCHR2(c->port), c->aopt.schr2); if (BYTE c->aopt.cor1 != BYTE cor1) { BYTE c->aopt.cor1 = BYTE cor1; outb (COR1(c->port), BYTE c->aopt.cor1); /* Any change to COR1 require reinitialization. */ /* Unfortunately, it may cause transmitter glitches... */ cx_cmd (c->port, CCR_INITCH); } } /* * Set mode: M_ASYNC or M_HDLC. * Both receiver and transmitter are disabled. */ int cx_set_mode (cx_chan_t *c, int mode) { if (mode == M_HDLC) { if (c->type == T_ASYNC) return -1; if (c->mode == M_HDLC) return 0; c->mode = M_HDLC; } else if (mode == M_ASYNC) { if (c->type == T_SYNC_RS232 || c->type == T_SYNC_V35 || c->type == T_SYNC_RS449) return -1; if (c->mode == M_ASYNC) return 0; c->mode = M_ASYNC; c->opt.tcor.ext1x = 0; c->opt.tcor.llm = 0; c->opt.rcor.dpll = 0; c->opt.rcor.encod = ENCOD_NRZ; if (! c->txbaud || ! c->rxbaud) c->txbaud = c->rxbaud = 9600; } else return -1; cx_setup_chan (c); cx_start_chan (c, 0, 0); cx_enable_receive (c, 0); cx_enable_transmit (c, 0); return 0; } /* * Set port type for old models of Sigma */ void cx_set_port (cx_chan_t *c, int iftype) { if (c->board->type == B_SIGMA_XXX) { switch (c->num) { case 0: if ((c->board->if0type != 0) == (iftype != 0)) return; c->board->if0type = iftype; c->board->bcr0 &= ~BCR0_UMASK; if (c->board->if0type && (c->type==T_UNIV_RS449 || c->type==T_UNIV_V35)) c->board->bcr0 |= BCR0_UI_RS449; outb (BCR0(c->board->port), c->board->bcr0); break; case 8: if ((c->board->if8type != 0) == (iftype != 0)) return; c->board->if8type = iftype; c->board->bcr0b &= ~BCR0_UMASK; if (c->board->if8type && (c->type==T_UNIV_RS449 || c->type==T_UNIV_V35)) c->board->bcr0b |= BCR0_UI_RS449; outb (BCR0(c->board->port+0x10), c->board->bcr0b); break; } } } /* * Get port type for old models of Sigma * -1 Fixed port type or auto detect * 0 RS232 * 1 V35 * 2 RS449 */ int cx_get_port (cx_chan_t *c) { int iftype; if (c->board->type == B_SIGMA_XXX) { switch (c->num) { case 0: iftype = c->board->if0type; break; case 8: iftype = c->board->if8type; break; default: return -1; } if (iftype) switch (c->type) { case T_UNIV_V35: return 1; case T_UNIV_RS449: return 2; default: return -1; } else return 0; } else return -1; } void cx_intr_off (cx_board_t *b) { outb (BCR0(b->port), b->bcr0 & ~BCR0_IRQ_MASK); if (b->chan[8].port || b->chan[12].port) outb (BCR0(b->port+0x10), b->bcr0b & ~BCR0_IRQ_MASK); } void cx_intr_on (cx_board_t *b) { outb (BCR0(b->port), b->bcr0); if (b->chan[8].port || b->chan[12].port) outb (BCR0(b->port+0x10), b->bcr0b); } int cx_checkintr (cx_board_t *b) { return (!(inw (BSR(b->port)) & BSR_NOINTR)); } Index: head/sys/dev/cx/if_cx.c =================================================================== --- head/sys/dev/cx/if_cx.c (revision 313981) +++ head/sys/dev/cx/if_cx.c (revision 313982) @@ -1,2545 +1,2545 @@ /*- * Cronyx-Sigma adapter driver for FreeBSD. * Supports PPP/HDLC and Cisco/HDLC protocol in synchronous mode, * and asynchronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1994-2002 Cronyx Engineering. 
* Author: Serge Vakulenko, * * Copyright (C) 1999-2004 Cronyx Engineering. * Rewritten on DDK, ported to NETGRAPH, rewritten for FreeBSD 3.x-5.x by * Kurakin Roman, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. * * Cronyx Id: if_cx.c,v 1.1.2.34 2004/06/23 17:09:13 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # include # include # include #else # include # include # define PP_CISCO IFF_LINK2 # include #endif #define NCX 1 /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CX_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CX_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CX_LOCK_NAME "cxX" #define CX_LOCK(_bd) mtx_lock (&(_bd)->cx_mtx) #define CX_UNLOCK(_bd) mtx_unlock (&(_bd)->cx_mtx) #define CX_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->cx_mtx, MA_OWNED) typedef struct _async_q { int beg; int end; #define BF_SZ 14400 int buf[BF_SZ+1]; } async_q; #define AQ_GSZ(q) ((BF_SZ + (q)->end - (q)->beg)%BF_SZ) #define AQ_PUSH(q,c) {*((q)->buf + (q)->end) = c;\ (q)->end = ((q)->end + 1)%BF_SZ;} #define AQ_POP(q,c) {c = *((q)->buf + (q)->beg);\ (q)->beg = ((q)->beg + 1)%BF_SZ;} static void cx_identify __P((driver_t *, device_t)); static int cx_probe __P((device_t)); static int cx_attach __P((device_t)); static int cx_detach __P((device_t)); static t_open_t cx_topen; static t_modem_t cx_tmodem; static t_close_t cx_tclose; static device_method_t cx_isa_methods [] = { DEVMETHOD(device_identify, cx_identify), DEVMETHOD(device_probe, cx_probe), DEVMETHOD(device_attach, cx_attach), DEVMETHOD(device_detach, cx_detach), DEVMETHOD_END }; typedef struct _cx_dma_mem_t { unsigned long phys; void *virt; size_t size; bus_dma_tag_t dmat; bus_dmamap_t mapp; } cx_dma_mem_t; typedef struct _drv_t { char name [8]; cx_chan_t *chan; cx_board_t *board; cx_dma_mem_t dmamem; struct tty *tty; struct callout dcd_timeout_handle; unsigned callout; unsigned lock; int open_dev; int cd; int running; #ifdef NETGRAPH char nodename [NG_NODESIZ]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue lo_queue; struct ifqueue hi_queue; #else struct ifqueue queue; struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; struct cdev *devt; async_q aqueue; #define CX_READ 1 #define CX_WRITE 2 int intr_action; short atimeout; } drv_t; typedef struct _bdrv_t { cx_board_t *board; struct resource *base_res; struct resource *drq_res; struct resource *irq_res; int base_rid; int drq_rid; int irq_rid; void *intrhand; drv_t channel [NCHAN]; struct mtx cx_mtx; } bdrv_t; static driver_t cx_isa_driver = { "cx", cx_isa_methods, sizeof (bdrv_t), }; static devclass_t cx_devclass; extern long csigma_fw_len; extern const char *csigma_fw_version; extern const char *csigma_fw_date; extern const char 
*csigma_fw_copyright; extern const cr_dat_tst_t csigma_fw_tvec[]; extern const u_char csigma_fw_data[]; static void cx_oproc (struct tty *tp); static int cx_param (struct tty *tp, struct termios *t); static void cx_stop (struct tty *tp, int flag); static void cx_receive (cx_chan_t *c, char *data, int len); static void cx_transmit (cx_chan_t *c, void *attachment, int len); static void cx_error (cx_chan_t *c, int data); static void cx_modem (cx_chan_t *c); static void cx_up (drv_t *d); static void cx_start (drv_t *d); static void cx_softintr (void *); static void *cx_fast_ih; static void cx_down (drv_t *d); static void cx_watchdog (drv_t *d); static void cx_watchdog_timer (void *arg); static void cx_carrier (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void cx_ifstart (struct ifnet *ifp); static void cx_tlf (struct sppp *sp); static void cx_tls (struct sppp *sp); static int cx_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void cx_initialize (void *softc); #endif static cx_board_t *adapter [NCX]; static drv_t *channel [NCX*NCHAN]; static struct callout led_timo [NCX]; static struct callout timeout_handle; static int cx_open (struct cdev *dev, int flag, int mode, struct thread *td); static int cx_close (struct cdev *dev, int flag, int mode, struct thread *td); static int cx_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); static struct cdevsw cx_cdevsw = { .d_version = D_VERSION, .d_open = cx_open, .d_close = cx_close, .d_ioctl = cx_ioctl, .d_name = "cx", .d_flags = D_TTY, }; static int MY_SOFT_INTR; /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, u_int len) { struct mbuf *m, *o, *p; MGETHDR (m, M_NOWAIT, MT_DATA); if (! m) return 0; if (len >= MINCLSIZE) MCLGET (m, M_NOWAIT); m->m_pkthdr.len = len; m->m_len = 0; p = m; while (len) { u_int n = M_TRAILINGSPACE (p); if (n > len) n = len; if (! n) { /* Allocate new mbuf. */ o = p; MGET (p, M_NOWAIT, MT_DATA); if (! p) { m_freem (m); return 0; } if (len >= MINCLSIZE) MCLGET (p, M_NOWAIT); p->m_len = 0; o->m_next = p; n = M_TRAILINGSPACE (p); if (n > len) n = len; } bcopy (buf, mtod (p, caddr_t) + p->m_len, n); p->m_len += n; buf = n + (char*) buf; len -= n; } return m; } /* * Recover after lost transmit interrupts. */ static void cx_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NCX; i++) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { d = channel[i * NCHAN + k]; if (! d) continue; s = splhigh (); CX_LOCK ((bdrv_t *)d->board->sys); if (d->atimeout == 1 && d->tty && d->tty->t_state & TS_BUSY) { d->tty->t_state &= ~TS_BUSY; if (d->tty->t_dev) { d->intr_action |= CX_WRITE; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } CX_DEBUG (d, ("cx_timeout\n")); } if (d->atimeout) d->atimeout--; CX_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } callout_reset (&timeout_handle, hz*5, cx_timeout, 0); } static void cx_led_off (void *arg) { cx_board_t *b = arg; bdrv_t *bd = b->sys; int s; s = splhigh (); CX_LOCK (bd); cx_led (b, 0); CX_UNLOCK (bd); splx (s); } /* * Activate interrupt handler from DDK. */ static void cx_intr (void *arg) { bdrv_t *bd = arg; cx_board_t *b = bd->board; #ifndef NETGRAPH int i; #endif int s = splhigh (); CX_LOCK (bd); /* Turn LED on. */ cx_led (b, 1); cx_int_handler (b); /* Turn LED off 50 msec later. 
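*/
/*
 * Illustrative note: callout intervals are expressed in clock ticks; at
 * hz ticks per second, hz/20 ticks is 1/20 s = 50 ms.  A generic helper
 * (sketch, not in the driver) would be:
 */
#if 0
#define MS_TO_TICKS(ms)	((ms) * hz / 1000)	/* MS_TO_TICKS(50) == hz/20 */
#endif
/* Turn LED off 50 msec later.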
*/ callout_reset (&led_timo[b->num], hz/20, cx_led_off, b); CX_UNLOCK (bd); splx (s); #ifndef NETGRAPH /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->queue)) { IF_DEQUEUE (&d->queue,m); if (!m) continue; sppp_input (d->ifp, m); } } #endif } static int probe_irq (cx_board_t *b, int irq) { int mask, busy, cnt; /* Clear pending irq, if any. */ cx_probe_irq (b, -irq); DELAY (100); for (cnt=0; cnt<5; ++cnt) { /* Get the mask of pending irqs, assuming they are busy. * Activate the adapter on given irq. */ busy = cx_probe_irq (b, irq); DELAY (100); /* Get the mask of active irqs. * Deactivate our irq. */ mask = cx_probe_irq (b, -irq); DELAY (100); if ((mask & ~busy) == 1 << irq) { cx_probe_irq (b, 0); /* printf ("cx%d: irq %d ok, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ return 1; } } /* printf ("cx%d: irq %d not functional, mask=0x%04x, busy=0x%04x\n", b->num, irq, mask, busy); */ cx_probe_irq (b, 0); return 0; } static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; static char dmatab [] = { 7, 6, 5, 0 }; static char irqtab [] = { 5, 10, 11, 7, 3, 15, 12, 0 }; static int cx_is_free_res (device_t dev, int rid, int type, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource *res; if (!(res = bus_alloc_resource (dev, type, &rid, start, end, count, 0))) return 0; bus_release_resource (dev, type, rid, res); return 1; } static void cx_identify (driver_t *driver, device_t dev) { rman_res_t iobase, rescount; int devcount; device_t *devices; device_t child; devclass_t my_devclass; int i, k; if ((my_devclass = devclass_find ("cx")) == NULL) return; devclass_get_devices (my_devclass, &devices, &devcount); if (devcount == 0) { /* We should find all devices by our self. We could alter other * devices, but we don't have a choise */ for (i = 0; (iobase = porttab [i]) != 0; i++) { if (!cx_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (cx_probe_board (iobase, -1, -1) == 0) continue; devcount++; child = BUS_ADD_CHILD (dev, ISA_ORDER_SPECULATIVE, "cx", -1); if (child == NULL) return; device_set_desc_copy (child, "Cronyx Sigma"); device_set_driver (child, driver); bus_set_resource (child, SYS_RES_IOPORT, 0, iobase, NPORT); if (devcount >= NCX) break; } } else { static short porttab [] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0x3c0, 0x3e0, 0 }; /* Lets check user choise. 
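(i.e., verify the port addresses the user configured against the
 * probe list.) */
/*
 * Illustrative note: cx_is_free_res() above tests availability by
 * tentatively allocating the range and immediately releasing it; if
 * bus_alloc_resource() succeeds, no other driver owns the window.
 * Usage sketch for one candidate I/O base:
 */
#if 0
if (cx_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)
    && cx_probe_board (iobase, -1, -1))
	use_this_port (iobase);	/* hypothetical consumer */
#endif
/* Walk the configured devices.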
*/ for (k = 0; k < devcount; k++) { if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) continue; for (i = 0; porttab [i] != 0; i++) { if (porttab [i] != iobase) continue; if (!cx_is_free_res (devices[k], 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (cx_probe_board (iobase, -1, -1) == 0) continue; porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Sigma"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); devices[k] = 0; continue; } } for (k = 0; k < devcount; k++) { if (devices[k] == 0) continue; if (bus_get_resource (devices[k], SYS_RES_IOPORT, 0, &iobase, &rescount) == 0) continue; for (i = 0; (iobase = porttab [i]) != 0; i++) { if (porttab [i] == -1) { continue; } if (!cx_is_free_res (devices[k], 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) continue; if (cx_probe_board (iobase, -1, -1) == 0) continue; bus_set_resource (devices[k], SYS_RES_IOPORT, 0, iobase, NPORT); porttab [i] = -1; device_set_desc_copy (devices[k], "Cronyx Sigma"); break; } if (porttab [i] == 0) { device_delete_child ( device_get_parent (devices[k]), devices [k]); } } free (devices, M_TEMP); } return; } static int cx_probe (device_t dev) { int unit = device_get_unit (dev); int i; rman_res_t iobase, rescount; if (!device_get_desc (dev) || strcmp (device_get_desc (dev), "Cronyx Sigma")) return ENXIO; if (bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount) != 0) { printf ("cx%d: Couldn't get IOPORT\n", unit); return ENXIO; } if (!cx_is_free_res (dev, 0, SYS_RES_IOPORT, iobase, iobase + NPORT, NPORT)) { printf ("cx%d: Resource IOPORT isn't free %lx\n", unit, iobase); return ENXIO; } for (i = 0; porttab [i] != 0; i++) { if (porttab [i] == iobase) { porttab [i] = -1; break; } } if (porttab [i] == 0) { return ENXIO; } if (!cx_probe_board (iobase, -1, -1)) { printf ("cx%d: probing for Sigma at %lx faild\n", unit, iobase); return ENXIO; } return 0; } static void cx_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int cx_bus_dma_mem_alloc (int bnum, int cnum, cx_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, NULL, NULL, &dmem->dmat); if (error) { if (cnum >= 0) printf ("cx%d-%d: ", bnum, cnum); else printf ("cx%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("cx%d-%d: ", bnum, cnum); else printf ("cx%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, cx_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("cx%d-%d: ", bnum, cnum); else printf ("cx%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } return 1; } static void cx_bus_dma_mem_free (cx_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } /* * The adapter is present, initialize the driver structures. 
*/ static int cx_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); rman_res_t iobase, drq, irq, rescount; int unit = device_get_unit (dev); char *cx_ln = CX_LOCK_NAME; cx_board_t *b; cx_chan_t *c; drv_t *d; int i; int s; KASSERT ((bd != NULL), ("cx%d: NULL device softc\n", unit)); bus_get_resource (dev, SYS_RES_IOPORT, 0, &iobase, &rescount); bd->base_rid = 0; bd->base_res = bus_alloc_resource (dev, SYS_RES_IOPORT, &bd->base_rid, iobase, iobase + NPORT, NPORT, RF_ACTIVE); if (! bd->base_res) { printf ("cx%d: cannot allocate base address\n", unit); return ENXIO; } if (bus_get_resource (dev, SYS_RES_DRQ, 0, &drq, &rescount) != 0) { for (i = 0; (drq = dmatab [i]) != 0; i++) { if (!cx_is_free_res (dev, 0, SYS_RES_DRQ, drq, drq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_DRQ, 0, drq, 1); break; } if (dmatab[i] == 0) { bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("cx%d: Couldn't get DRQ\n", unit); return ENXIO; } } bd->drq_rid = 0; bd->drq_res = bus_alloc_resource (dev, SYS_RES_DRQ, &bd->drq_rid, drq, drq + 1, 1, RF_ACTIVE); if (! bd->drq_res) { printf ("cx%d: cannot allocate drq\n", unit); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } if (bus_get_resource (dev, SYS_RES_IRQ, 0, &irq, &rescount) != 0) { for (i = 0; (irq = irqtab [i]) != 0; i++) { if (!cx_is_free_res (dev, 0, SYS_RES_IRQ, irq, irq + 1, 1)) continue; bus_set_resource (dev, SYS_RES_IRQ, 0, irq, 1); break; } if (irqtab[i] == 0) { bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); printf ("cx%d: Couldn't get IRQ\n", unit); return ENXIO; } } bd->irq_rid = 0; bd->irq_res = bus_alloc_resource (dev, SYS_RES_IRQ, &bd->irq_rid, irq, irq + 1, 1, RF_ACTIVE); if (! bd->irq_res) { printf ("cx%d: Couldn't allocate irq\n", unit); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } b = malloc (sizeof (cx_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cx:%d: Couldn't allocate memory\n", unit); return (ENXIO); } adapter[unit] = b; bzero (b, sizeof(cx_board_t)); if (! cx_open_board (b, unit, iobase, irq, drq)) { printf ("cx%d: error loading firmware\n", unit); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } bd->board = b; cx_ln[2] = '0' + unit; mtx_init (&bd->cx_mtx, cx_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); if (! 
probe_irq (b, irq)) { printf ("cx%d: irq %ld not functional\n", unit, irq); bd->board = 0; adapter [unit] = 0; mtx_destroy (&bd->cx_mtx); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); return ENXIO; } b->sys = bd; callout_init (&led_timo[b->num], 1); s = splhigh (); if (bus_setup_intr (dev, bd->irq_res, INTR_TYPE_NET|INTR_MPSAFE, NULL, cx_intr, bd, &bd->intrhand)) { printf ("cx%d: Can't setup irq %ld\n", unit, irq); bd->board = 0; b->sys = 0; adapter [unit] = 0; mtx_destroy (&bd->cx_mtx); free (b, M_DEVBUF); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); splx (s); return ENXIO; } CX_LOCK (bd); cx_init (b, b->num, b->port, irq, drq); cx_setup_board (b, 0, 0, 0); CX_UNLOCK (bd); printf ("cx%d: <%s>\n", b->num, b->name); for (c=b->chan; c<b->chan+NCHAN; ++c) { if (c->type == T_NONE) continue; d = &bd->channel[c->num]; d->dmamem.size = sizeof(cx_buf_t); if (! cx_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; d->board = b; d->chan = c; d->open_dev = 0; c->sys = d; channel [b->num*NCHAN + c->num] = d; sprintf (d->name, "cx%d.%d", b->num, c->num); switch (c->type) { case T_SYNC_RS232: case T_SYNC_V35: case T_SYNC_RS449: case T_UNIV: case T_UNIV_RS232: case T_UNIV_RS449: case T_UNIV_V35: callout_init (&d->timeout_handle, 1); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; cx_bus_dma_mem_free (&d->dmamem); continue; } NG_NODE_SET_PRIVATE (d->node, d); sprintf (d->nodename, "%s%d", NG_CX_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); NG_NODE_UNREF (d->node); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; cx_bus_dma_mem_free (&d->dmamem); continue; } d->lo_queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; mtx_init (&d->lo_queue.ifq_mtx, "cx_queue_lo", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "cx_queue_hi", NULL, MTX_DEF); #else /*NETGRAPH*/ d->ifp = if_alloc(IFT_PPP); if (d->ifp == NULL) { printf ("%s: cannot if_alloc() common interface\n", d->name); channel [b->num*NCHAN + c->num] = 0; c->sys = 0; cx_bus_dma_mem_free (&d->dmamem); continue; } d->ifp->if_softc = d; if_initname (d->ifp, "cx", b->num * NCHAN + c->num); d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = cx_sioctl; d->ifp->if_start = cx_ifstart; d->ifp->if_init = cx_initialize; d->queue.ifq_maxlen = 2; mtx_init (&d->queue.ifq_mtx, "cx_queue", NULL, MTX_DEF); sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = cx_tlf; IFP2SP(d->ifp)->pp_tls = cx_tls; /* If BPF is in the kernel, call the attach for it. * Size of PPP header is 4 bytes.
*/ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ } d->tty = ttyalloc (); d->tty->t_open = cx_topen; d->tty->t_close = cx_tclose; d->tty->t_param = cx_param; d->tty->t_stop = cx_stop; d->tty->t_modem = cx_tmodem; d->tty->t_oproc = cx_oproc; d->tty->t_sc = d; CX_LOCK (bd); cx_start_chan (c, d->dmamem.virt, d->dmamem.phys); cx_register_receive (c, &cx_receive); cx_register_transmit (c, &cx_transmit); cx_register_error (c, &cx_error); cx_register_modem (c, &cx_modem); CX_UNLOCK (bd); ttycreate(d->tty, TS_CALLOUT, "x%r%r", b->num, c->num); d->devt = make_dev (&cx_cdevsw, b->num*NCHAN + c->num + 64, UID_ROOT, GID_WHEEL, 0600, "cx%d", b->num*NCHAN + c->num); d->devt->si_drv1 = d; callout_init (&d->dcd_timeout_handle, 1); } splx (s); return 0; } static int cx_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); cx_board_t *b = bd->board; cx_chan_t *c; int s; KASSERT (mtx_initialized (&bd->cx_mtx), ("cx mutex not initialized")); s = splhigh (); CX_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || d->chan->type == T_NONE) continue; if (d->lock) { CX_UNLOCK (bd); splx (s); return EBUSY; } if (c->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (d->open_dev|0x2)) { CX_UNLOCK (bd); splx (s); return EBUSY; } if (d->running) { CX_UNLOCK (bd); splx (s); return EBUSY; } } /* Deactivate the timeout routine. And soft interrupt*/ callout_stop (&led_timo[b->num]); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = c->sys; if (!d || d->chan->type == T_NONE) continue; callout_stop (&d->dcd_timeout_handle); } CX_UNLOCK (bd); bus_teardown_intr (dev, bd->irq_res, bd->intrhand); bus_release_resource (dev, SYS_RES_IRQ, bd->irq_rid, bd->irq_res); bus_release_resource (dev, SYS_RES_DRQ, bd->drq_rid, bd->drq_res); bus_release_resource (dev, SYS_RES_IOPORT, bd->base_rid, bd->base_res); CX_LOCK (bd); cx_close_board (b); /* Detach the interfaces, free buffer memory. */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || d->chan->type == T_NONE) continue; if (d->tty) { ttyfree (d->tty); d->tty = NULL; } callout_stop (&d->timeout_handle); #ifdef NETGRAPH if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->lo_queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #else /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); if_detach (d->ifp); if_free(d->ifp); /* XXXRIK: check interconnection with irq handler */ IF_DRAIN (&d->queue); mtx_destroy (&d->queue.ifq_mtx); #endif destroy_dev (d->devt); } cx_led_off (b); CX_UNLOCK (bd); callout_drain (&led_timo[b->num]); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = c->sys; if (!d || d->chan->type == T_NONE) continue; callout_drain (&d->dcd_timeout_handle); callout_drain (&d->timeout_handle); } splx (s); s = splhigh (); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (!d || d->chan->type == T_NONE) continue; /* Deallocate buffers. 
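*/
/*
 * Illustrative note: cx_bus_dma_mem_free() (defined earlier in this
 * file) releases in strict reverse order of setup -- unload the map,
 * free the memory, then destroy the tag:
 */
#if 0
cx_dma_mem_t *dmem = &d->dmamem;
bus_dmamap_unload (dmem->dmat, dmem->mapp);
bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp);
bus_dma_tag_destroy (dmem->dmat);
#endif
/* Deallocate buffers.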
*/ cx_bus_dma_mem_free (&d->dmamem); } - bd->board = 0; - adapter [b->num] = 0; + bd->board = NULL; + adapter [b->num] = NULL; free (b, M_DEVBUF); splx (s); mtx_destroy (&bd->cx_mtx); return 0; } #ifndef NETGRAPH static void cx_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CX_LOCK (bd); cx_start (d); CX_UNLOCK (bd); } static void cx_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CX_DEBUG (d, ("cx_tlf\n")); /* cx_set_dtr (d->chan, 0);*/ /* cx_set_rts (d->chan, 0);*/ if (!(IFP2SP(d->ifp)->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_down (sp); } static void cx_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CX_DEBUG (d, ("cx_tls\n")); if (!(IFP2SP(d->ifp)->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_up (sp); } /* * Initialization of interface. * It seems to be never called by upper level. */ static void cx_initialize (void *softc) { drv_t *d = softc; CX_DEBUG (d, ("cx_initialize\n")); } /* * Process an ioctl request. */ static int cx_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; /* No socket ioctls while the channel is in async mode. */ if (d->chan->type == T_NONE || d->chan->mode == M_ASYNC) return EBUSY; /* Socket ioctls on slave subchannels are not allowed. */ was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; s = splhigh (); CX_LOCK (bd); if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; CX_UNLOCK (bd); splx (s); switch (cmd) { default: CX_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CX_DEBUG2 (d, ("SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CX_DEBUG2 (d, ("SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CX_DEBUG2 (d, ("SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CX_DEBUG2 (d, ("SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splhigh (); CX_LOCK (bd); should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; if (!was_up && should_be_up) { /* Interface goes up -- start it. */ cx_up (d); cx_start (d); } else if (was_up && !should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ cx_down (d); } CX_UNLOCK (bd); splx (s); return 0; } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void cx_down (drv_t *d) { int s = splhigh (); CX_DEBUG (d, ("cx_down\n")); cx_set_dtr (d->chan, 0); cx_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); splx (s); } /* * Start the interface. Called on splimp(). */ static void cx_up (drv_t *d) { int s = splhigh (); CX_DEBUG (d, ("cx_up\n")); cx_set_dtr (d->chan, 1); cx_set_rts (d->chan, 1); d->running = 1; splx (s); } /* * Start output on the (slave) interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void cx_send (drv_t *d) { struct mbuf *m; u_short len; CX_DEBUG2 (d, ("cx_send\n")); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! cx_get_dsr (d->chan) && ! cx_get_loop(d->chan)) return; if (cx_buf_free (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->lo_queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! 
m) return; #ifndef NETGRAPH BPF_MTAP (d->ifp, m); #endif len = m_length (m, NULL); if (! m->m_next) cx_send_packet (d->chan, (u_char*)mtod (m, caddr_t), len, 0); else { u_char buf [DMABUFSZ]; m_copydata (m, 0, len, buf); cx_send_packet (d->chan, buf, len, 0); } m_freem (m); /* Set up transmit timeout, 10 seconds. */ d->timeout = 10; } #ifndef NETGRAPH d->ifp->if_drv_flags |= IFF_DRV_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void cx_start (drv_t *d) { int s = splhigh (); if (d->running) { if (! d->chan->dtr) cx_set_dtr (d->chan, 1); if (! d->chan->rts) cx_set_rts (d->chan, 1); cx_send (d); callout_reset (&d->timeout_handle, hz, cx_watchdog_timer, d); } splx (s); } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void cx_watchdog (drv_t *d) { CX_DEBUG (d, ("device timeout\n")); if (d->running) { cx_setup_chan (d->chan); cx_start_chan (d->chan, 0, 0); cx_set_dtr (d->chan, 1); cx_set_rts (d->chan, 1); cx_start (d); } } static void cx_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CX_LOCK (bd); if (d->timeout == 1) cx_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, cx_watchdog_timer, d); CX_UNLOCK (bd); } /* * Transmit callback function. */ static void cx_transmit (cx_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; if (!d) return; if (c->mode == M_ASYNC && d->tty) { d->tty->t_state &= ~(TS_BUSY | TS_FLUSH); d->atimeout = 0; if (d->tty->t_dev) { d->intr_action |= CX_WRITE; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } return; } d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cx_start (d); } /* * Process the received packet. */ static void cx_receive (cx_chan_t *c, char *data, int len) { drv_t *d = c->sys; struct mbuf *m; char *cc = data; #ifdef NETGRAPH int error; #endif if (!d) return; if (c->mode == M_ASYNC && d->tty) { if (d->tty->t_state & TS_ISOPEN) { async_q *q = &d->aqueue; int size = BF_SZ - 1 - AQ_GSZ (q); if (len <= 0 && !size) return; if (len > size) { c->ierrs++; cx_error (c, CX_OVERRUN); len = size - 1; } while (len--) { AQ_PUSH (q, *(unsigned char *)cc); cc++; } d->intr_action |= CX_READ; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } return; } if (! d->running) return; m = makembuf (data, len); if (! m) { CX_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; NG_SEND_DATA_ONLY (error, d->hook, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ BPF_MTAP(d->ifp, m); IF_ENQUEUE (&d->queue, m); #endif } #define CONDITION(t,tp) (!(t->c_iflag & (ICRNL | IGNCR | IMAXBEL | INLCR | ISTRIP | IXON))\ && (!(tp->t_iflag & BRKINT) || (tp->t_iflag & IGNBRK))\ && (!(tp->t_iflag & PARMRK)\ || (tp->t_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))\ && !(t->c_lflag & (ECHO | ICANON | IEXTEN | ISIG | PENDIN))\ && linesw[tp->t_line]->l_rint == ttyinput) /* * Error callback function. 
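*/
/*
 * Illustrative sketch: the async_q ring used by the error path below
 * (see AQ_GSZ/AQ_PUSH/AQ_POP above) keeps `beg' and `end' indices
 * modulo BF_SZ, so the fill level is (BF_SZ + end - beg) % BF_SZ and
 * one slot is always left unused to distinguish full from empty:
 */
#if 0
static int aq_room (const async_q *q)
{
	return BF_SZ - 1 - AQ_GSZ (q);	/* entries that may still be pushed */
}
#endif
/* Error callback function.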
*/ static void cx_error (cx_chan_t *c, int data) { drv_t *d = c->sys; async_q *q; if (!d) return; q = &(d->aqueue); switch (data) { case CX_FRAME: CX_DEBUG (d, ("frame error\n")); if (c->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty->t_termios), (d->tty)) || !(d->tty->t_iflag & (IGNPAR | PARMRK)))) { AQ_PUSH (q, TTY_FE); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } #ifndef NETGRAPH else if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CX_CRC: CX_DEBUG (d, ("crc error\n")); if (c->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty->t_termios), (d->tty)) || !(d->tty->t_iflag & INPCK) || !(d->tty->t_iflag & (IGNPAR | PARMRK)))) { AQ_PUSH (q, TTY_PE); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } #ifndef NETGRAPH else if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CX_OVERRUN: CX_DEBUG (d, ("overrun error\n")); #ifdef TTY_OE if (c->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty->t_termios), (d->tty)))) { AQ_PUSH (q, TTY_OE); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } #endif #ifndef NETGRAPH else { if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); } #endif break; case CX_OVERFLOW: CX_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if (c->mode != M_ASYNC) if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CX_UNDERRUN: CX_DEBUG (d, ("underrun error\n")); if (c->mode != M_ASYNC) { d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cx_start (d); } break; case CX_BREAK: CX_DEBUG (d, ("break error\n")); if (c->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (AQ_GSZ (q) < BF_SZ - 1) && (!CONDITION((&d->tty->t_termios), (d->tty)) || !(d->tty->t_iflag & (IGNBRK | BRKINT | PARMRK)))) { AQ_PUSH (q, TTY_BI); d->intr_action |= CX_READ; MY_SOFT_INTR = 1; swi_sched (cx_fast_ih, 0); } #ifndef NETGRAPH else if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; default: CX_DEBUG (d, ("error #%d\n", data)); } } static int cx_topen (struct tty *tp, struct cdev *dev) { bdrv_t *bd; drv_t *d; d = tp->t_sc; CX_DEBUG2 (d, ("cx_open (serial)\n")); bd = d->board->sys; if (d->chan->mode != M_ASYNC) return (EBUSY); d->open_dev |= 0x2; CX_LOCK (bd); cx_start_chan (d->chan, 0, 0); cx_set_dtr (d->chan, 1); cx_set_rts (d->chan, 1); d->cd = cx_get_cd (d->chan); CX_UNLOCK (bd); CX_DEBUG2 (d, ("cx_open done\n")); return 0; } static void cx_tclose (struct tty *tp) { drv_t *d; bdrv_t *bd; d = tp->t_sc; CX_DEBUG2 (d, ("cx_close\n")); bd = d->board->sys; CX_LOCK (bd); /* Disable receiver. * Transmitter continues sending the queued data. 
*/ cx_enable_receive (d->chan, 0); CX_UNLOCK (bd); d->open_dev &= ~0x2; } static int cx_tmodem (struct tty *tp, int sigon, int sigoff) { drv_t *d; bdrv_t *bd; d = tp->t_sc; bd = d->board->sys; CX_LOCK (bd); if (!sigon && !sigoff) { if (cx_get_dsr (d->chan)) sigon |= SER_DSR; if (cx_get_cd (d->chan)) sigon |= SER_DCD; if (cx_get_cts (d->chan)) sigon |= SER_CTS; if (d->chan->dtr) sigon |= SER_DTR; if (d->chan->rts) sigon |= SER_RTS; CX_UNLOCK (bd); return sigon; } if (sigon & SER_DTR) cx_set_dtr (d->chan, 1); if (sigoff & SER_DTR) cx_set_dtr (d->chan, 0); if (sigon & SER_RTS) cx_set_rts (d->chan, 1); if (sigoff & SER_RTS) cx_set_rts (d->chan, 0); CX_UNLOCK (bd); return (0); } static int cx_open (struct cdev *dev, int flag, int mode, struct thread *td) { int unit; drv_t *d; d = dev->si_drv1; unit = d->chan->num; CX_DEBUG2 (d, ("cx_open unit=%d, flag=0x%x, mode=0x%x\n", unit, flag, mode)); d->open_dev |= 0x1; CX_DEBUG2 (d, ("cx_open done\n")); return 0; } static int cx_close (struct cdev *dev, int flag, int mode, struct thread *td) { drv_t *d; d = dev->si_drv1; CX_DEBUG2 (d, ("cx_close\n")); d->open_dev &= ~0x1; return 0; } static int cx_modem_status (drv_t *d) { bdrv_t *bd = d->board->sys; int status = 0, s = splhigh (); CX_LOCK (bd); /* Already opened by someone or network interface is up? */ if ((d->chan->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (d->open_dev & 0x2)) || (d->chan->mode != M_ASYNC && d->running)) status = TIOCM_LE; /* always enabled while open */ if (cx_get_dsr (d->chan)) status |= TIOCM_DSR; if (cx_get_cd (d->chan)) status |= TIOCM_CD; if (cx_get_cts (d->chan)) status |= TIOCM_CTS; if (d->chan->dtr) status |= TIOCM_DTR; if (d->chan->rts) status |= TIOCM_RTS; CX_UNLOCK (bd); splx (s); return status; } static int cx_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { drv_t *d; bdrv_t *bd; cx_chan_t *c; struct serial_statistics *st; int error, s; char mask[16]; d = dev->si_drv1; c = d->chan; bd = d->board->sys; switch (cmd) { case SERIAL_GETREGISTERED: CX_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; s<NCX*NCHAN; ++s) if (channel [s]) mask [s/8] |= 1 << (s & 7); bcopy (mask, data, sizeof (mask)); return 0; #ifndef NETGRAPH case SERIAL_GETPROTO: CX_DEBUG2 (d, ("ioctl: getproto\n")); s = splhigh (); CX_LOCK (bd); strcpy ((char*)data, (c->mode == M_ASYNC) ? "async" : (IFP2SP(d->ifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETPROTO: CX_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_ASYNC) return EBUSY; if (d->ifp->if_drv_flags & IFF_DRV_RUNNING) return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR | PP_KEEPALIVE); d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CX_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO) || (c->mode == M_ASYNC)) return EINVAL; s = splhigh (); CX_LOCK (bd); *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETKEEPALIVE: CX_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser!
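* (All SERIAL_SET* cases gate on priv_check(td, PRIV_DRIVER) below.
* For illustration only, a userland sketch of this ioctl with a
* hypothetical device path:
*
*	int on = 1;
*	int fd = open ("/dev/cx0", O_RDWR);
*	if (fd >= 0 && ioctl (fd, SERIAL_SETKEEPALIVE, &on) < 0)
*		warn ("SERIAL_SETKEEPALIVE");
*
* This toggles the sppp keepalive on the synchronous interface.)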
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splhigh (); CX_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CX_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CX_DEBUG2 (d, ("ioctl: getmode\n")); s = splhigh (); CX_LOCK (bd); *(int*)data = (c->mode == M_ASYNC) ? SERIAL_ASYNC : SERIAL_HDLC; CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETMODE: CX_DEBUG2 (d, ("ioctl: setmode\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; /* Somebody is waiting for carrier? */ if (d->lock) return EBUSY; /* /dev/ttyXX is already opened by someone? */ if (c->mode == M_ASYNC && d->tty && (d->tty->t_state & TS_ISOPEN) && (d->open_dev & 0x2)) return EBUSY; /* Network interface is up? * Cannot change to async mode. */ if (c->mode != M_ASYNC && d->running && (*(int*)data == SERIAL_ASYNC)) return EBUSY; s = splhigh (); CX_LOCK (bd); if (c->mode == M_HDLC && *(int*)data == SERIAL_ASYNC) { cx_set_mode (c, M_ASYNC); cx_enable_receive (c, 0); cx_enable_transmit (c, 0); } else if (c->mode == M_ASYNC && *(int*)data == SERIAL_HDLC) { if (d->ifp->if_flags & IFF_DEBUG) c->debug = c->debug_shadow; cx_set_mode (c, M_HDLC); cx_enable_receive (c, 1); cx_enable_transmit (c, 1); } CX_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSTAT: CX_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; s = splhigh (); CX_LOCK (bd); st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = c->mintr; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->ierrs = c->ierrs; st->obytes = c->obytes; st->opkts = c->opkts; st->oerrs = c->oerrs; CX_UNLOCK (bd); splx (s); return 0; case SERIAL_CLRSTAT: CX_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splhigh (); CX_LOCK (bd); c->rintr = 0; c->tintr = 0; c->mintr = 0; c->ibytes = 0; c->ipkts = 0; c->ierrs = 0; c->obytes = 0; c->opkts = 0; c->oerrs = 0; CX_UNLOCK (bd); splx (s); return 0; case SERIAL_GETBAUD: CX_DEBUG2 (d, ("ioctl: getbaud\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); *(long*)data = cx_get_baud(c); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETBAUD: CX_DEBUG2 (d, ("ioctl: setbaud\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); cx_set_baud (c, *(long*)data); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLOOP: CX_DEBUG2 (d, ("ioctl: getloop\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); *(int*)data = cx_get_loop (c); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETLOOP: CX_DEBUG2 (d, ("ioctl: setloop\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); cx_set_loop (c, *(int*)data); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDPLL: CX_DEBUG2 (d, ("ioctl: getdpll\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); *(int*)data = cx_get_dpll (c); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETDPLL: CX_DEBUG2 (d, ("ioctl: setdpll\n")); /* Only for superuser!
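* (DPLL here is the digital phase-locked loop: the channel recovers its
* receive clock from transitions in the data stream instead of an
* external clock source.  It is normally used together with NRZI
* encoding, set via SERIAL_SETNRZI above, which guarantees enough
* transitions for the loop to stay locked.)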
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); cx_set_dpll (c, *(int*)data); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_GETNRZI: CX_DEBUG2 (d, ("ioctl: getnrzi\n")); if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); *(int*)data = cx_get_nrzi (c); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETNRZI: CX_DEBUG2 (d, ("ioctl: setnrzi\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->mode == M_ASYNC) return EINVAL; s = splhigh (); CX_LOCK (bd); cx_set_nrzi (c, *(int*)data); CX_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CX_DEBUG2 (d, ("ioctl: getdebug\n")); s = splhigh (); CX_LOCK (bd); *(int*)data = c->debug; CX_UNLOCK (bd); splx (s); return 0; case SERIAL_SETDEBUG: CX_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splhigh (); CX_LOCK (bd); #ifndef NETGRAPH if (c->mode == M_ASYNC) { c->debug = *(int*)data; } else { /* * The debug_shadow is always greater than zero for * logic simplicity. For switching debug off the * IFF_DEBUG is responsible (for !M_ASYNC mode). */ c->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) c->debug = c->debug_shadow; } #else c->debug = *(int*)data; #endif CX_UNLOCK (bd); splx (s); return 0; } switch (cmd) { case TIOCSDTR: /* Set DTR */ CX_DEBUG2 (d, ("ioctl: tiocsdtr\n")); s = splhigh (); CX_LOCK (bd); cx_set_dtr (c, 1); CX_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ CX_DEBUG2 (d, ("ioctl: tioccdtr\n")); s = splhigh (); CX_LOCK (bd); cx_set_dtr (c, 0); CX_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ CX_DEBUG2 (d, ("ioctl: tiocmset\n")); s = splhigh (); CX_LOCK (bd); cx_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); cx_set_rts (c, (*(int*)data & TIOCM_RTS) ? 
1 : 0); CX_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ CX_DEBUG2 (d, ("ioctl: tiocmbis\n")); s = splhigh (); CX_LOCK (bd); if (*(int*)data & TIOCM_DTR) cx_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) cx_set_rts (c, 1); CX_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ CX_DEBUG2 (d, ("ioctl: tiocmbic\n")); s = splhigh (); CX_LOCK (bd); if (*(int*)data & TIOCM_DTR) cx_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) cx_set_rts (c, 0); CX_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ CX_DEBUG2 (d, ("ioctl: tiocmget\n")); *(int*)data = cx_modem_status (d); return 0; } CX_DEBUG2 (d, ("ioctl: 0x%lx\n", cmd)); return ENOTTY; } void cx_softintr (void *unused) { drv_t *d; bdrv_t *bd; async_q *q; int i, s, ic, k; while (MY_SOFT_INTR) { MY_SOFT_INTR = 0; for (i=0; i<NCX*NCHAN; ++i) { d = channel [i]; if (!d || !d->chan || d->chan->type == T_NONE || d->chan->mode != M_ASYNC || !d->tty || !d->tty->t_dev) continue; bd = d->board->sys; s = splhigh (); CX_LOCK (bd); if (d->intr_action & CX_READ) { q = &(d->aqueue); if (d->tty->t_state & TS_CAN_BYPASS_L_RINT) { k = AQ_GSZ(q); if (d->tty->t_rawq.c_cc + k > d->tty->t_ihiwat && (d->tty->t_cflag & CRTS_IFLOW || d->tty->t_iflag & IXOFF) && !(d->tty->t_state & TS_TBLOCK)) ttyblock(d->tty); d->tty->t_rawcc += k; while (k>0) { k--; AQ_POP (q, ic); CX_UNLOCK (bd); splx (s); putc (ic, &d->tty->t_rawq); s = splhigh (); CX_LOCK (bd); } ttwakeup(d->tty); if (d->tty->t_state & TS_TTSTOP && (d->tty->t_iflag & IXANY || d->tty->t_cc[VSTART] == d->tty->t_cc[VSTOP])) { d->tty->t_state &= ~TS_TTSTOP; d->tty->t_lflag &= ~FLUSHO; d->intr_action |= CX_WRITE; } } else { while (q->end != q->beg) { AQ_POP (q, ic); CX_UNLOCK (bd); splx (s); ttyld_rint (d->tty, ic); s = splhigh (); CX_LOCK (bd); } } d->intr_action &= ~CX_READ; } splx (s); CX_UNLOCK (bd); s = splhigh (); CX_LOCK (bd); if (d->intr_action & CX_WRITE) { if (d->tty->t_line) ttyld_start (d->tty); else cx_oproc (d->tty); d->intr_action &= ~CX_WRITE; } CX_UNLOCK (bd); splx (s); } } } /* * Fill transmitter buffer with data. */ static void cx_oproc (struct tty *tp) { int s, k; drv_t *d; bdrv_t *bd; static u_char buf[DMABUFSZ]; u_char *p; u_short len = 0, sublen = 0; d = tp->t_sc; bd = d->board->sys; CX_DEBUG2 (d, ("cx_oproc\n")); s = splhigh (); CX_LOCK (bd); if (tp->t_cflag & CRTSCTS && (tp->t_state & TS_TBLOCK) && d->chan->rts) cx_set_rts (d->chan, 0); else if (tp->t_cflag & CRTSCTS && ! (tp->t_state & TS_TBLOCK) && ! d->chan->rts) cx_set_rts (d->chan, 1); if (! (tp->t_state & (TS_TIMEOUT | TS_TTSTOP))) { /* Start transmitter. */ cx_enable_transmit (d->chan, 1); /* Is it busy? */ if (! cx_buf_free (d->chan)) { tp->t_state |= TS_BUSY; CX_UNLOCK (bd); splx (s); return; } if (tp->t_iflag & IXOFF) { p = (buf + (DMABUFSZ/2)); sublen = q_to_b (&tp->t_outq, p, (DMABUFSZ/2)); k = sublen; while (k--) { /* Send XON/XOFF out of band. */ if (*p == tp->t_cc[VSTOP]) { cx_xflow_ctl (d->chan, 0); p++; continue; } if (*p == tp->t_cc[VSTART]) { cx_xflow_ctl (d->chan, 1); p++; continue; } buf[len] = *p; len++; p++; } } else { p = buf; len = q_to_b (&tp->t_outq, p, (DMABUFSZ/2)); } if (len) { cx_send_packet (d->chan, buf, len, 0); tp->t_state |= TS_BUSY; d->atimeout = 10; CX_DEBUG2 (d, ("out %d bytes\n", len)); } } ttwwakeup (tp); CX_UNLOCK (bd); splx (s); } static int cx_param (struct tty *tp, struct termios *t) { drv_t *d; bdrv_t *bd; int s, bits, parity; d = tp->t_sc; bd = d->board->sys; s = splhigh (); CX_LOCK (bd); if (t->c_ospeed == 0) { /* Clear DTR and RTS.
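* (A zero output speed is the POSIX hangup convention: userland calls
* cfsetospeed (&t, B0) and tcsetattr(), the line discipline passes
* c_ospeed == 0 down to this t_param handler, and we drop DTR.)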
*/ cx_set_dtr (d->chan, 0); CX_UNLOCK (bd); splx (s); CX_DEBUG2 (d, ("cx_param (hangup)\n")); return 0; } CX_DEBUG2 (d, ("cx_param\n")); /* Check requested parameters. */ if (t->c_ospeed < 300 || t->c_ospeed > 256*1024) { CX_UNLOCK (bd); splx (s); return EINVAL; } if (t->c_ispeed && (t->c_ispeed < 300 || t->c_ispeed > 256*1024)) { CX_UNLOCK (bd); splx (s); return EINVAL; } /* And copy them to tty and channel structures. */ tp->t_ispeed = t->c_ispeed = tp->t_ospeed = t->c_ospeed; tp->t_cflag = t->c_cflag; /* Set character length and parity mode. */ switch (t->c_cflag & CSIZE) { default: case CS8: bits = 8; break; case CS7: bits = 7; break; case CS6: bits = 6; break; case CS5: bits = 5; break; } parity = ((t->c_cflag & PARENB) ? 1 : 0) * (1 + ((t->c_cflag & PARODD) ? 0 : 1)); /* Set current channel number. */ if (! d->chan->dtr) cx_set_dtr (d->chan, 1); ttyldoptim (tp); cx_set_async_param (d->chan, t->c_ospeed, bits, parity, (t->c_cflag & CSTOPB), !(t->c_cflag & PARENB), (t->c_cflag & CRTSCTS), (t->c_iflag & IXON), (t->c_iflag & IXANY), t->c_cc[VSTART], t->c_cc[VSTOP]); CX_UNLOCK (bd); splx (s); return 0; } /* * Stop output on a line */ static void cx_stop (struct tty *tp, int flag) { drv_t *d; bdrv_t *bd; int s; d = tp->t_sc; bd = d->board->sys; s = splhigh (); CX_LOCK (bd); if (tp->t_state & TS_BUSY) { /* Stop transmitter */ CX_DEBUG2 (d, ("cx_stop\n")); cx_transmitter_ctl (d->chan, 0); } CX_UNLOCK (bd); splx (s); } /* * Process the (delayed) carrier signal setup. */ static void cx_carrier (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; cx_chan_t *c = d->chan; int s, cd; s = splhigh (); CX_LOCK (bd); cd = cx_get_cd (c); if (d->cd != cd) { if (cd) { CX_DEBUG (d, ("carrier on\n")); d->cd = 1; CX_UNLOCK (bd); splx (s); if (d->tty) ttyld_modem(d->tty, 1); } else { CX_DEBUG (d, ("carrier loss\n")); d->cd = 0; CX_UNLOCK (bd); splx (s); if (d->tty) ttyld_modem(d->tty, 0); } } else { CX_UNLOCK (bd); splx (s); } } /* * Modem signal callback function. */ static void cx_modem (cx_chan_t *c) { drv_t *d = c->sys; if (!d || c->mode != M_ASYNC) return; /* Handle carrier detect/loss. */ /* Carrier changed - delay processing DCD for a while * to give both sides some time to initialize. */ callout_reset (&d->dcd_timeout_handle, hz/2, cx_carrier, d); } #ifdef NETGRAPH static int ng_cx_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CX_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_cx_newhook (node_p node, hook_p hook, const char *name) { int s; drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd = d->board->sys; if (d->chan->mode == M_ASYNC) return EINVAL; /* Attach debug hook */ if (strcmp (name, NG_CX_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE (hook, NULL); d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CX_HOOK_RAW) != 0) return EINVAL; NG_HOOK_SET_PRIVATE (hook, d); d->hook = hook; s = splhigh (); CX_LOCK (bd); cx_up (d); CX_UNLOCK (bd); splx (s); return 0; } static int print_modems (char *s, cx_chan_t *c, int need_header) { int status = cx_modem_status (c->sys); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? 
"On" : "-"); return length; } static int print_stats (char *s, cx_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8ld %7ld %7ld %8ld %7ld %7ld\n", c->rintr, c->tintr, c->mintr, c->ibytes, c->ipkts, c->ierrs, c->obytes, c->opkts, c->oerrs); return length; } static int print_chan (char *s, cx_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "cx%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (cx_get_baud (c)) length += sprintf (s + length, " %ld", cx_get_baud (c)); else length += sprintf (s + length, " extclock"); if (c->mode == M_HDLC) { length += sprintf (s + length, " dpll=%s", cx_get_dpll (c) ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", cx_get_nrzi (c) ? "on" : "off"); } length += sprintf (s + length, " loop=%s", cx_get_loop (c) ? "on\n" : "off\n"); return length; } static int ng_cx_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; struct ng_mesg *resp = NULL; int error = 0; if (!d) return EINVAL; CX_DEBUG (d, ("Rcvmsg\n")); NGI_GET_MSG (item, msg); switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CX_COOKIE: printf ("Don't forget to implement\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } bzero (resp, dl); s = (resp)->data; l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); return error; } static int ng_cx_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_tag_prio *ptag; bdrv_t *bd; struct ifqueue *q; int s; NGI_GET_M (item, m); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! 
d) { NG_FREE_M (m); return ENETDOWN; } bd = d->board->sys; /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->lo_queue; s = splhigh (); CX_LOCK (bd); IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CX_UNLOCK (bd); splx (s); NG_FREE_M (m); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); cx_start (d); CX_UNLOCK (bd); splx (s); return 0; } static int ng_cx_rmnode (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd; CX_DEBUG (d, ("Rmnode\n")); if (d && d->running) { int s = splhigh (); bd = d->board->sys; CX_LOCK (bd); cx_down (d); CX_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NGF_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } NG_NODE_REVIVE(node); /* Persistent node */ #endif return 0; } static int ng_cx_connect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); callout_reset (&d->timeout_handle, hz, cx_watchdog_timer, d); return 0; } static int ng_cx_disconnect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); bdrv_t *bd = d->board->sys; int s; s = splhigh (); CX_LOCK (bd); if (NG_HOOK_PRIVATE (hook)) cx_down (d); CX_UNLOCK (bd); splx (s); /* If we were waiting on the callout and it has been reasserted by now, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); return 0; } #endif /*NETGRAPH*/ static int cx_modevent (module_t mod, int type, void *unused) { static int load_count = 0; switch (type) { case MOD_LOAD: #ifdef NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_cx\n"); #endif ++load_count; callout_init (&timeout_handle, 1); callout_reset (&timeout_handle, hz*5, cx_timeout, 0); /* Software interrupt. */ swi_add(&tty_intr_event, "cx", cx_softintr, NULL, SWI_TTY, INTR_MPSAFE, &cx_fast_ih); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Sigma\n"); #ifdef NETGRAPH ng_rmtype (&typestruct); #endif } /* If we were waiting on the callout and it has been reasserted by now, just stop it. */ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); swi_remove (cx_fast_ih); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CX_NODE_TYPE, .constructor = ng_cx_constructor, .rcvmsg = ng_cx_rcvmsg, .shutdown = ng_cx_rmnode, .newhook = ng_cx_newhook, .connect = ng_cx_connect, .rcvdata = ng_cx_rcvdata, .disconnect = ng_cx_disconnect, }; #endif /*NETGRAPH*/ #ifdef NETGRAPH MODULE_DEPEND (ng_cx, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (isa_cx, sppp, 1, 1, 1); #endif DRIVER_MODULE (cx, isa, cx_isa_driver, cx_devclass, cx_modevent, NULL); MODULE_VERSION (cx, 1); Index: head/sys/dev/de/if_de.c =================================================================== --- head/sys/dev/de/if_de.c (revision 313981) +++ head/sys/dev/de/if_de.c (revision 313982) @@ -1,5017 +1,5017 @@ /* $NetBSD: if_de.c,v 1.86 1999/06/01 19:17:59 thorpej Exp $ */ /*- * Copyright (c) 1994-1997 Matt Thomas (matt@3am-software.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Id: if_de.c,v 1.94 1997/07/03 16:55:07 thomas Exp */ /* * DEC 21040 PCI Ethernet Controller * * Written by Matt Thomas * BPF support code stolen directly from if_ec.c * * This driver supports the DEC DE435 or any other PCI * board which support 21040, 21041, or 21140 (mostly). */ #include __FBSDID("$FreeBSD$"); #define TULIP_HDR_DATA #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif #include #include #include #include #include #include #ifdef DDB #include #endif /* * Intel CPUs should use I/O mapped access. */ #if defined(__i386__) #define TULIP_IOMAPPED #endif #if 0 /* This enables KTR traces at KTR_DEV. */ #define KTR_TULIP KTR_DEV #else #define KTR_TULIP 0 #endif #if 0 /* * This turns on all sort of debugging stuff and make the * driver much larger. */ #define TULIP_DEBUG #endif #if 0 #define TULIP_PERFSTATS #endif #define TULIP_HZ 10 #include #define SYNC_NONE 0 #define SYNC_RX 1 #define SYNC_TX 2 /* * This module supports * the DEC 21040 PCI Ethernet Controller. * the DEC 21041 PCI Ethernet Controller. * the DEC 21140 PCI Fast Ethernet Controller. 
*/ static void tulip_addr_filter(tulip_softc_t * const sc); static int tulip_ifmedia_change(struct ifnet * const ifp); static void tulip_ifmedia_status(struct ifnet * const ifp, struct ifmediareq *req); static void tulip_init(void *); static void tulip_init_locked(tulip_softc_t * const sc); static void tulip_intr_shared(void *arg); static void tulip_intr_normal(void *arg); static void tulip_mii_autonegotiate(tulip_softc_t * const sc, const unsigned phyaddr); static int tulip_mii_map_abilities(tulip_softc_t * const sc, unsigned abilities); static tulip_media_t tulip_mii_phy_readspecific(tulip_softc_t * const sc); static unsigned tulip_mii_readreg(tulip_softc_t * const sc, unsigned devaddr, unsigned regno); static void tulip_mii_writereg(tulip_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data); static void tulip_reset(tulip_softc_t * const sc); static void tulip_rx_intr(tulip_softc_t * const sc); static int tulip_srom_decode(tulip_softc_t * const sc); static void tulip_start(struct ifnet *ifp); static void tulip_start_locked(tulip_softc_t * const sc); static struct mbuf * tulip_txput(tulip_softc_t * const sc, struct mbuf *m); static void tulip_txput_setup(tulip_softc_t * const sc); static void tulip_watchdog(void *arg); struct mbuf * tulip_dequeue_mbuf(tulip_ringinfo_t *ri, tulip_descinfo_t *di, int sync); static void tulip_dma_map_addr(void *, bus_dma_segment_t *, int, int); static void tulip_dma_map_rxbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); static void tulip_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr; if (error) return; paddr = arg; *paddr = segs->ds_addr; } static void tulip_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { tulip_desc_t *desc; if (error) return; desc = arg; KASSERT(nseg == 1, ("too many DMA segments")); KASSERT(segs[0].ds_len >= TULIP_RX_BUFLEN, ("receive buffer too small")); desc->d_addr1 = segs[0].ds_addr & 0xffffffff; desc->d_length1 = TULIP_RX_BUFLEN; #ifdef not_needed /* These should already always be zero. */ desc->d_addr2 = 0; desc->d_length2 = 0; #endif } struct mbuf * tulip_dequeue_mbuf(tulip_ringinfo_t *ri, tulip_descinfo_t *di, int sync) { struct mbuf *m; m = di->di_mbuf; if (m != NULL) { switch (sync) { case SYNC_NONE: break; case SYNC_RX: TULIP_RXMAP_POSTSYNC(ri, di); break; case SYNC_TX: TULIP_TXMAP_POSTSYNC(ri, di); break; default: panic("bad sync flag: %d", sync); } bus_dmamap_unload(ri->ri_data_tag, *di->di_map); di->di_mbuf = NULL; } return (m); } static void tulip_timeout_callback(void *arg) { tulip_softc_t * const sc = arg; TULIP_PERFSTART(timeout) TULIP_LOCK_ASSERT(sc); sc->tulip_flags &= ~TULIP_TIMEOUTPENDING; sc->tulip_probe_timeout -= 1000 / TULIP_HZ; (sc->tulip_boardsw->bd_media_poll)(sc, TULIP_MEDIAPOLL_TIMER); TULIP_PERFEND(timeout); } static void tulip_timeout(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); if (sc->tulip_flags & TULIP_TIMEOUTPENDING) return; sc->tulip_flags |= TULIP_TIMEOUTPENDING; callout_reset(&sc->tulip_callout, (hz + TULIP_HZ / 2) / TULIP_HZ, tulip_timeout_callback, sc); } static int tulip_txprobe(tulip_softc_t * const sc) { struct mbuf *m; u_char *enaddr; /* * Before we are sure this is the right media we need * to send a small packet to make sure there's carrier. * Strangely, BNC and AUI will "see" receive data if * either is connected so the transmit is the only way * to verify the connectivity. 
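*
* The probe frame built below is a minimal 802.3 LLC TEST addressed to
* ourselves: dst = src = our station address, an 802.3 length field of 3
* (the htons(3)), and an LLC header of DSAP 0x00, SSAP 0x00, control
* 0xE3 (TEST, Class 1, poll bit clear).  A successful transmit
* completion is taken as proof of carrier.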
*/ TULIP_LOCK_ASSERT(sc); MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return 0; /* * Construct a LLC TEST message which will point to ourselves. */ if (sc->tulip_ifp->if_input != NULL) enaddr = IF_LLADDR(sc->tulip_ifp); else enaddr = sc->tulip_enaddr; bcopy(enaddr, mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN); bcopy(enaddr, mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN); mtod(m, struct ether_header *)->ether_type = htons(3); mtod(m, unsigned char *)[14] = 0; mtod(m, unsigned char *)[15] = 0; mtod(m, unsigned char *)[16] = 0xE3; /* LLC Class1 TEST (no poll) */ m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3; /* * send it! */ sc->tulip_cmdmode |= TULIP_CMD_TXRUN; sc->tulip_intrmask |= TULIP_STS_TXINTR; sc->tulip_flags |= TULIP_TXPROBE_ACTIVE; TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); if ((m = tulip_txput(sc, m)) != NULL) m_freem(m); sc->tulip_probe.probe_txprobes++; return 1; } static void tulip_media_set(tulip_softc_t * const sc, tulip_media_t media) { const tulip_media_info_t *mi = sc->tulip_mediums[media]; TULIP_LOCK_ASSERT(sc); if (mi == NULL) return; /* * If we are switching media, make sure we don't think there's * any stale RX activity */ sc->tulip_flags &= ~TULIP_RXACT; if (mi->mi_type == TULIP_MEDIAINFO_SIA) { TULIP_CSR_WRITE(sc, csr_sia_connectivity, TULIP_SIACONN_RESET); TULIP_CSR_WRITE(sc, csr_sia_tx_rx, mi->mi_sia_tx_rx); if (sc->tulip_features & TULIP_HAVE_SIAGP) { TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_sia_gp_control|mi->mi_sia_general); DELAY(50); TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_sia_gp_data|mi->mi_sia_general); } else { TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_sia_general); } TULIP_CSR_WRITE(sc, csr_sia_connectivity, mi->mi_sia_connectivity); } else if (mi->mi_type == TULIP_MEDIAINFO_GPR) { #define TULIP_GPR_CMDBITS (TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION|TULIP_CMD_SCRAMBLER|TULIP_CMD_TXTHRSHLDCTL) /* * If the cmdmode bits don't match the currently operating mode, * set the cmdmode appropriately and reset the chip. */ if (((mi->mi_cmdmode ^ TULIP_CSR_READ(sc, csr_command)) & TULIP_GPR_CMDBITS) != 0) { sc->tulip_cmdmode &= ~TULIP_GPR_CMDBITS; sc->tulip_cmdmode |= mi->mi_cmdmode; tulip_reset(sc); } TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET|sc->tulip_gpinit); DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, (u_int8_t) mi->mi_gpdata); } else if (mi->mi_type == TULIP_MEDIAINFO_SYM) { /* * If the cmdmode bits don't match the currently operating mode, * set the cmdmode appropriately and reset the chip. 
*/ if (((mi->mi_cmdmode ^ TULIP_CSR_READ(sc, csr_command)) & TULIP_GPR_CMDBITS) != 0) { sc->tulip_cmdmode &= ~TULIP_GPR_CMDBITS; sc->tulip_cmdmode |= mi->mi_cmdmode; tulip_reset(sc); } TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_gpcontrol); TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_gpdata); } else if (mi->mi_type == TULIP_MEDIAINFO_MII && sc->tulip_probe_state != TULIP_PROBE_INACTIVE) { int idx; if (sc->tulip_features & TULIP_HAVE_SIAGP) { const u_int8_t *dp; dp = &sc->tulip_rombuf[mi->mi_reset_offset]; for (idx = 0; idx < mi->mi_reset_length; idx++, dp += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp[0] + 256 * dp[1]) << 16); } sc->tulip_phyaddr = mi->mi_phyaddr; dp = &sc->tulip_rombuf[mi->mi_gpr_offset]; for (idx = 0; idx < mi->mi_gpr_length; idx++, dp += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp[0] + 256 * dp[1]) << 16); } } else { for (idx = 0; idx < mi->mi_reset_length; idx++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_reset_offset + idx]); } sc->tulip_phyaddr = mi->mi_phyaddr; for (idx = 0; idx < mi->mi_gpr_length; idx++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_gpr_offset + idx]); } } if (sc->tulip_flags & TULIP_TRYNWAY) { tulip_mii_autonegotiate(sc, sc->tulip_phyaddr); } else if ((sc->tulip_flags & TULIP_DIDNWAY) == 0) { u_int32_t data = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_CONTROL); data &= ~(PHYCTL_SELECT_100MB|PHYCTL_FULL_DUPLEX|PHYCTL_AUTONEG_ENABLE); sc->tulip_flags &= ~TULIP_DIDNWAY; if (TULIP_IS_MEDIA_FD(media)) data |= PHYCTL_FULL_DUPLEX; if (TULIP_IS_MEDIA_100MB(media)) data |= PHYCTL_SELECT_100MB; tulip_mii_writereg(sc, sc->tulip_phyaddr, PHYREG_CONTROL, data); } } } static void tulip_linkup(tulip_softc_t * const sc, tulip_media_t media) { TULIP_LOCK_ASSERT(sc); if ((sc->tulip_flags & TULIP_LINKUP) == 0) sc->tulip_flags |= TULIP_PRINTLINKUP; sc->tulip_flags |= TULIP_LINKUP; sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #if 0 /* XXX how does with work with ifmedia? */ if ((sc->tulip_flags & TULIP_DIDNWAY) == 0) { if (sc->tulip_ifp->if_flags & IFF_FULLDUPLEX) { if (TULIP_CAN_MEDIA_FD(media) && sc->tulip_mediums[TULIP_FD_MEDIA_OF(media)] != NULL) media = TULIP_FD_MEDIA_OF(media); } else { if (TULIP_IS_MEDIA_FD(media) && sc->tulip_mediums[TULIP_HD_MEDIA_OF(media)] != NULL) media = TULIP_HD_MEDIA_OF(media); } } #endif if (sc->tulip_media != media) { #ifdef TULIP_DEBUG sc->tulip_dbg.dbg_last_media = sc->tulip_media; #endif sc->tulip_media = media; sc->tulip_flags |= TULIP_PRINTMEDIA; if (TULIP_IS_MEDIA_FD(sc->tulip_media)) { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX; } else if (sc->tulip_chipid != TULIP_21041 || (sc->tulip_flags & TULIP_DIDNWAY) == 0) { sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; } } /* * We could set probe_timeout to 0 but setting to 3000 puts this * in one central place and the only matters is tulip_link is * followed by a tulip_timeout. Therefore setting it should not * result in aberrant behaviour. */ sc->tulip_probe_timeout = 3000; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; sc->tulip_flags &= ~(TULIP_TXPROBE_ACTIVE|TULIP_TRYNWAY); if (sc->tulip_flags & TULIP_INRESET) { tulip_media_set(sc, sc->tulip_media); } else if (sc->tulip_probe_media != sc->tulip_media) { /* * No reason to change media if we have the right media. 
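* (That is, the tulip_reset() below runs only when the probed media
* differs from the one already programmed; a matching media falls
* straight through to tulip_init_locked().)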
*/ tulip_reset(sc); } tulip_init_locked(sc); } static void tulip_media_print(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); if ((sc->tulip_flags & TULIP_LINKUP) == 0) return; if (sc->tulip_flags & TULIP_PRINTMEDIA) { device_printf(sc->tulip_dev, "enabling %s port\n", tulip_mediums[sc->tulip_media]); sc->tulip_flags &= ~(TULIP_PRINTMEDIA|TULIP_PRINTLINKUP); } else if (sc->tulip_flags & TULIP_PRINTLINKUP) { device_printf(sc->tulip_dev, "link up\n"); sc->tulip_flags &= ~TULIP_PRINTLINKUP; } } #if defined(TULIP_DO_GPR_SENSE) static tulip_media_t tulip_21140_gpr_media_sense(tulip_softc_t * const sc) { struct ifnet *ifp = sc->tulip_ifp; tulip_media_t maybe_media = TULIP_MEDIA_UNKNOWN; tulip_media_t last_media = TULIP_MEDIA_UNKNOWN; tulip_media_t media; TULIP_LOCK_ASSERT(sc); /* * If one of the media blocks contained a default media flag, * use that. */ for (media = TULIP_MEDIA_UNKNOWN; media < TULIP_MEDIA_MAX; media++) { const tulip_media_info_t *mi; /* * Media is not supported (or is full-duplex). */ if ((mi = sc->tulip_mediums[media]) == NULL || TULIP_IS_MEDIA_FD(media)) continue; if (mi->mi_type != TULIP_MEDIAINFO_GPR) continue; /* * Remember the media if this is the "default" media. */ if (mi->mi_default && maybe_media == TULIP_MEDIA_UNKNOWN) maybe_media = media; /* * No activity mask? Can't see if it is active if there's no mask. */ if (mi->mi_actmask == 0) continue; /* * Does the activity data match? */ if ((TULIP_CSR_READ(sc, csr_gp) & mi->mi_actmask) != mi->mi_actdata) continue; #if defined(TULIP_DEBUG) device_printf(sc->tulip_dev, "%s: %s: 0x%02x & 0x%02x == 0x%02x\n", __func__, tulip_mediums[media], TULIP_CSR_READ(sc, csr_gp) & 0xFF, mi->mi_actmask, mi->mi_actdata); #endif /* * It does! If this is the first media we detected, then * remember this media. If it isn't the first, then there were * multiple matches, which we equate to no match (since we don't * know which to select, if any). */ if (last_media == TULIP_MEDIA_UNKNOWN) { last_media = media; } else if (last_media != media) { last_media = TULIP_MEDIA_UNKNOWN; } } return (last_media != TULIP_MEDIA_UNKNOWN) ? last_media : maybe_media; } #endif /* TULIP_DO_GPR_SENSE */ static tulip_link_status_t tulip_media_link_monitor(tulip_softc_t * const sc) { const tulip_media_info_t * const mi = sc->tulip_mediums[sc->tulip_media]; tulip_link_status_t linkup = TULIP_LINK_DOWN; TULIP_LOCK_ASSERT(sc); if (mi == NULL) { #if defined(DIAGNOSTIC) || defined(TULIP_DEBUG) panic("tulip_media_link_monitor: %s: botch at line %d\n", tulip_mediums[sc->tulip_media],__LINE__); #else return TULIP_LINK_UNKNOWN; #endif } /* * Have we seen some packets? If so, the link must be good. */ if ((sc->tulip_flags & (TULIP_RXACT|TULIP_LINKUP)) == (TULIP_RXACT|TULIP_LINKUP)) { sc->tulip_flags &= ~TULIP_RXACT; sc->tulip_probe_timeout = 3000; return TULIP_LINK_UP; } sc->tulip_flags &= ~TULIP_RXACT; if (mi->mi_type == TULIP_MEDIAINFO_MII) { u_int32_t status; /* * Read the PHY status register. */ status = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_STATUS); if (status & PHYSTS_AUTONEG_DONE) { /* * If the PHY has completed autonegotiation, see if the * remote system's abilities have changed. If so, upgrade or * downgrade as appropriate.
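*
* (PHYREG_AUTONEG_ABILITIES is the MII link-partner ability register;
* its 10/100 media bits sit six positions below the corresponding
* capability bits of the status register, so the (abilities << 6)
* & status below intersects the partner's modes with our own.)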
*/ u_int32_t abilities = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_AUTONEG_ABILITIES); abilities = (abilities << 6) & status; if (abilities != sc->tulip_abilities) { #if defined(TULIP_DEBUG) loudprintf("%s(phy%d): autonegotiation changed: 0x%04x -> 0x%04x\n", ifp->if_xname, sc->tulip_phyaddr, sc->tulip_abilities, abilities); #endif if (tulip_mii_map_abilities(sc, abilities)) { tulip_linkup(sc, sc->tulip_probe_media); return TULIP_LINK_UP; } /* * if we had selected media because of autonegotiation, * we need to probe for the new media. */ sc->tulip_probe_state = TULIP_PROBE_INACTIVE; if (sc->tulip_flags & TULIP_DIDNWAY) return TULIP_LINK_DOWN; } } /* * The link is now up. If was down, say its back up. */ if ((status & (PHYSTS_LINK_UP|PHYSTS_REMOTE_FAULT)) == PHYSTS_LINK_UP) linkup = TULIP_LINK_UP; } else if (mi->mi_type == TULIP_MEDIAINFO_GPR) { /* * No activity sensor? Assume all's well. */ if (mi->mi_actmask == 0) return TULIP_LINK_UNKNOWN; /* * Does the activity data match? */ if ((TULIP_CSR_READ(sc, csr_gp) & mi->mi_actmask) == mi->mi_actdata) linkup = TULIP_LINK_UP; } else if (mi->mi_type == TULIP_MEDIAINFO_SIA) { /* * Assume non TP ok for now. */ if (!TULIP_IS_MEDIA_TP(sc->tulip_media)) return TULIP_LINK_UNKNOWN; if ((TULIP_CSR_READ(sc, csr_sia_status) & TULIP_SIASTS_LINKFAIL) == 0) linkup = TULIP_LINK_UP; #if defined(TULIP_DEBUG) if (sc->tulip_probe_timeout <= 0) device_printf(sc->tulip_dev, "sia status = 0x%08x\n", TULIP_CSR_READ(sc, csr_sia_status)); #endif } else if (mi->mi_type == TULIP_MEDIAINFO_SYM) { return TULIP_LINK_UNKNOWN; } /* * We will wait for 3 seconds until the link goes into suspect mode. */ if (sc->tulip_flags & TULIP_LINKUP) { if (linkup == TULIP_LINK_UP) sc->tulip_probe_timeout = 3000; if (sc->tulip_probe_timeout > 0) return TULIP_LINK_UP; sc->tulip_flags &= ~TULIP_LINKUP; device_printf(sc->tulip_dev, "link down: cable problem?\n"); } #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_downed++; #endif return TULIP_LINK_DOWN; } static void tulip_media_poll(tulip_softc_t * const sc, tulip_mediapoll_event_t event) { TULIP_LOCK_ASSERT(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_events[event]++; #endif if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE && event == TULIP_MEDIAPOLL_TIMER) { switch (tulip_media_link_monitor(sc)) { case TULIP_LINK_DOWN: { /* * Link Monitor failed. Probe for new media. */ event = TULIP_MEDIAPOLL_LINKFAIL; break; } case TULIP_LINK_UP: { /* * Check again soon. */ tulip_timeout(sc); return; } case TULIP_LINK_UNKNOWN: { /* * We can't tell so don't bother. */ return; } } } if (event == TULIP_MEDIAPOLL_LINKFAIL) { if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE) { if (TULIP_DO_AUTOSENSE(sc)) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_failures++; #endif sc->tulip_media = TULIP_MEDIA_UNKNOWN; if (sc->tulip_ifp->if_flags & IFF_UP) tulip_reset(sc); /* restart probe */ } return; } #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_pollintrs++; #endif } if (event == TULIP_MEDIAPOLL_START) { sc->tulip_ifp->if_drv_flags |= IFF_DRV_OACTIVE; if (sc->tulip_probe_state != TULIP_PROBE_INACTIVE) return; sc->tulip_probe_mediamask = 0; sc->tulip_probe_passes = 0; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_media_probes++; #endif /* * If the SROM contained an explicit media to use, use it. */ sc->tulip_cmdmode &= ~(TULIP_CMD_RXRUN|TULIP_CMD_FULLDUPLEX); sc->tulip_flags |= TULIP_TRYNWAY|TULIP_PROBE1STPASS; sc->tulip_flags &= ~(TULIP_DIDNWAY|TULIP_PRINTMEDIA|TULIP_PRINTLINKUP); /* * connidx is defaulted to a media_unknown type. 
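* (tulip_srom_conninfo[] maps the SROM "connection type" word to a
* tulip_media_t; if the SROM named an explicit port we link up on it
* directly below and skip the probe sequence.)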
*/ sc->tulip_probe_media = tulip_srom_conninfo[sc->tulip_connidx].sc_media; if (sc->tulip_probe_media != TULIP_MEDIA_UNKNOWN) { tulip_linkup(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } if (sc->tulip_features & TULIP_HAVE_GPR) { sc->tulip_probe_state = TULIP_PROBE_GPRTEST; sc->tulip_probe_timeout = 2000; } else { sc->tulip_probe_media = TULIP_MEDIA_MAX; sc->tulip_probe_timeout = 0; sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; } } /* * Ignore txprobe failures or spurious callbacks. */ if (event == TULIP_MEDIAPOLL_TXPROBE_FAILED && sc->tulip_probe_state != TULIP_PROBE_MEDIATEST) { sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; return; } /* * If we really transmitted a packet, then that's the media we'll use. */ if (event == TULIP_MEDIAPOLL_TXPROBE_OK || event == TULIP_MEDIAPOLL_LINKPASS) { if (event == TULIP_MEDIAPOLL_LINKPASS) { /* XXX Check media status just to be sure */ sc->tulip_probe_media = TULIP_MEDIA_10BASET; #if defined(TULIP_DEBUG) } else { sc->tulip_dbg.dbg_txprobes_ok[sc->tulip_probe_media]++; #endif } tulip_linkup(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } if (sc->tulip_probe_state == TULIP_PROBE_GPRTEST) { #if defined(TULIP_DO_GPR_SENSE) /* * Check for media via the general purpose register. * * Try to sense the media via the GPR. If the same value * occurs 3 times in a row then just use that. */ if (sc->tulip_probe_timeout > 0) { tulip_media_t new_probe_media = tulip_21140_gpr_media_sense(sc); #if defined(TULIP_DEBUG) device_printf(sc->tulip_dev, "%s: gpr sensing = %s\n", __func__, tulip_mediums[new_probe_media]); #endif if (new_probe_media != TULIP_MEDIA_UNKNOWN) { if (new_probe_media == sc->tulip_probe_media) { if (--sc->tulip_probe_count == 0) tulip_linkup(sc, sc->tulip_probe_media); } else { sc->tulip_probe_count = 10; } } sc->tulip_probe_media = new_probe_media; tulip_timeout(sc); return; } #endif /* TULIP_DO_GPR_SENSE */ /* * Brute force. We cycle through each of the media types * and try to transmit a packet. */ sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; sc->tulip_probe_media = TULIP_MEDIA_MAX; sc->tulip_probe_timeout = 0; tulip_timeout(sc); return; } if (sc->tulip_probe_state != TULIP_PROBE_MEDIATEST && (sc->tulip_features & TULIP_HAVE_MII)) { tulip_media_t old_media = sc->tulip_probe_media; tulip_mii_autonegotiate(sc, sc->tulip_phyaddr); switch (sc->tulip_probe_state) { case TULIP_PROBE_FAILED: case TULIP_PROBE_MEDIATEST: { /* * Try the next media. */ sc->tulip_probe_mediamask |= sc->tulip_mediums[sc->tulip_probe_media]->mi_mediamask; sc->tulip_probe_timeout = 0; #ifdef notyet if (sc->tulip_probe_state == TULIP_PROBE_FAILED) break; if (sc->tulip_probe_media != tulip_mii_phy_readspecific(sc)) break; sc->tulip_probe_timeout = TULIP_IS_MEDIA_TP(sc->tulip_probe_media) ? 2500 : 300; #endif break; } case TULIP_PROBE_PHYAUTONEG: { return; } case TULIP_PROBE_INACTIVE: { /* * Only probe if we autonegotiated a media that hasn't failed. */ sc->tulip_probe_timeout = 0; if (sc->tulip_probe_mediamask & TULIP_BIT(sc->tulip_probe_media)) { sc->tulip_probe_media = old_media; break; } tulip_linkup(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } default: { #if defined(DIAGNOSTIC) || defined(TULIP_DEBUG) panic("tulip_media_poll: botch at line %d\n", __LINE__); #endif break; } } } if (event == TULIP_MEDIAPOLL_TXPROBE_FAILED) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txprobes_failed[sc->tulip_probe_media]++; #endif sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; return; } /* * switch to another media if we tried this one enough. 
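* (The do/while below walks tulip_probe_media downward from
* TULIP_MEDIA_MAX, skipping unsupported entries, media already tried
* (probe_mediamask) and full-duplex variants; after three full passes
* it reports "autosense failed".)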
*/ if (/* event == TULIP_MEDIAPOLL_TXPROBE_FAILED || */ sc->tulip_probe_timeout <= 0) { #if defined(TULIP_DEBUG) if (sc->tulip_probe_media == TULIP_MEDIA_UNKNOWN) { device_printf(sc->tulip_dev, "poll media unknown!\n"); sc->tulip_probe_media = TULIP_MEDIA_MAX; } #endif /* * Find the next media type to check for. Full Duplex * types are not allowed. */ do { sc->tulip_probe_media -= 1; if (sc->tulip_probe_media == TULIP_MEDIA_UNKNOWN) { if (++sc->tulip_probe_passes == 3) { device_printf(sc->tulip_dev, "autosense failed: cable problem?\n"); if ((sc->tulip_ifp->if_flags & IFF_UP) == 0) { sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; return; } } sc->tulip_flags ^= TULIP_TRYNWAY; /* XXX */ sc->tulip_probe_mediamask = 0; sc->tulip_probe_media = TULIP_MEDIA_MAX - 1; } } while (sc->tulip_mediums[sc->tulip_probe_media] == NULL || (sc->tulip_probe_mediamask & TULIP_BIT(sc->tulip_probe_media)) || TULIP_IS_MEDIA_FD(sc->tulip_probe_media)); #if defined(TULIP_DEBUG) device_printf(sc->tulip_dev, "%s: probing %s\n", event == TULIP_MEDIAPOLL_TXPROBE_FAILED ? "txprobe failed" : "timeout", tulip_mediums[sc->tulip_probe_media]); #endif sc->tulip_probe_timeout = TULIP_IS_MEDIA_TP(sc->tulip_probe_media) ? 2500 : 1000; sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; sc->tulip_probe.probe_txprobes = 0; tulip_reset(sc); tulip_media_set(sc, sc->tulip_probe_media); sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; } tulip_timeout(sc); /* * If this is hanging off a phy, we know are doing NWAY and we have * forced the phy to a specific speed. Wait for link up before * before sending a packet. */ switch (sc->tulip_mediums[sc->tulip_probe_media]->mi_type) { case TULIP_MEDIAINFO_MII: { if (sc->tulip_probe_media != tulip_mii_phy_readspecific(sc)) return; break; } case TULIP_MEDIAINFO_SIA: { if (TULIP_IS_MEDIA_TP(sc->tulip_probe_media)) { if (TULIP_CSR_READ(sc, csr_sia_status) & TULIP_SIASTS_LINKFAIL) return; tulip_linkup(sc, sc->tulip_probe_media); #ifdef notyet if (sc->tulip_features & TULIP_HAVE_MII) tulip_timeout(sc); #endif return; } break; } case TULIP_MEDIAINFO_RESET: case TULIP_MEDIAINFO_SYM: case TULIP_MEDIAINFO_NONE: case TULIP_MEDIAINFO_GPR: { break; } } /* * Try to send a packet. 
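* (tulip_txprobe() queues the LLC TEST frame; the transmit completion
* comes back into this poll routine as a TXPROBE_OK or TXPROBE_FAILED
* event.)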
*/ tulip_txprobe(sc); } static void tulip_media_select(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); if (sc->tulip_features & TULIP_HAVE_GPR) { TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET|sc->tulip_gpinit); DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_gpdata); } /* * If this board has no media, just return */ if (sc->tulip_features & TULIP_HAVE_NOMEDIA) return; if (sc->tulip_media == TULIP_MEDIA_UNKNOWN) { TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); (*sc->tulip_boardsw->bd_media_poll)(sc, TULIP_MEDIAPOLL_START); } else { tulip_media_set(sc, sc->tulip_media); } } static void tulip_21040_mediainfo_init(tulip_softc_t * const sc, tulip_media_t media) { TULIP_LOCK_ASSERT(sc); sc->tulip_cmdmode |= TULIP_CMD_CAPTREFFCT|TULIP_CMD_THRSHLD160 |TULIP_CMD_BACKOFFCTR; sc->tulip_ifp->if_baudrate = 10000000; if (media == TULIP_MEDIA_10BASET || media == TULIP_MEDIA_UNKNOWN) { TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[0], 21040, 10BASET); TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[1], 21040, 10BASET_FD); sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; } if (media == TULIP_MEDIA_AUIBNC || media == TULIP_MEDIA_UNKNOWN) { TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[2], 21040, AUIBNC); } if (media == TULIP_MEDIA_UNKNOWN) { TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[3], 21040, EXTSIA); } } static void tulip_21040_media_probe(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_21040_mediainfo_init(sc, TULIP_MEDIA_UNKNOWN); return; } static void tulip_21040_10baset_only_media_probe(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_21040_mediainfo_init(sc, TULIP_MEDIA_10BASET); tulip_media_set(sc, TULIP_MEDIA_10BASET); sc->tulip_media = TULIP_MEDIA_10BASET; } static void tulip_21040_10baset_only_media_select(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); sc->tulip_flags |= TULIP_LINKUP; if (sc->tulip_media == TULIP_MEDIA_10BASET_FD) { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX; sc->tulip_flags &= ~TULIP_SQETEST; } else { sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; sc->tulip_flags |= TULIP_SQETEST; } tulip_media_set(sc, sc->tulip_media); } static void tulip_21040_auibnc_only_media_probe(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_21040_mediainfo_init(sc, TULIP_MEDIA_AUIBNC); sc->tulip_flags |= TULIP_SQETEST|TULIP_LINKUP; tulip_media_set(sc, TULIP_MEDIA_AUIBNC); sc->tulip_media = TULIP_MEDIA_AUIBNC; } static void tulip_21040_auibnc_only_media_select(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_media_set(sc, TULIP_MEDIA_AUIBNC); sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; } static const tulip_boardsw_t tulip_21040_boardsw = { TULIP_21040_GENERIC, tulip_21040_media_probe, tulip_media_select, tulip_media_poll, }; static const tulip_boardsw_t tulip_21040_10baset_only_boardsw = { TULIP_21040_GENERIC, tulip_21040_10baset_only_media_probe, tulip_21040_10baset_only_media_select, NULL, }; static const tulip_boardsw_t tulip_21040_auibnc_only_boardsw = { TULIP_21040_GENERIC, tulip_21040_auibnc_only_media_probe, tulip_21040_auibnc_only_media_select, NULL, }; static void tulip_21041_mediainfo_init(tulip_softc_t * const sc) { tulip_media_info_t * const mi = sc->tulip_mediainfo; TULIP_LOCK_ASSERT(sc); #ifdef notyet if (sc->tulip_revinfo >= 0x20) { TULIP_MEDIAINFO_SIA_INIT(sc, &mi[0], 21041P2, 10BASET); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[1], 21041P2, 10BASET_FD); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[0], 21041P2, AUI); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[1], 21041P2, BNC); return; } #endif TULIP_MEDIAINFO_SIA_INIT(sc, &mi[0], 21041, 10BASET); 
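/* Entries 0..3 of tulip_mediainfo: 10baseT, 10baseT-FDX, AUI and BNC, all driven through the 21041 SIA. */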
TULIP_MEDIAINFO_SIA_INIT(sc, &mi[1], 21041, 10BASET_FD); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[2], 21041, AUI); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[3], 21041, BNC); } static void tulip_21041_media_probe(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); sc->tulip_ifp->if_baudrate = 10000000; sc->tulip_cmdmode |= TULIP_CMD_CAPTREFFCT|TULIP_CMD_ENHCAPTEFFCT |TULIP_CMD_THRSHLD160|TULIP_CMD_BACKOFFCTR; sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; tulip_21041_mediainfo_init(sc); } static void tulip_21041_media_poll(tulip_softc_t * const sc, const tulip_mediapoll_event_t event) { u_int32_t sia_status; TULIP_LOCK_ASSERT(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_events[event]++; #endif if (event == TULIP_MEDIAPOLL_LINKFAIL) { if (sc->tulip_probe_state != TULIP_PROBE_INACTIVE || !TULIP_DO_AUTOSENSE(sc)) return; sc->tulip_media = TULIP_MEDIA_UNKNOWN; tulip_reset(sc); /* start probe */ return; } /* * If we've been been asked to start a poll or link change interrupt * restart the probe (and reset the tulip to a known state). */ if (event == TULIP_MEDIAPOLL_START) { sc->tulip_ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->tulip_cmdmode &= ~(TULIP_CMD_FULLDUPLEX|TULIP_CMD_RXRUN); #ifdef notyet if (sc->tulip_revinfo >= 0x20) { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX; sc->tulip_flags |= TULIP_DIDNWAY; } #endif TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; sc->tulip_probe_media = TULIP_MEDIA_10BASET; sc->tulip_probe_timeout = TULIP_21041_PROBE_10BASET_TIMEOUT; tulip_media_set(sc, TULIP_MEDIA_10BASET); tulip_timeout(sc); return; } if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE) return; if (event == TULIP_MEDIAPOLL_TXPROBE_OK) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txprobes_ok[sc->tulip_probe_media]++; #endif tulip_linkup(sc, sc->tulip_probe_media); return; } sia_status = TULIP_CSR_READ(sc, csr_sia_status); TULIP_CSR_WRITE(sc, csr_sia_status, sia_status); if ((sia_status & TULIP_SIASTS_LINKFAIL) == 0) { if (sc->tulip_revinfo >= 0x20) { if (sia_status & (PHYSTS_10BASET_FD << (16 - 6))) sc->tulip_probe_media = TULIP_MEDIA_10BASET_FD; } /* * If the link has passed LinkPass, 10baseT is the * proper media to use. */ tulip_linkup(sc, sc->tulip_probe_media); return; } /* * wait for up to 2.4 seconds for the link to reach pass state. * Only then start scanning the other media for activity. * choose media with receive activity over those without. */ if (sc->tulip_probe_media == TULIP_MEDIA_10BASET) { if (event != TULIP_MEDIAPOLL_TIMER) return; if (sc->tulip_probe_timeout > 0 && (sia_status & TULIP_SIASTS_OTHERRXACTIVITY) == 0) { tulip_timeout(sc); return; } sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT; sc->tulip_flags |= TULIP_WANTRXACT; if (sia_status & TULIP_SIASTS_OTHERRXACTIVITY) { sc->tulip_probe_media = TULIP_MEDIA_BNC; } else { sc->tulip_probe_media = TULIP_MEDIA_AUI; } tulip_media_set(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } /* * If we failed, clear the txprobe active flag. */ if (event == TULIP_MEDIAPOLL_TXPROBE_FAILED) sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; if (event == TULIP_MEDIAPOLL_TIMER) { /* * If we've received something, then that's our link! 
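* (TULIP_RXACT is set by the receive interrupt path; any traffic seen
* while probing a candidate medium is taken as proof of link.)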
*/ if (sc->tulip_flags & TULIP_RXACT) { tulip_linkup(sc, sc->tulip_probe_media); return; } /* * if no txprobe active */ if ((sc->tulip_flags & TULIP_TXPROBE_ACTIVE) == 0 && ((sc->tulip_flags & TULIP_WANTRXACT) == 0 || (sia_status & TULIP_SIASTS_RXACTIVITY))) { sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT; tulip_txprobe(sc); tulip_timeout(sc); return; } /* * Take 2 passes through before deciding to not * wait for receive activity. Then take another * two passes before spitting out a warning. */ if (sc->tulip_probe_timeout <= 0) { if (sc->tulip_flags & TULIP_WANTRXACT) { sc->tulip_flags &= ~TULIP_WANTRXACT; sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT; } else { device_printf(sc->tulip_dev, "autosense failed: cable problem?\n"); if ((sc->tulip_ifp->if_flags & IFF_UP) == 0) { sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; return; } } } } /* * Since this media failed to probe, try the other one. */ sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT; if (sc->tulip_probe_media == TULIP_MEDIA_AUI) { sc->tulip_probe_media = TULIP_MEDIA_BNC; } else { sc->tulip_probe_media = TULIP_MEDIA_AUI; } tulip_media_set(sc, sc->tulip_probe_media); sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; tulip_timeout(sc); } static const tulip_boardsw_t tulip_21041_boardsw = { TULIP_21041_GENERIC, tulip_21041_media_probe, tulip_media_select, tulip_21041_media_poll }; static const tulip_phy_attr_t tulip_mii_phy_attrlist[] = { { 0x20005c00, 0, /* 08-00-17 */ { { 0x19, 0x0040, 0x0040 }, /* 10TX */ { 0x19, 0x0040, 0x0000 }, /* 100TX */ }, #if defined(TULIP_DEBUG) "NS DP83840", #endif }, { 0x0281F400, 0, /* 00-A0-7D */ { { 0x12, 0x0010, 0x0000 }, /* 10T */ { }, /* 100TX */ { 0x12, 0x0010, 0x0010 }, /* 100T4 */ { 0x12, 0x0008, 0x0008 }, /* FULL_DUPLEX */ }, #if defined(TULIP_DEBUG) "Seeq 80C240" #endif }, #if 0 { 0x0015F420, 0, /* 00-A0-7D */ { { 0x12, 0x0010, 0x0000 }, /* 10T */ { }, /* 100TX */ { 0x12, 0x0010, 0x0010 }, /* 100T4 */ { 0x12, 0x0008, 0x0008 }, /* FULL_DUPLEX */ }, #if defined(TULIP_DEBUG) "Broadcom BCM5000" #endif }, #endif { 0x0281F400, 0, /* 00-A0-BE */ { { 0x11, 0x8000, 0x0000 }, /* 10T */ { 0x11, 0x8000, 0x8000 }, /* 100TX */ { }, /* 100T4 */ { 0x11, 0x4000, 0x4000 }, /* FULL_DUPLEX */ }, #if defined(TULIP_DEBUG) "ICS 1890" #endif }, { 0 } }; static tulip_media_t tulip_mii_phy_readspecific(tulip_softc_t * const sc) { const tulip_phy_attr_t *attr; u_int16_t data; u_int32_t id; unsigned idx = 0; static const tulip_media_t table[] = { TULIP_MEDIA_UNKNOWN, TULIP_MEDIA_10BASET, TULIP_MEDIA_100BASETX, TULIP_MEDIA_100BASET4, TULIP_MEDIA_UNKNOWN, TULIP_MEDIA_10BASET_FD, TULIP_MEDIA_100BASETX_FD, TULIP_MEDIA_UNKNOWN }; TULIP_LOCK_ASSERT(sc); /* * Don't read phy specific registers if link is not up. 
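* The lookup below composes an index into table[]: 1, 2 or 3 for
* 10baseT, 100baseTX or 100baseT4 as reported by the vendor-specific
* registers, plus 4 when the PHY's full-duplex bit is set.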
*/ data = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_STATUS); if ((data & (PHYSTS_LINK_UP|PHYSTS_EXTENDED_REGS)) != (PHYSTS_LINK_UP|PHYSTS_EXTENDED_REGS)) return TULIP_MEDIA_UNKNOWN; id = (tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_IDHIGH); for (attr = tulip_mii_phy_attrlist;; attr++) { if (attr->attr_id == 0) return TULIP_MEDIA_UNKNOWN; if ((id & ~0x0F) == attr->attr_id) break; } if (attr->attr_modes[PHY_MODE_100TX].pm_regno) { const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_100TX]; data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno); if ((data & pm->pm_mask) == pm->pm_value) idx = 2; } if (idx == 0 && attr->attr_modes[PHY_MODE_100T4].pm_regno) { const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_100T4]; data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno); if ((data & pm->pm_mask) == pm->pm_value) idx = 3; } if (idx == 0 && attr->attr_modes[PHY_MODE_10T].pm_regno) { const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_10T]; data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno); if ((data & pm->pm_mask) == pm->pm_value) idx = 1; } if (idx != 0 && attr->attr_modes[PHY_MODE_FULLDUPLEX].pm_regno) { const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_FULLDUPLEX]; data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno); idx += ((data & pm->pm_mask) == pm->pm_value ? 4 : 0); } return table[idx]; } static unsigned tulip_mii_get_phyaddr(tulip_softc_t * const sc, unsigned offset) { unsigned phyaddr; TULIP_LOCK_ASSERT(sc); for (phyaddr = 1; phyaddr < 32; phyaddr++) { unsigned status = tulip_mii_readreg(sc, phyaddr, PHYREG_STATUS); if (status == 0 || status == 0xFFFF || status < PHYSTS_10BASET) continue; if (offset == 0) return phyaddr; offset--; } if (offset == 0) { unsigned status = tulip_mii_readreg(sc, 0, PHYREG_STATUS); if (status == 0 || status == 0xFFFF || status < PHYSTS_10BASET) return TULIP_MII_NOPHY; return 0; } return TULIP_MII_NOPHY; } static int tulip_mii_map_abilities(tulip_softc_t * const sc, unsigned abilities) { TULIP_LOCK_ASSERT(sc); sc->tulip_abilities = abilities; if (abilities & PHYSTS_100BASETX_FD) { sc->tulip_probe_media = TULIP_MEDIA_100BASETX_FD; } else if (abilities & PHYSTS_100BASET4) { sc->tulip_probe_media = TULIP_MEDIA_100BASET4; } else if (abilities & PHYSTS_100BASETX) { sc->tulip_probe_media = TULIP_MEDIA_100BASETX; } else if (abilities & PHYSTS_10BASET_FD) { sc->tulip_probe_media = TULIP_MEDIA_10BASET_FD; } else if (abilities & PHYSTS_10BASET) { sc->tulip_probe_media = TULIP_MEDIA_10BASET; } else { sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; return 0; } sc->tulip_probe_state = TULIP_PROBE_INACTIVE; return 1; } static void tulip_mii_autonegotiate(tulip_softc_t * const sc, const unsigned phyaddr) { struct ifnet *ifp = sc->tulip_ifp; TULIP_LOCK_ASSERT(sc); switch (sc->tulip_probe_state) { case TULIP_PROBE_MEDIATEST: case TULIP_PROBE_INACTIVE: { sc->tulip_flags |= TULIP_DIDNWAY; tulip_mii_writereg(sc, phyaddr, PHYREG_CONTROL, PHYCTL_RESET); sc->tulip_probe_timeout = 3000; sc->tulip_intrmask |= TULIP_STS_ABNRMLINTR|TULIP_STS_NORMALINTR; sc->tulip_probe_state = TULIP_PROBE_PHYRESET; } /* FALLTHROUGH */ case TULIP_PROBE_PHYRESET: { u_int32_t status; u_int32_t data = tulip_mii_readreg(sc, phyaddr, PHYREG_CONTROL); if (data & PHYCTL_RESET) { if (sc->tulip_probe_timeout > 0) { tulip_timeout(sc); return; } printf("%s(phy%d): error: reset of PHY never completed!\n", ifp->if_xname, phyaddr); sc->tulip_flags 
&= ~TULIP_TXPROBE_ACTIVE; sc->tulip_probe_state = TULIP_PROBE_FAILED; sc->tulip_ifp->if_flags &= ~IFF_UP; sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; return; } status = tulip_mii_readreg(sc, phyaddr, PHYREG_STATUS); if ((status & PHYSTS_CAN_AUTONEG) == 0) { #if defined(TULIP_DEBUG) loudprintf("%s(phy%d): autonegotiation disabled\n", ifp->if_xname, phyaddr); #endif sc->tulip_flags &= ~TULIP_DIDNWAY; sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; return; } if (tulip_mii_readreg(sc, phyaddr, PHYREG_AUTONEG_ADVERTISEMENT) != ((status >> 6) | 0x01)) tulip_mii_writereg(sc, phyaddr, PHYREG_AUTONEG_ADVERTISEMENT, (status >> 6) | 0x01); tulip_mii_writereg(sc, phyaddr, PHYREG_CONTROL, data|PHYCTL_AUTONEG_RESTART|PHYCTL_AUTONEG_ENABLE); data = tulip_mii_readreg(sc, phyaddr, PHYREG_CONTROL); #if defined(TULIP_DEBUG) if ((data & PHYCTL_AUTONEG_ENABLE) == 0) loudprintf("%s(phy%d): oops: enable autonegotiation failed: 0x%04x\n", ifp->if_xname, phyaddr, data); else loudprintf("%s(phy%d): autonegotiation restarted: 0x%04x\n", ifp->if_xname, phyaddr, data); sc->tulip_dbg.dbg_nway_starts++; #endif sc->tulip_probe_state = TULIP_PROBE_PHYAUTONEG; sc->tulip_probe_timeout = 3000; } /* FALLTHROUGH */ case TULIP_PROBE_PHYAUTONEG: { u_int32_t status = tulip_mii_readreg(sc, phyaddr, PHYREG_STATUS); u_int32_t data; if ((status & PHYSTS_AUTONEG_DONE) == 0) { if (sc->tulip_probe_timeout > 0) { tulip_timeout(sc); return; } #if defined(TULIP_DEBUG) loudprintf("%s(phy%d): autonegotiation timeout: sts=0x%04x, ctl=0x%04x\n", ifp->if_xname, phyaddr, status, tulip_mii_readreg(sc, phyaddr, PHYREG_CONTROL)); #endif sc->tulip_flags &= ~TULIP_DIDNWAY; sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; return; } data = tulip_mii_readreg(sc, phyaddr, PHYREG_AUTONEG_ABILITIES); #if defined(TULIP_DEBUG) loudprintf("%s(phy%d): autonegotiation complete: 0x%04x\n", ifp->if_xname, phyaddr, data); #endif data = (data << 6) & status; if (!tulip_mii_map_abilities(sc, data)) sc->tulip_flags &= ~TULIP_DIDNWAY; return; } default: { #if defined(DIAGNOSTIC) panic("tulip_media_poll: botch at line %d\n", __LINE__); #endif break; } } #if defined(TULIP_DEBUG) loudprintf("%s(phy%d): autonegotiation failure: state = %d\n", ifp->if_xname, phyaddr, sc->tulip_probe_state); sc->tulip_dbg.dbg_nway_failures++; #endif } static void tulip_2114x_media_preset(tulip_softc_t * const sc) { const tulip_media_info_t *mi = NULL; tulip_media_t media = sc->tulip_media; TULIP_LOCK_ASSERT(sc); if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE) media = sc->tulip_media; else media = sc->tulip_probe_media; sc->tulip_cmdmode &= ~TULIP_CMD_PORTSELECT; sc->tulip_flags &= ~TULIP_SQETEST; if (media != TULIP_MEDIA_UNKNOWN && media != TULIP_MEDIA_MAX) { #if defined(TULIP_DEBUG) if (media < TULIP_MEDIA_MAX && sc->tulip_mediums[media] != NULL) { #endif mi = sc->tulip_mediums[media]; if (mi->mi_type == TULIP_MEDIAINFO_MII) { sc->tulip_cmdmode |= TULIP_CMD_PORTSELECT; } else if (mi->mi_type == TULIP_MEDIAINFO_GPR || mi->mi_type == TULIP_MEDIAINFO_SYM) { sc->tulip_cmdmode &= ~TULIP_GPR_CMDBITS; sc->tulip_cmdmode |= mi->mi_cmdmode; } else if (mi->mi_type == TULIP_MEDIAINFO_SIA) { TULIP_CSR_WRITE(sc, csr_sia_connectivity, TULIP_SIACONN_RESET); } #if defined(TULIP_DEBUG) } else { device_printf(sc->tulip_dev, "preset: bad media %d!\n", media); } #endif } switch (media) { case TULIP_MEDIA_BNC: case TULIP_MEDIA_AUI: case TULIP_MEDIA_10BASET: { sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; sc->tulip_cmdmode |= TULIP_CMD_TXTHRSHLDCTL; sc->tulip_ifp->if_baudrate = 10000000; sc->tulip_flags |= 
TULIP_SQETEST; break; } case TULIP_MEDIA_10BASET_FD: { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX|TULIP_CMD_TXTHRSHLDCTL; sc->tulip_ifp->if_baudrate = 10000000; break; } case TULIP_MEDIA_100BASEFX: case TULIP_MEDIA_100BASET4: case TULIP_MEDIA_100BASETX: { sc->tulip_cmdmode &= ~(TULIP_CMD_FULLDUPLEX|TULIP_CMD_TXTHRSHLDCTL); sc->tulip_cmdmode |= TULIP_CMD_PORTSELECT; sc->tulip_ifp->if_baudrate = 100000000; break; } case TULIP_MEDIA_100BASEFX_FD: case TULIP_MEDIA_100BASETX_FD: { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX|TULIP_CMD_PORTSELECT; sc->tulip_cmdmode &= ~TULIP_CMD_TXTHRSHLDCTL; sc->tulip_ifp->if_baudrate = 100000000; break; } default: { break; } } TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } /* ******************************************************************** * Start of 21140/21140A support which does not use the MII interface */ static void tulip_null_media_poll(tulip_softc_t * const sc, tulip_mediapoll_event_t event) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_events[event]++; #endif #if defined(DIAGNOSTIC) device_printf(sc->tulip_dev, "botch(media_poll) at line %d\n", __LINE__); #endif } static inline void tulip_21140_mediainit(tulip_softc_t * const sc, tulip_media_info_t * const mip, tulip_media_t const media, unsigned gpdata, unsigned cmdmode) { TULIP_LOCK_ASSERT(sc); sc->tulip_mediums[media] = mip; mip->mi_type = TULIP_MEDIAINFO_GPR; mip->mi_cmdmode = cmdmode; mip->mi_gpdata = gpdata; } static void tulip_21140_evalboard_media_probe(tulip_softc_t * const sc) { tulip_media_info_t *mip = sc->tulip_mediainfo; TULIP_LOCK_ASSERT(sc); sc->tulip_gpinit = TULIP_GP_EB_PINS; sc->tulip_gpdata = TULIP_GP_EB_INIT; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_PINS); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_INIT); TULIP_CSR_WRITE(sc, csr_command, TULIP_CSR_READ(sc, csr_command) | TULIP_CMD_PORTSELECT | TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE); TULIP_CSR_WRITE(sc, csr_command, TULIP_CSR_READ(sc, csr_command) & ~TULIP_CMD_TXTHRSHLDCTL); DELAY(1000000); if ((TULIP_CSR_READ(sc, csr_gp) & TULIP_GP_EB_OK100) != 0) { sc->tulip_media = TULIP_MEDIA_10BASET; } else { sc->tulip_media = TULIP_MEDIA_100BASETX; } tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET, TULIP_GP_EB_INIT, TULIP_CMD_TXTHRSHLDCTL); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD, TULIP_GP_EB_INIT, TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX, TULIP_GP_EB_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD, TULIP_GP_EB_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX); } static const tulip_boardsw_t tulip_21140_eb_boardsw = { TULIP_21140_DEC_EB, tulip_21140_evalboard_media_probe, tulip_media_select, tulip_null_media_poll, tulip_2114x_media_preset, }; static void tulip_21140_accton_media_probe(tulip_softc_t * const sc) { tulip_media_info_t *mip = sc->tulip_mediainfo; unsigned gpdata; TULIP_LOCK_ASSERT(sc); sc->tulip_gpinit = TULIP_GP_EB_PINS; sc->tulip_gpdata = TULIP_GP_EB_INIT; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_PINS); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_INIT); TULIP_CSR_WRITE(sc, csr_command, TULIP_CSR_READ(sc, csr_command) | TULIP_CMD_PORTSELECT | TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE); TULIP_CSR_WRITE(sc, csr_command, TULIP_CSR_READ(sc, csr_command) & ~TULIP_CMD_TXTHRSHLDCTL); DELAY(1000000); gpdata = TULIP_CSR_READ(sc, csr_gp); if ((gpdata & TULIP_GP_EN1207_UTP_INIT) == 0) { 
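/*
 * A gloss of the tests below (derived from this code, not from vendor
 * documentation): the EN1207's general-purpose pins appear to report
 * link sense per medium.  A clear UTP bit means a 10baseT link is
 * present; otherwise a clear BNC bit selects BNC, and failing both we
 * assume 100baseTX.
 */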
sc->tulip_media = TULIP_MEDIA_10BASET; } else { if ((gpdata & TULIP_GP_EN1207_BNC_INIT) == 0) { sc->tulip_media = TULIP_MEDIA_BNC; } else { sc->tulip_media = TULIP_MEDIA_100BASETX; } } tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_BNC, TULIP_GP_EN1207_BNC_INIT, TULIP_CMD_TXTHRSHLDCTL); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET, TULIP_GP_EN1207_UTP_INIT, TULIP_CMD_TXTHRSHLDCTL); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD, TULIP_GP_EN1207_UTP_INIT, TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX, TULIP_GP_EN1207_100_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD, TULIP_GP_EN1207_100_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX); } static const tulip_boardsw_t tulip_21140_accton_boardsw = { TULIP_21140_EN1207, tulip_21140_accton_media_probe, tulip_media_select, tulip_null_media_poll, tulip_2114x_media_preset, }; static void tulip_21140_smc9332_media_probe(tulip_softc_t * const sc) { tulip_media_info_t *mip = sc->tulip_mediainfo; int idx, cnt = 0; TULIP_LOCK_ASSERT(sc); TULIP_CSR_WRITE(sc, csr_command, TULIP_CMD_PORTSELECT|TULIP_CMD_MUSTBEONE); TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(10); /* Wait 10 microseconds (actually 50 PCI cycles but at 33MHz that comes to two microseconds but wait a bit longer anyways) */ TULIP_CSR_WRITE(sc, csr_command, TULIP_CMD_PORTSELECT | TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE); sc->tulip_gpinit = TULIP_GP_SMC_9332_PINS; sc->tulip_gpdata = TULIP_GP_SMC_9332_INIT; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_SMC_9332_PINS|TULIP_GP_PINSET); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_SMC_9332_INIT); DELAY(200000); for (idx = 1000; idx > 0; idx--) { u_int32_t csr = TULIP_CSR_READ(sc, csr_gp); if ((csr & (TULIP_GP_SMC_9332_OK10|TULIP_GP_SMC_9332_OK100)) == (TULIP_GP_SMC_9332_OK10|TULIP_GP_SMC_9332_OK100)) { if (++cnt > 100) break; } else if ((csr & TULIP_GP_SMC_9332_OK10) == 0) { break; } else { cnt = 0; } DELAY(1000); } sc->tulip_media = cnt > 100 ? 
TULIP_MEDIA_100BASETX : TULIP_MEDIA_10BASET; tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX, TULIP_GP_SMC_9332_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD, TULIP_GP_SMC_9332_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET, TULIP_GP_SMC_9332_INIT, TULIP_CMD_TXTHRSHLDCTL); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD, TULIP_GP_SMC_9332_INIT, TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX); } static const tulip_boardsw_t tulip_21140_smc9332_boardsw = { TULIP_21140_SMC_9332, tulip_21140_smc9332_media_probe, tulip_media_select, tulip_null_media_poll, tulip_2114x_media_preset, }; static void tulip_21140_cogent_em100_media_probe(tulip_softc_t * const sc) { tulip_media_info_t *mip = sc->tulip_mediainfo; u_int32_t cmdmode = TULIP_CSR_READ(sc, csr_command); TULIP_LOCK_ASSERT(sc); sc->tulip_gpinit = TULIP_GP_EM100_PINS; sc->tulip_gpdata = TULIP_GP_EM100_INIT; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EM100_PINS); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EM100_INIT); cmdmode = TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION|TULIP_CMD_MUSTBEONE; cmdmode &= ~(TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_SCRAMBLER); if (sc->tulip_rombuf[32] == TULIP_COGENT_EM100FX_ID) { TULIP_CSR_WRITE(sc, csr_command, cmdmode); sc->tulip_media = TULIP_MEDIA_100BASEFX; tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASEFX, TULIP_GP_EM100_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASEFX_FD, TULIP_GP_EM100_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_FULLDUPLEX); } else { TULIP_CSR_WRITE(sc, csr_command, cmdmode|TULIP_CMD_SCRAMBLER); sc->tulip_media = TULIP_MEDIA_100BASETX; tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX, TULIP_GP_EM100_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD, TULIP_GP_EM100_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX); } } static const tulip_boardsw_t tulip_21140_cogent_em100_boardsw = { TULIP_21140_COGENT_EM100, tulip_21140_cogent_em100_media_probe, tulip_media_select, tulip_null_media_poll, tulip_2114x_media_preset }; static void tulip_21140_znyx_zx34x_media_probe(tulip_softc_t * const sc) { tulip_media_info_t *mip = sc->tulip_mediainfo; int cnt10 = 0, cnt100 = 0, idx; TULIP_LOCK_ASSERT(sc); sc->tulip_gpinit = TULIP_GP_ZX34X_PINS; sc->tulip_gpdata = TULIP_GP_ZX34X_INIT; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ZX34X_PINS); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ZX34X_INIT); TULIP_CSR_WRITE(sc, csr_command, TULIP_CSR_READ(sc, csr_command) | TULIP_CMD_PORTSELECT | TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE); TULIP_CSR_WRITE(sc, csr_command, TULIP_CSR_READ(sc, csr_command) & ~TULIP_CMD_TXTHRSHLDCTL); DELAY(200000); for (idx = 1000; idx > 0; idx--) { u_int32_t csr = TULIP_CSR_READ(sc, csr_gp); if ((csr & (TULIP_GP_ZX34X_LNKFAIL|TULIP_GP_ZX34X_SYMDET|TULIP_GP_ZX34X_SIGDET)) == (TULIP_GP_ZX34X_LNKFAIL|TULIP_GP_ZX34X_SYMDET|TULIP_GP_ZX34X_SIGDET)) { if (++cnt100 > 100) break; } else if ((csr & TULIP_GP_ZX34X_LNKFAIL) == 0) { if (++cnt10 > 100) break; } else { cnt10 = 0; cnt100 = 0; } DELAY(1000); } sc->tulip_media = cnt100 > 100 ? 
TULIP_MEDIA_100BASETX : TULIP_MEDIA_10BASET; tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET, TULIP_GP_ZX34X_INIT, TULIP_CMD_TXTHRSHLDCTL); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD, TULIP_GP_ZX34X_INIT, TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX, TULIP_GP_ZX34X_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER); tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD, TULIP_GP_ZX34X_INIT, TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX); } static const tulip_boardsw_t tulip_21140_znyx_zx34x_boardsw = { TULIP_21140_ZNYX_ZX34X, tulip_21140_znyx_zx34x_media_probe, tulip_media_select, tulip_null_media_poll, tulip_2114x_media_preset, }; static void tulip_2114x_media_probe(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); sc->tulip_cmdmode |= TULIP_CMD_MUSTBEONE |TULIP_CMD_BACKOFFCTR|TULIP_CMD_THRSHLD72; } static const tulip_boardsw_t tulip_2114x_isv_boardsw = { TULIP_21140_ISV, tulip_2114x_media_probe, tulip_media_select, tulip_media_poll, tulip_2114x_media_preset, }; /* * ******** END of chip-specific handlers. *********** */ /* * Code to read the SROM and MII bit streams (I2C) */ #define EMIT do { TULIP_CSR_WRITE(sc, csr_srom_mii, csr); DELAY(1); } while (0) static void tulip_srom_idle(tulip_softc_t * const sc) { unsigned bit, csr; csr = SROMSEL ; EMIT; csr = SROMSEL | SROMRD; EMIT; csr ^= SROMCS; EMIT; csr ^= SROMCLKON; EMIT; /* * Write 25 cycles of 0 which will force the SROM to be idle. */ for (bit = 3 + SROM_BITWIDTH + 16; bit > 0; bit--) { csr ^= SROMCLKOFF; EMIT; /* clock low; data not valid */ csr ^= SROMCLKON; EMIT; /* clock high; data valid */ } csr ^= SROMCLKOFF; EMIT; csr ^= SROMCS; EMIT; csr = 0; EMIT; } static void tulip_srom_read(tulip_softc_t * const sc) { unsigned idx; const unsigned bitwidth = SROM_BITWIDTH; const unsigned cmdmask = (SROMCMD_RD << bitwidth); const unsigned msb = 1 << (bitwidth + 3 - 1); unsigned lastidx = (1 << bitwidth) - 1; tulip_srom_idle(sc); for (idx = 0; idx <= lastidx; idx++) { unsigned lastbit, data, bits, bit, csr; csr = SROMSEL ; EMIT; csr = SROMSEL | SROMRD; EMIT; csr ^= SROMCSON; EMIT; csr ^= SROMCLKON; EMIT; lastbit = 0; for (bits = idx|cmdmask, bit = bitwidth + 3; bit > 0; bit--, bits <<= 1) { const unsigned thisbit = bits & msb; csr ^= SROMCLKOFF; EMIT; /* clock low; data not valid */ if (thisbit != lastbit) { csr ^= SROMDOUT; EMIT; /* clock low; invert data */ } else { EMIT; } csr ^= SROMCLKON; EMIT; /* clock high; data valid */ lastbit = thisbit; } csr ^= SROMCLKOFF; EMIT; for (data = 0, bits = 0; bits < 16; bits++) { data <<= 1; csr ^= SROMCLKON; EMIT; /* clock high; data valid */ data |= TULIP_CSR_READ(sc, csr_srom_mii) & SROMDIN ? 1 : 0; csr ^= SROMCLKOFF; EMIT; /* clock low; data not valid */ } sc->tulip_rombuf[idx*2] = data & 0xFF; sc->tulip_rombuf[idx*2+1] = data >> 8; csr = SROMSEL | SROMRD; EMIT; csr = 0; EMIT; } tulip_srom_idle(sc); } #define MII_EMIT do { TULIP_CSR_WRITE(sc, csr_srom_mii, csr); DELAY(1); } while (0) static void tulip_mii_writebits(tulip_softc_t * const sc, unsigned data, unsigned bits) { unsigned msb = 1 << (bits - 1); unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK); unsigned lastbit = (csr & MII_DOUT) ?
msb : 0; TULIP_LOCK_ASSERT(sc); csr |= MII_WR; MII_EMIT; /* clock low; assert write */ for (; bits > 0; bits--, data <<= 1) { const unsigned thisbit = data & msb; if (thisbit != lastbit) { csr ^= MII_DOUT; MII_EMIT; /* clock low; invert data */ } csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */ lastbit = thisbit; csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */ } } static void tulip_mii_turnaround(tulip_softc_t * const sc, unsigned cmd) { unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK); TULIP_LOCK_ASSERT(sc); if (cmd == MII_WRCMD) { csr |= MII_DOUT; MII_EMIT; /* clock low; change data */ csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */ csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */ csr ^= MII_DOUT; MII_EMIT; /* clock low; change data */ } else { csr |= MII_RD; MII_EMIT; /* clock low; switch to read */ } csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */ csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */ } static unsigned tulip_mii_readbits(tulip_softc_t * const sc) { unsigned data; unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK); int idx; TULIP_LOCK_ASSERT(sc); for (idx = 0, data = 0; idx < 16; idx++) { data <<= 1; /* this is NOOP on the first pass through */ csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */ if (TULIP_CSR_READ(sc, csr_srom_mii) & MII_DIN) data |= 1; csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */ } csr ^= MII_RD; MII_EMIT; /* clock low; turn off read */ return data; } static unsigned tulip_mii_readreg(tulip_softc_t * const sc, unsigned devaddr, unsigned regno) { unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK); unsigned data; TULIP_LOCK_ASSERT(sc); csr &= ~(MII_RD|MII_CLK); MII_EMIT; tulip_mii_writebits(sc, MII_PREAMBLE, 32); tulip_mii_writebits(sc, MII_RDCMD, 8); tulip_mii_writebits(sc, devaddr, 5); tulip_mii_writebits(sc, regno, 5); tulip_mii_turnaround(sc, MII_RDCMD); data = tulip_mii_readbits(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_phyregs[regno][0] = data; sc->tulip_dbg.dbg_phyregs[regno][1]++; #endif return data; } static void tulip_mii_writereg(tulip_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) { unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK); TULIP_LOCK_ASSERT(sc); csr &= ~(MII_RD|MII_CLK); MII_EMIT; tulip_mii_writebits(sc, MII_PREAMBLE, 32); tulip_mii_writebits(sc, MII_WRCMD, 8); tulip_mii_writebits(sc, devaddr, 5); tulip_mii_writebits(sc, regno, 5); tulip_mii_turnaround(sc, MII_WRCMD); tulip_mii_writebits(sc, data, 16); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_phyregs[regno][2] = data; sc->tulip_dbg.dbg_phyregs[regno][3]++; #endif } #define tulip_mchash(mca) (ether_crc32_le(mca, 6) & 0x1FF) #define tulip_srom_crcok(databuf) ( \ ((ether_crc32_le(databuf, 126) & 0xFFFFU) ^ 0xFFFFU) == \ ((databuf)[126] | ((databuf)[127] << 8))) static void tulip_identify_dec_nic(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "DEC "); #define D0 4 if (sc->tulip_chipid <= TULIP_21040) return; if (bcmp(sc->tulip_rombuf + 29, "DE500", 5) == 0 || bcmp(sc->tulip_rombuf + 29, "DE450", 5) == 0) { bcopy(sc->tulip_rombuf + 29, &sc->tulip_boardid[D0], 8); sc->tulip_boardid[D0+8] = ' '; } #undef D0 } static void tulip_identify_znyx_nic(tulip_softc_t * const sc) { unsigned id = 0; TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "ZNYX ZX3XX "); if (sc->tulip_chipid == TULIP_21140 || sc->tulip_chipid == TULIP_21140A) { unsigned znyx_ptr; 
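/*
 * A note on the MII access routines above (a sketch added for clarity,
 * not part of the original driver): tulip_mii_readreg() bit-bangs a
 * standard IEEE 802.3 clause-22 management frame through the shared
 * SROM/MII CSR:
 *
 *	tulip_mii_writebits(sc, MII_PREAMBLE, 32);	(nominally 32 ones)
 *	tulip_mii_writebits(sc, MII_RDCMD, 8);		(start + read opcode)
 *	tulip_mii_writebits(sc, devaddr, 5);		(PHY address)
 *	tulip_mii_writebits(sc, regno, 5);		(register number)
 *	tulip_mii_turnaround(sc, MII_RDCMD);		(release the data line)
 *	data = tulip_mii_readbits(sc);			(16 data bits, MSB first)
 *
 * tulip_mii_writereg() differs only in sending MII_WRCMD and clocking
 * out the 16 data bits itself.
 */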
sc->tulip_boardid[8] = '4'; znyx_ptr = sc->tulip_rombuf[124] + 256 * sc->tulip_rombuf[125]; if (znyx_ptr < 26 || znyx_ptr > 116) { sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw; return; } /* ZX344 = 0010 .. 0013FF */ if (sc->tulip_rombuf[znyx_ptr] == 0x4A && sc->tulip_rombuf[znyx_ptr + 1] == 0x52 && sc->tulip_rombuf[znyx_ptr + 2] == 0x01) { id = sc->tulip_rombuf[znyx_ptr + 5] + 256 * sc->tulip_rombuf[znyx_ptr + 4]; if ((id >> 8) == (TULIP_ZNYX_ID_ZX342 >> 8)) { sc->tulip_boardid[9] = '2'; if (id == TULIP_ZNYX_ID_ZX342B) { sc->tulip_boardid[10] = 'B'; sc->tulip_boardid[11] = ' '; } sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw; } else if (id == TULIP_ZNYX_ID_ZX344) { sc->tulip_boardid[10] = '4'; sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw; } else if (id == TULIP_ZNYX_ID_ZX345) { sc->tulip_boardid[9] = (sc->tulip_rombuf[19] > 1) ? '8' : '5'; } else if (id == TULIP_ZNYX_ID_ZX346) { sc->tulip_boardid[9] = '6'; } else if (id == TULIP_ZNYX_ID_ZX351) { sc->tulip_boardid[8] = '5'; sc->tulip_boardid[9] = '1'; } } if (id == 0) { /* * Assume it's a ZX342... */ sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw; } return; } sc->tulip_boardid[8] = '1'; if (sc->tulip_chipid == TULIP_21041) { sc->tulip_boardid[10] = '1'; return; } if (sc->tulip_rombuf[32] == 0x4A && sc->tulip_rombuf[33] == 0x52) { id = sc->tulip_rombuf[37] + 256 * sc->tulip_rombuf[36]; if (id == TULIP_ZNYX_ID_ZX312T) { sc->tulip_boardid[9] = '2'; sc->tulip_boardid[10] = 'T'; sc->tulip_boardid[11] = ' '; sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw; } else if (id == TULIP_ZNYX_ID_ZX314_INTA) { sc->tulip_boardid[9] = '4'; sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw; sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM; } else if (id == TULIP_ZNYX_ID_ZX314) { sc->tulip_boardid[9] = '4'; sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw; sc->tulip_features |= TULIP_HAVE_BASEROM; } else if (id == TULIP_ZNYX_ID_ZX315_INTA) { sc->tulip_boardid[9] = '5'; sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM; } else if (id == TULIP_ZNYX_ID_ZX315) { sc->tulip_boardid[9] = '5'; sc->tulip_features |= TULIP_HAVE_BASEROM; } else { id = 0; } } if (id == 0) { if ((sc->tulip_enaddr[3] & ~3) == 0xF0 && (sc->tulip_enaddr[5] & 2) == 0) { sc->tulip_boardid[9] = '4'; sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw; sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM; } else if ((sc->tulip_enaddr[3] & ~3) == 0xF4 && (sc->tulip_enaddr[5] & 1) == 0) { sc->tulip_boardid[9] = '5'; sc->tulip_boardsw = &tulip_21040_boardsw; sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM; } else if ((sc->tulip_enaddr[3] & ~3) == 0xEC) { sc->tulip_boardid[9] = '2'; sc->tulip_boardsw = &tulip_21040_boardsw; } } } static void tulip_identify_smc_nic(tulip_softc_t * const sc) { u_int32_t id1, id2, ei; int auibnc = 0, utp = 0; char *cp; TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "SMC "); if (sc->tulip_chipid == TULIP_21041) return; if (sc->tulip_chipid != TULIP_21040) { if (sc->tulip_boardsw != &tulip_2114x_isv_boardsw) { strcpy(&sc->tulip_boardid[4], "9332DST "); sc->tulip_boardsw = &tulip_21140_smc9332_boardsw; } else if (sc->tulip_features & (TULIP_HAVE_BASEROM|TULIP_HAVE_SLAVEDROM)) { strcpy(&sc->tulip_boardid[4], "9334BDT "); } else { strcpy(&sc->tulip_boardid[4], "9332BDT "); } return; } id1 = sc->tulip_rombuf[0x60] | (sc->tulip_rombuf[0x61] << 8); id2 = sc->tulip_rombuf[0x62] | (sc->tulip_rombuf[0x63] << 8); ei = sc->tulip_rombuf[0x66] | (sc->tulip_rombuf[0x67] << 8); 
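/*
 * An illustration with a hypothetical ID word (not from any real
 * board): for id1 = 0x4032 the tests below see bit 0 clear (append
 * 'B', AUI/BNC present), a low byte of 0x32 which is not > 0x32 (no
 * 'T', so no UTP), and bit 14 set (no 'A'); the board would be named
 * "SMC 8432B".
 */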
strcpy(&sc->tulip_boardid[4], "8432"); cp = &sc->tulip_boardid[8]; if ((id1 & 1) == 0) *cp++ = 'B', auibnc = 1; if ((id1 & 0xFF) > 0x32) *cp++ = 'T', utp = 1; if ((id1 & 0x4000) == 0) *cp++ = 'A', auibnc = 1; if (id2 == 0x15) { sc->tulip_boardid[7] = '4'; *cp++ = '-'; *cp++ = 'C'; *cp++ = 'H'; *cp++ = (ei ? '2' : '1'); } *cp++ = ' '; *cp = '\0'; if (utp && !auibnc) sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw; else if (!utp && auibnc) sc->tulip_boardsw = &tulip_21040_auibnc_only_boardsw; } static void tulip_identify_cogent_nic(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "Cogent "); if (sc->tulip_chipid == TULIP_21140 || sc->tulip_chipid == TULIP_21140A) { if (sc->tulip_rombuf[32] == TULIP_COGENT_EM100TX_ID) { strcat(sc->tulip_boardid, "EM100TX "); sc->tulip_boardsw = &tulip_21140_cogent_em100_boardsw; #if defined(TULIP_COGENT_EM110TX_ID) } else if (sc->tulip_rombuf[32] == TULIP_COGENT_EM110TX_ID) { strcat(sc->tulip_boardid, "EM110TX "); sc->tulip_boardsw = &tulip_21140_cogent_em100_boardsw; #endif } else if (sc->tulip_rombuf[32] == TULIP_COGENT_EM100FX_ID) { strcat(sc->tulip_boardid, "EM100FX "); sc->tulip_boardsw = &tulip_21140_cogent_em100_boardsw; } /* * Magic number (0x24001109U) is the SubVendor (0x2400) and * SubDevId (0x1109) for the ANA6944TX (EM440TX). */ if (*(u_int32_t *) sc->tulip_rombuf == 0x24001109U && (sc->tulip_features & TULIP_HAVE_BASEROM)) { /* * Cogent (Adaptec) is still mapping all INTs to INTA of * first 21140. Dumb! Dumb! */ strcat(sc->tulip_boardid, "EM440TX "); sc->tulip_features |= TULIP_HAVE_SHAREDINTR; } } else if (sc->tulip_chipid == TULIP_21040) { sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM; } } static void tulip_identify_accton_nic(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "ACCTON "); switch (sc->tulip_chipid) { case TULIP_21140A: strcat(sc->tulip_boardid, "EN1207 "); if (sc->tulip_boardsw != &tulip_2114x_isv_boardsw) sc->tulip_boardsw = &tulip_21140_accton_boardsw; break; case TULIP_21140: strcat(sc->tulip_boardid, "EN1207TX "); if (sc->tulip_boardsw != &tulip_2114x_isv_boardsw) sc->tulip_boardsw = &tulip_21140_eb_boardsw; break; case TULIP_21040: strcat(sc->tulip_boardid, "EN1203 "); sc->tulip_boardsw = &tulip_21040_boardsw; break; case TULIP_21041: strcat(sc->tulip_boardid, "EN1203 "); sc->tulip_boardsw = &tulip_21041_boardsw; break; default: sc->tulip_boardsw = &tulip_2114x_isv_boardsw; break; } } static void tulip_identify_asante_nic(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "Asante "); if ((sc->tulip_chipid == TULIP_21140 || sc->tulip_chipid == TULIP_21140A) && sc->tulip_boardsw != &tulip_2114x_isv_boardsw) { tulip_media_info_t *mi = sc->tulip_mediainfo; int idx; /* * The Asante Fast Ethernet doesn't always ship with a valid * new format SROM. So if it isn't in the new format, we cheat and * set it up as if we had one.
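 * ("New format" here means the tabular SROM layout that
 * tulip_srom_decode() parses.  The code below hand-builds the
 * equivalent MII media-info record: reset the PHY through the
 * general-purpose pins, probe for its address, and then hardwire the
 * usual 10/100 capability masks.)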
*/ sc->tulip_gpinit = TULIP_GP_ASANTE_PINS; sc->tulip_gpdata = 0; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ASANTE_PINS|TULIP_GP_PINSET); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ASANTE_PHYRESET); DELAY(100); TULIP_CSR_WRITE(sc, csr_gp, 0); mi->mi_type = TULIP_MEDIAINFO_MII; mi->mi_gpr_length = 0; mi->mi_gpr_offset = 0; mi->mi_reset_length = 0; mi->mi_reset_offset = 0; mi->mi_phyaddr = TULIP_MII_NOPHY; for (idx = 20; idx > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx--) { DELAY(10000); mi->mi_phyaddr = tulip_mii_get_phyaddr(sc, 0); } if (mi->mi_phyaddr == TULIP_MII_NOPHY) { device_printf(sc->tulip_dev, "can't find phy 0\n"); return; } sc->tulip_features |= TULIP_HAVE_MII; mi->mi_capabilities = PHYSTS_10BASET|PHYSTS_10BASET_FD|PHYSTS_100BASETX|PHYSTS_100BASETX_FD; mi->mi_advertisement = PHYSTS_10BASET|PHYSTS_10BASET_FD|PHYSTS_100BASETX|PHYSTS_100BASETX_FD; mi->mi_full_duplex = PHYSTS_10BASET_FD|PHYSTS_100BASETX_FD; mi->mi_tx_threshold = PHYSTS_10BASET|PHYSTS_10BASET_FD; TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASET4); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET); mi->mi_phyid = (tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDHIGH); sc->tulip_boardsw = &tulip_2114x_isv_boardsw; } } static void tulip_identify_compex_nic(tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "COMPEX "); if (sc->tulip_chipid == TULIP_21140A) { int root_unit; tulip_softc_t *root_sc = NULL; strcat(sc->tulip_boardid, "400TX/PCI "); /* * All 4 chips on these boards share an interrupt. This code is * copied from tulip_read_macaddr. */ sc->tulip_features |= TULIP_HAVE_SHAREDINTR; for (root_unit = sc->tulip_unit - 1; root_unit >= 0; root_unit--) { root_sc = tulips[root_unit]; if (root_sc == NULL || !(root_sc->tulip_features & TULIP_HAVE_SLAVEDINTR)) break; root_sc = NULL; } if (root_sc != NULL && root_sc->tulip_chipid == sc->tulip_chipid && root_sc->tulip_pci_busno == sc->tulip_pci_busno) { sc->tulip_features |= TULIP_HAVE_SLAVEDINTR; sc->tulip_slaves = root_sc->tulip_slaves; root_sc->tulip_slaves = sc; } else if (sc->tulip_features & TULIP_HAVE_SLAVEDINTR) { printf("\nCannot find master device for %s interrupts", sc->tulip_ifp->if_xname); } } else { strcat(sc->tulip_boardid, "unknown "); } /* sc->tulip_boardsw = &tulip_21140_eb_boardsw; */ return; } static int tulip_srom_decode(tulip_softc_t * const sc) { unsigned idx1, idx2, idx3; const tulip_srom_header_t *shp = (const tulip_srom_header_t *) &sc->tulip_rombuf[0]; const tulip_srom_adapter_info_t *saip = (const tulip_srom_adapter_info_t *) (shp + 1); tulip_srom_media_t srom_media; tulip_media_info_t *mi = sc->tulip_mediainfo; const u_int8_t *dp; u_int32_t leaf_offset, blocks, data; TULIP_LOCK_ASSERT(sc); for (idx1 = 0; idx1 < shp->sh_adapter_count; idx1++, saip++) { if (shp->sh_adapter_count == 1) break; if (saip->sai_device == sc->tulip_pci_devno) break; } /* * Didn't find the right media block for this card. */ if (idx1 == shp->sh_adapter_count) return 0; /* * Save the hardware address. */ bcopy(shp->sh_ieee802_address, sc->tulip_enaddr, 6); /* * If this is a multiple port card, add the adapter index to the last * byte of the hardware address. (if it isn't multiport, adding 0 * won't hurt.)
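 * (Illustration with a made-up address: on a 4-port board whose SROM
 * carries the base address 00:40:0b:90:12:00, ports 0 through 3 come
 * out as 00:40:0b:90:12:00 .. 00:40:0b:90:12:03, idx1 being the index
 * of the adapter-info entry matched on sai_device above.)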
*/ sc->tulip_enaddr[5] += idx1; leaf_offset = saip->sai_leaf_offset_lowbyte + saip->sai_leaf_offset_highbyte * 256; dp = sc->tulip_rombuf + leaf_offset; sc->tulip_conntype = (tulip_srom_connection_t) (dp[0] + dp[1] * 256); dp += 2; for (idx2 = 0;; idx2++) { if (tulip_srom_conninfo[idx2].sc_type == sc->tulip_conntype || tulip_srom_conninfo[idx2].sc_type == TULIP_SROM_CONNTYPE_NOT_USED) break; } sc->tulip_connidx = idx2; if (sc->tulip_chipid == TULIP_21041) { blocks = *dp++; for (idx2 = 0; idx2 < blocks; idx2++) { tulip_media_t media; data = *dp++; srom_media = (tulip_srom_media_t) (data & 0x3F); for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media != TULIP_MEDIA_UNKNOWN) { if (data & TULIP_SROM_21041_EXTENDED) { mi->mi_type = TULIP_MEDIAINFO_SIA; sc->tulip_mediums[media] = mi; mi->mi_sia_connectivity = dp[0] + dp[1] * 256; mi->mi_sia_tx_rx = dp[2] + dp[3] * 256; mi->mi_sia_general = dp[4] + dp[5] * 256; mi++; } else { switch (media) { case TULIP_MEDIA_BNC: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, BNC); mi++; break; } case TULIP_MEDIA_AUI: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, AUI); mi++; break; } case TULIP_MEDIA_10BASET: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET); mi++; break; } case TULIP_MEDIA_10BASET_FD: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET_FD); mi++; break; } default: { break; } } } } if (data & TULIP_SROM_21041_EXTENDED) dp += 6; } #ifdef notdef if (blocks == 0) { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, BNC); mi++; TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, AUI); mi++; TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET); mi++; TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET_FD); mi++; } #endif } else { unsigned length, type; tulip_media_t gp_media = TULIP_MEDIA_UNKNOWN; if (sc->tulip_features & TULIP_HAVE_GPR) sc->tulip_gpinit = *dp++; blocks = *dp++; for (idx2 = 0; idx2 < blocks; idx2++) { const u_int8_t *ep; if ((*dp & 0x80) == 0) { length = 4; type = 0; } else { length = (*dp++ & 0x7f) - 1; type = *dp++ & 0x3f; } ep = dp + length; switch (type & 0x3f) { case 0: { /* 21140[A] GPR block */ tulip_media_t media; srom_media = (tulip_srom_media_t)(dp[0] & 0x3f); for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media == TULIP_MEDIA_UNKNOWN) break; mi->mi_type = TULIP_MEDIAINFO_GPR; sc->tulip_mediums[media] = mi; mi->mi_gpdata = dp[1]; if (media > gp_media && !TULIP_IS_MEDIA_FD(media)) { sc->tulip_gpdata = mi->mi_gpdata; gp_media = media; } data = dp[2] + dp[3] * 256; mi->mi_cmdmode = TULIP_SROM_2114X_CMDBITS(data); if (data & TULIP_SROM_2114X_NOINDICATOR) { mi->mi_actmask = 0; } else { #if 0 mi->mi_default = (data & TULIP_SROM_2114X_DEFAULT) != 0; #endif mi->mi_actmask = TULIP_SROM_2114X_BITPOS(data); mi->mi_actdata = (data & TULIP_SROM_2114X_POLARITY) ? 0 : mi->mi_actmask; } mi++; break; } case 1: { /* 21140[A] MII block */ const unsigned phyno = *dp++; mi->mi_type = TULIP_MEDIAINFO_MII; mi->mi_gpr_length = *dp++; mi->mi_gpr_offset = dp - sc->tulip_rombuf; dp += mi->mi_gpr_length; mi->mi_reset_length = *dp++; mi->mi_reset_offset = dp - sc->tulip_rombuf; dp += mi->mi_reset_length; /* * Before we probe for a PHY, use the GPR information * to select it. If we don't, it may be inaccessible. 
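 * (A sketch of the type-1 MII leaf block being walked here, as implied
 * by this parsing code:
 *
 *	u_int8_t phy#;
 *	u_int8_t gpr_length;	followed by gpr_length GP-port bytes
 *	u_int8_t reset_length;	followed by reset_length GP-port bytes
 *	le16 capabilities, advertisement, full_duplex, tx_threshold;
 *
 * each reset/gpr byte is written to the general-purpose port (csr_gp)
 * with a short delay, which is what "selects" the PHY on boards that
 * gate MDIO through the GP pins.)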
*/ TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_gpinit|TULIP_GP_PINSET); for (idx3 = 0; idx3 < mi->mi_reset_length; idx3++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_reset_offset + idx3]); } sc->tulip_phyaddr = mi->mi_phyaddr; for (idx3 = 0; idx3 < mi->mi_gpr_length; idx3++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_gpr_offset + idx3]); } /* * At least write something! */ if (mi->mi_reset_length == 0 && mi->mi_gpr_length == 0) TULIP_CSR_WRITE(sc, csr_gp, 0); mi->mi_phyaddr = TULIP_MII_NOPHY; for (idx3 = 20; idx3 > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx3--) { DELAY(10000); mi->mi_phyaddr = tulip_mii_get_phyaddr(sc, phyno); } if (mi->mi_phyaddr == TULIP_MII_NOPHY) { #if defined(TULIP_DEBUG) device_printf(sc->tulip_dev, "can't find phy %d\n", phyno); #endif break; } sc->tulip_features |= TULIP_HAVE_MII; mi->mi_capabilities = dp[0] + dp[1] * 256; dp += 2; mi->mi_advertisement = dp[0] + dp[1] * 256; dp += 2; mi->mi_full_duplex = dp[0] + dp[1] * 256; dp += 2; mi->mi_tx_threshold = dp[0] + dp[1] * 256; dp += 2; TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASET4); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET); mi->mi_phyid = (tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDHIGH); mi++; break; } case 2: { /* 2114[23] SIA block */ tulip_media_t media; srom_media = (tulip_srom_media_t)(dp[0] & 0x3f); for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media == TULIP_MEDIA_UNKNOWN) break; mi->mi_type = TULIP_MEDIAINFO_SIA; sc->tulip_mediums[media] = mi; if (dp[0] & 0x40) { mi->mi_sia_connectivity = dp[1] + dp[2] * 256; mi->mi_sia_tx_rx = dp[3] + dp[4] * 256; mi->mi_sia_general = dp[5] + dp[6] * 256; dp += 6; } else { switch (media) { case TULIP_MEDIA_BNC: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, BNC); break; } case TULIP_MEDIA_AUI: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, AUI); break; } case TULIP_MEDIA_10BASET: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, 10BASET); sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; break; } case TULIP_MEDIA_10BASET_FD: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, 10BASET_FD); sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; break; } default: { goto bad_media; } } } mi->mi_sia_gp_control = (dp[1] + dp[2] * 256) << 16; mi->mi_sia_gp_data = (dp[3] + dp[4] * 256) << 16; mi++; bad_media: break; } case 3: { /* 2114[23] MII PHY block */ const unsigned phyno = *dp++; const u_int8_t *dp0; mi->mi_type = TULIP_MEDIAINFO_MII; mi->mi_gpr_length = *dp++; mi->mi_gpr_offset = dp - sc->tulip_rombuf; dp += 2 * mi->mi_gpr_length; mi->mi_reset_length = *dp++; mi->mi_reset_offset = dp - sc->tulip_rombuf; dp += 2 * mi->mi_reset_length; dp0 = &sc->tulip_rombuf[mi->mi_reset_offset]; for (idx3 = 0; idx3 < mi->mi_reset_length; idx3++, dp0 += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp0[0] + 256 * dp0[1]) << 16); } sc->tulip_phyaddr = mi->mi_phyaddr; dp0 = &sc->tulip_rombuf[mi->mi_gpr_offset]; for (idx3 = 0; idx3 < mi->mi_gpr_length; idx3++, dp0 += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp0[0] + 256 * dp0[1]) << 16); } if (mi->mi_reset_length == 0 && mi->mi_gpr_length == 0) TULIP_CSR_WRITE(sc, csr_sia_general, 0); mi->mi_phyaddr = TULIP_MII_NOPHY; for 
(idx3 = 20; idx3 > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx3--) { DELAY(10000); mi->mi_phyaddr = tulip_mii_get_phyaddr(sc, phyno); } if (mi->mi_phyaddr == TULIP_MII_NOPHY) { #if defined(TULIP_DEBUG) device_printf(sc->tulip_dev, "can't find phy %d\n", phyno); #endif break; } sc->tulip_features |= TULIP_HAVE_MII; mi->mi_capabilities = dp[0] + dp[1] * 256; dp += 2; mi->mi_advertisement = dp[0] + dp[1] * 256; dp += 2; mi->mi_full_duplex = dp[0] + dp[1] * 256; dp += 2; mi->mi_tx_threshold = dp[0] + dp[1] * 256; dp += 2; mi->mi_mii_interrupt = dp[0] + dp[1] * 256; dp += 2; TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASET4); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET); mi->mi_phyid = (tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDHIGH); mi++; break; } case 4: { /* 21143 SYM block */ tulip_media_t media; srom_media = (tulip_srom_media_t) dp[0]; for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media == TULIP_MEDIA_UNKNOWN) break; mi->mi_type = TULIP_MEDIAINFO_SYM; sc->tulip_mediums[media] = mi; mi->mi_gpcontrol = (dp[1] + dp[2] * 256) << 16; mi->mi_gpdata = (dp[3] + dp[4] * 256) << 16; data = dp[5] + dp[6] * 256; mi->mi_cmdmode = TULIP_SROM_2114X_CMDBITS(data); if (data & TULIP_SROM_2114X_NOINDICATOR) { mi->mi_actmask = 0; } else { mi->mi_default = (data & TULIP_SROM_2114X_DEFAULT) != 0; mi->mi_actmask = TULIP_SROM_2114X_BITPOS(data); mi->mi_actdata = (data & TULIP_SROM_2114X_POLARITY) ? 0 : mi->mi_actmask; } if (TULIP_IS_MEDIA_TP(media)) sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; mi++; break; } #if 0 case 5: { /* 21143 Reset block */ mi->mi_type = TULIP_MEDIAINFO_RESET; mi->mi_reset_length = *dp++; mi->mi_reset_offset = dp - sc->tulip_rombuf; dp += 2 * mi->mi_reset_length; mi++; break; } #endif default: { } } dp = ep; } } return mi - sc->tulip_mediainfo; } static const struct { void (*vendor_identify_nic)(tulip_softc_t * const sc); unsigned char vendor_oui[3]; } tulip_vendors[] = { { tulip_identify_dec_nic, { 0x08, 0x00, 0x2B } }, { tulip_identify_dec_nic, { 0x00, 0x00, 0xF8 } }, { tulip_identify_smc_nic, { 0x00, 0x00, 0xC0 } }, { tulip_identify_smc_nic, { 0x00, 0xE0, 0x29 } }, { tulip_identify_znyx_nic, { 0x00, 0xC0, 0x95 } }, { tulip_identify_cogent_nic, { 0x00, 0x00, 0x92 } }, { tulip_identify_asante_nic, { 0x00, 0x00, 0x94 } }, { tulip_identify_cogent_nic, { 0x00, 0x00, 0xD1 } }, { tulip_identify_accton_nic, { 0x00, 0x00, 0xE8 } }, { tulip_identify_compex_nic, { 0x00, 0x80, 0x48 } }, { NULL } }; /* * This deals with the vagaries of the address roms and the * brain-deadness that various vendors commit in using them. 
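 * (A summary of the legacy DEC address-ROM layout that
 * tulip_read_macaddr() below checks for, reconstructed from its tests:
 * bytes 0-5 hold the station address, bytes 6-7 a checksum, bytes 8-15
 * a byte-reversed copy of bytes 0-7, and bytes 24-31 the fixed test
 * pattern FF 00 55 AA FF 00 55 AA.  ROMs that fail these tests fall
 * into the vendor-specific special cases.)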
*/ static int tulip_read_macaddr(tulip_softc_t * const sc) { unsigned cksum, rom_cksum, idx; u_int32_t csr; unsigned char tmpbuf[8]; static const u_char testpat[] = { 0xFF, 0, 0x55, 0xAA, 0xFF, 0, 0x55, 0xAA }; sc->tulip_connidx = TULIP_SROM_LASTCONNIDX; if (sc->tulip_chipid == TULIP_21040) { TULIP_CSR_WRITE(sc, csr_enetrom, 1); for (idx = 0; idx < sizeof(sc->tulip_rombuf); idx++) { int cnt = 0; while (((csr = TULIP_CSR_READ(sc, csr_enetrom)) & 0x80000000L) && cnt < 10000) cnt++; sc->tulip_rombuf[idx] = csr & 0xFF; } sc->tulip_boardsw = &tulip_21040_boardsw; } else { if (sc->tulip_chipid == TULIP_21041) { /* * Thankfully all 21041's act the same. */ sc->tulip_boardsw = &tulip_21041_boardsw; } else { /* * Assume all 21140 boards are compatible with the * DEC 10/100 evaluation board. Not really valid but * it's the best we can do until everyone switches to * the new SROM format. */ sc->tulip_boardsw = &tulip_21140_eb_boardsw; } tulip_srom_read(sc); if (tulip_srom_crcok(sc->tulip_rombuf)) { /* * SROM CRC is valid therefore it must be in the * new format. */ sc->tulip_features |= TULIP_HAVE_ISVSROM|TULIP_HAVE_OKSROM; } else if (sc->tulip_rombuf[126] == 0xff && sc->tulip_rombuf[127] == 0xFF) { /* * No checksum is present. See if the SROM id checks out; * the first 18 bytes should be 0 followed by a 1 followed * by the number of adapters (which we don't deal with yet). */ for (idx = 0; idx < 18; idx++) { if (sc->tulip_rombuf[idx] != 0) break; } if (idx == 18 && sc->tulip_rombuf[18] == 1 && sc->tulip_rombuf[19] != 0) sc->tulip_features |= TULIP_HAVE_ISVSROM; } else if (sc->tulip_chipid >= TULIP_21142) { sc->tulip_features |= TULIP_HAVE_ISVSROM; sc->tulip_boardsw = &tulip_2114x_isv_boardsw; } if ((sc->tulip_features & TULIP_HAVE_ISVSROM) && tulip_srom_decode(sc)) { if (sc->tulip_chipid != TULIP_21041) sc->tulip_boardsw = &tulip_2114x_isv_boardsw; /* * If the SROM specifies more than one adapter, tag this as a * BASE rom. */ if (sc->tulip_rombuf[19] > 1) sc->tulip_features |= TULIP_HAVE_BASEROM; if (sc->tulip_boardsw == NULL) return -6; goto check_oui; } } if (bcmp(&sc->tulip_rombuf[0], &sc->tulip_rombuf[16], 8) != 0) { /* * Some folks don't use the standard ethernet rom format * but instead just put the address in the first 6 bytes * of the rom and let the rest be all 0xffs. (Can we say * ZNYX?) (well sometimes they put in a checksum so we'll * start at 8). */ for (idx = 8; idx < 32; idx++) { if (sc->tulip_rombuf[idx] != 0xFF) return -4; } /* * Make sure the address is not multicast or locally assigned and * that the OUI is not 00-00-00. */ if ((sc->tulip_rombuf[0] & 3) != 0) return -4; if (sc->tulip_rombuf[0] == 0 && sc->tulip_rombuf[1] == 0 && sc->tulip_rombuf[2] == 0) return -4; bcopy(sc->tulip_rombuf, sc->tulip_enaddr, 6); sc->tulip_features |= TULIP_HAVE_OKROM; goto check_oui; } else { /* * A number of makers of multiport boards (ZNYX and Cogent) * only put on one address ROM on their 21040 boards. So * if the ROM is all zeros (or all 0xFFs), look at the * previously configured boards (as long as they are on the same * PCI bus and the bus number is non-zero) until we find the * master board with address ROM. We then use its address ROM * as the base for this board. (we add our relative board number * to the last byte of its address).
*/ for (idx = 0; idx < sizeof(sc->tulip_rombuf); idx++) { if (sc->tulip_rombuf[idx] != 0 && sc->tulip_rombuf[idx] != 0xFF) break; } if (idx == sizeof(sc->tulip_rombuf)) { int root_unit; tulip_softc_t *root_sc = NULL; for (root_unit = sc->tulip_unit - 1; root_unit >= 0; root_unit--) { root_sc = tulips[root_unit]; if (root_sc == NULL || (root_sc->tulip_features & (TULIP_HAVE_OKROM|TULIP_HAVE_SLAVEDROM)) == TULIP_HAVE_OKROM) break; root_sc = NULL; } if (root_sc != NULL && (root_sc->tulip_features & TULIP_HAVE_BASEROM) && root_sc->tulip_chipid == sc->tulip_chipid && root_sc->tulip_pci_busno == sc->tulip_pci_busno) { sc->tulip_features |= TULIP_HAVE_SLAVEDROM; sc->tulip_boardsw = root_sc->tulip_boardsw; strcpy(sc->tulip_boardid, root_sc->tulip_boardid); if (sc->tulip_boardsw->bd_type == TULIP_21140_ISV) { bcopy(root_sc->tulip_rombuf, sc->tulip_rombuf, sizeof(sc->tulip_rombuf)); if (!tulip_srom_decode(sc)) return -5; } else { bcopy(root_sc->tulip_enaddr, sc->tulip_enaddr, 6); sc->tulip_enaddr[5] += sc->tulip_unit - root_sc->tulip_unit; } /* * Now for a truly disgusting kludge: all 4 21040s on * the ZX314 share the same INTA line so the mapping * setup by the BIOS on the PCI bridge is worthless. * Rather than reprogramming the value in the config * register, we will handle this internally. */ if (root_sc->tulip_features & TULIP_HAVE_SHAREDINTR) { sc->tulip_slaves = root_sc->tulip_slaves; root_sc->tulip_slaves = sc; sc->tulip_features |= TULIP_HAVE_SLAVEDINTR; } return 0; } } } /* * This is the standard DEC address ROM test. */ if (bcmp(&sc->tulip_rombuf[24], testpat, 8) != 0) return -3; tmpbuf[0] = sc->tulip_rombuf[15]; tmpbuf[1] = sc->tulip_rombuf[14]; tmpbuf[2] = sc->tulip_rombuf[13]; tmpbuf[3] = sc->tulip_rombuf[12]; tmpbuf[4] = sc->tulip_rombuf[11]; tmpbuf[5] = sc->tulip_rombuf[10]; tmpbuf[6] = sc->tulip_rombuf[9]; tmpbuf[7] = sc->tulip_rombuf[8]; if (bcmp(&sc->tulip_rombuf[0], tmpbuf, 8) != 0) return -2; bcopy(sc->tulip_rombuf, sc->tulip_enaddr, 6); cksum = *(u_int16_t *) &sc->tulip_enaddr[0]; cksum *= 2; if (cksum > 65535) cksum -= 65535; cksum += *(u_int16_t *) &sc->tulip_enaddr[2]; if (cksum > 65535) cksum -= 65535; cksum *= 2; if (cksum > 65535) cksum -= 65535; cksum += *(u_int16_t *) &sc->tulip_enaddr[4]; if (cksum >= 65535) cksum -= 65535; rom_cksum = *(u_int16_t *) &sc->tulip_rombuf[6]; if (cksum != rom_cksum) return -1; check_oui: /* * Check for various boards based on OUI. Did I say braindead? 
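 * (Example: an address whose OUI is 00:00:c0 matches the SMC entry in
 * tulip_vendors[], so tulip_identify_smc_nic() gets a chance to fix up
 * the board ID and board switch; an unmatched OUI simply keeps the
 * defaults chosen earlier.)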
*/ for (idx = 0; tulip_vendors[idx].vendor_identify_nic != NULL; idx++) { if (bcmp(sc->tulip_enaddr, tulip_vendors[idx].vendor_oui, 3) == 0) { (*tulip_vendors[idx].vendor_identify_nic)(sc); break; } } sc->tulip_features |= TULIP_HAVE_OKROM; return 0; } static void tulip_ifmedia_add(tulip_softc_t * const sc) { tulip_media_t media; int medias = 0; TULIP_LOCK_ASSERT(sc); for (media = TULIP_MEDIA_UNKNOWN; media < TULIP_MEDIA_MAX; media++) { if (sc->tulip_mediums[media] != NULL) { ifmedia_add(&sc->tulip_ifmedia, tulip_media_to_ifmedia[media], 0, 0); medias++; } } if (medias == 0) { sc->tulip_features |= TULIP_HAVE_NOMEDIA; ifmedia_add(&sc->tulip_ifmedia, IFM_ETHER | IFM_NONE, 0, 0); ifmedia_set(&sc->tulip_ifmedia, IFM_ETHER | IFM_NONE); } else if (sc->tulip_media == TULIP_MEDIA_UNKNOWN) { ifmedia_add(&sc->tulip_ifmedia, IFM_ETHER | IFM_AUTO, 0, 0); ifmedia_set(&sc->tulip_ifmedia, IFM_ETHER | IFM_AUTO); } else { ifmedia_set(&sc->tulip_ifmedia, tulip_media_to_ifmedia[sc->tulip_media]); sc->tulip_flags |= TULIP_PRINTMEDIA; tulip_linkup(sc, sc->tulip_media); } } static int tulip_ifmedia_change(struct ifnet * const ifp) { tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; TULIP_LOCK(sc); sc->tulip_flags |= TULIP_NEEDRESET; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; sc->tulip_media = TULIP_MEDIA_UNKNOWN; if (IFM_SUBTYPE(sc->tulip_ifmedia.ifm_media) != IFM_AUTO) { tulip_media_t media; for (media = TULIP_MEDIA_UNKNOWN; media < TULIP_MEDIA_MAX; media++) { if (sc->tulip_mediums[media] != NULL && sc->tulip_ifmedia.ifm_media == tulip_media_to_ifmedia[media]) { sc->tulip_flags |= TULIP_PRINTMEDIA; sc->tulip_flags &= ~TULIP_DIDNWAY; tulip_linkup(sc, media); TULIP_UNLOCK(sc); return 0; } } } sc->tulip_flags &= ~(TULIP_TXPROBE_ACTIVE|TULIP_WANTRXACT); tulip_reset(sc); tulip_init_locked(sc); TULIP_UNLOCK(sc); return 0; } /* * Media status callback */ static void tulip_ifmedia_status(struct ifnet * const ifp, struct ifmediareq *req) { tulip_softc_t *sc = (tulip_softc_t *)ifp->if_softc; TULIP_LOCK(sc); if (sc->tulip_media == TULIP_MEDIA_UNKNOWN) { TULIP_UNLOCK(sc); return; } req->ifm_status = IFM_AVALID; if (sc->tulip_flags & TULIP_LINKUP) req->ifm_status |= IFM_ACTIVE; req->ifm_active = tulip_media_to_ifmedia[sc->tulip_media]; TULIP_UNLOCK(sc); } static void tulip_addr_filter(tulip_softc_t * const sc) { struct ifmultiaddr *ifma; struct ifnet *ifp; u_char *addrp; u_int16_t eaddr[ETHER_ADDR_LEN/2]; int multicnt; TULIP_LOCK_ASSERT(sc); sc->tulip_flags &= ~(TULIP_WANTHASHPERFECT|TULIP_WANTHASHONLY|TULIP_ALLMULTI); sc->tulip_flags |= TULIP_WANTSETUP|TULIP_WANTTXSTART; sc->tulip_cmdmode &= ~TULIP_CMD_RXRUN; sc->tulip_intrmask &= ~TULIP_STS_RXSTOPPED; #if defined(IFF_ALLMULTI) if (sc->tulip_ifp->if_flags & IFF_ALLMULTI) sc->tulip_flags |= TULIP_ALLMULTI ; #endif multicnt = 0; ifp = sc->tulip_ifp; if_maddr_rlock(ifp); /* Copy the MAC address onto the stack to align it. */ if (ifp->if_input != NULL) bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); else bcopy(sc->tulip_enaddr, eaddr, ETHER_ADDR_LEN); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family == AF_LINK) multicnt++; } if (multicnt > 14) { u_int32_t *sp = sc->tulip_setupdata; unsigned hash; /* * Some early passes of the 21140 have broken implementations of * hash-perfect mode. When we get too many multicasts for perfect * filtering with these chips, we need to switch into hash-only * mode (this is better than all-multicast on a network with lots * of multicast traffic).
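 * (Sketch: each address is run through the 512-bit hash used below --
 * the low 9 bits of the little-endian CRC-32 of the 6-byte address
 * pick one bit of the setup frame:
 *
 *	hash = tulip_mchash(addr);	i.e. ether_crc32_le(addr, 6) & 0x1FF
 *	sp[hash >> 4] |= htole32(1 << (hash & 0xF));
 *
 * hash-perfect mode additionally keeps one perfect match, which is why
 * the station address lands in sp[39..41] below.)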
*/ if (sc->tulip_features & TULIP_HAVE_BROKEN_HASH) sc->tulip_flags |= TULIP_WANTHASHONLY; else sc->tulip_flags |= TULIP_WANTHASHPERFECT; /* * If we have more than 14 multicasts, we have to * go into hash-perfect mode (512-bit multicast * hash and one perfect hardware address). */ bzero(sc->tulip_setupdata, sizeof(sc->tulip_setupdata)); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; hash = tulip_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[hash >> 4] |= htole32(1 << (hash & 0xF)); } /* * No reason to use a hash if we are going to be * receiving every multicast. */ if ((sc->tulip_flags & TULIP_ALLMULTI) == 0) { hash = tulip_mchash(ifp->if_broadcastaddr); sp[hash >> 4] |= htole32(1 << (hash & 0xF)); if (sc->tulip_flags & TULIP_WANTHASHONLY) { hash = tulip_mchash((caddr_t)eaddr); sp[hash >> 4] |= htole32(1 << (hash & 0xF)); } else { sp[39] = TULIP_SP_MAC(eaddr[0]); sp[40] = TULIP_SP_MAC(eaddr[1]); sp[41] = TULIP_SP_MAC(eaddr[2]); } } } if ((sc->tulip_flags & (TULIP_WANTHASHPERFECT|TULIP_WANTHASHONLY)) == 0) { u_int32_t *sp = sc->tulip_setupdata; int idx = 0; if ((sc->tulip_flags & TULIP_ALLMULTI) == 0) { /* * Otherwise we can get perfect filtering for 16 addresses. */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); *sp++ = TULIP_SP_MAC(((u_int16_t *)addrp)[0]); *sp++ = TULIP_SP_MAC(((u_int16_t *)addrp)[1]); *sp++ = TULIP_SP_MAC(((u_int16_t *)addrp)[2]); idx++; } /* * Add the broadcast address. */ idx++; *sp++ = TULIP_SP_MAC(0xFFFF); *sp++ = TULIP_SP_MAC(0xFFFF); *sp++ = TULIP_SP_MAC(0xFFFF); } /* * Pad the rest with our hardware address. */ for (; idx < 16; idx++) { *sp++ = TULIP_SP_MAC(eaddr[0]); *sp++ = TULIP_SP_MAC(eaddr[1]); *sp++ = TULIP_SP_MAC(eaddr[2]); } } if_maddr_runlock(ifp); } static void tulip_reset(tulip_softc_t * const sc) { tulip_ringinfo_t *ri; tulip_descinfo_t *di; struct mbuf *m; u_int32_t inreset = (sc->tulip_flags & TULIP_INRESET); TULIP_LOCK_ASSERT(sc); CTR1(KTR_TULIP, "tulip_reset: inreset %d", inreset); /* * Brilliant. Simply brilliant. When switching modes/speeds * on a 2114*, you need to set the appropriate MII/PCS/SCL/PS * bits in CSR6 and then do a software reset to get the 21140 * to properly reset its internal pathways to the right places. * Grrrr. */ if ((sc->tulip_flags & TULIP_DEVICEPROBE) == 0 && sc->tulip_boardsw->bd_media_preset != NULL) (*sc->tulip_boardsw->bd_media_preset)(sc); TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(10); /* Wait 10 microseconds (actually 50 PCI cycles but at 33MHz that comes to two microseconds but wait a bit longer anyways) */ if (!inreset) { sc->tulip_flags |= TULIP_INRESET; sc->tulip_flags &= ~(TULIP_NEEDRESET|TULIP_RXBUFSLOW); sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } TULIP_CSR_WRITE(sc, csr_txlist, sc->tulip_txinfo.ri_dma_addr & 0xffffffff); TULIP_CSR_WRITE(sc, csr_rxlist, sc->tulip_rxinfo.ri_dma_addr & 0xffffffff); TULIP_CSR_WRITE(sc, csr_busmode, (1 << (3 /*pci_max_burst_len*/ + 8)) |TULIP_BUSMODE_CACHE_ALIGN8 |TULIP_BUSMODE_READMULTIPLE |(BYTE_ORDER != LITTLE_ENDIAN ? TULIP_BUSMODE_DESC_BIGENDIAN : 0)); sc->tulip_txtimer = 0; /* * Free all the mbufs that were on the transmit ring. */
*/ CTR0(KTR_TULIP, "tulip_reset: drain transmit ring"); ri = &sc->tulip_txinfo; for (di = ri->ri_first; di < ri->ri_last; di++) { m = tulip_dequeue_mbuf(ri, di, SYNC_NONE); if (m != NULL) m_freem(m); di->di_desc->d_status = 0; } ri->ri_nextin = ri->ri_nextout = ri->ri_first; ri->ri_free = ri->ri_max; TULIP_TXDESC_PRESYNC(ri); /* * We need to collect all the mbufs that were on the * receive ring before we reinit it either to put * them back on or to know if we have to allocate * more. */ CTR0(KTR_TULIP, "tulip_reset: drain receive ring"); ri = &sc->tulip_rxinfo; ri->ri_nextin = ri->ri_nextout = ri->ri_first; ri->ri_free = ri->ri_max; for (di = ri->ri_first; di < ri->ri_last; di++) { di->di_desc->d_status = 0; di->di_desc->d_length1 = 0; di->di_desc->d_addr1 = 0; di->di_desc->d_length2 = 0; di->di_desc->d_addr2 = 0; } TULIP_RXDESC_PRESYNC(ri); for (di = ri->ri_first; di < ri->ri_last; di++) { m = tulip_dequeue_mbuf(ri, di, SYNC_NONE); if (m != NULL) m_freem(m); } /* * If tulip_reset is being called recursively, exit quickly knowing * that when the outer tulip_reset returns all the right stuff will * have happened. */ if (inreset) return; sc->tulip_intrmask |= TULIP_STS_NORMALINTR|TULIP_STS_RXINTR|TULIP_STS_TXINTR |TULIP_STS_ABNRMLINTR|TULIP_STS_SYSERROR|TULIP_STS_TXSTOPPED |TULIP_STS_TXUNDERFLOW|TULIP_STS_TXBABBLE |TULIP_STS_RXSTOPPED; if ((sc->tulip_flags & TULIP_DEVICEPROBE) == 0) (*sc->tulip_boardsw->bd_media_select)(sc); #if defined(TULIP_DEBUG) if ((sc->tulip_flags & TULIP_NEEDRESET) == TULIP_NEEDRESET) device_printf(sc->tulip_dev, "tulip_reset: additional reset needed?!?\n"); #endif if (bootverbose) tulip_media_print(sc); if (sc->tulip_features & TULIP_HAVE_DUALSENSE) TULIP_CSR_WRITE(sc, csr_sia_status, TULIP_CSR_READ(sc, csr_sia_status)); sc->tulip_flags &= ~(TULIP_DOINGSETUP|TULIP_WANTSETUP|TULIP_INRESET |TULIP_RXACT); } static void tulip_init(void *arg) { tulip_softc_t *sc = (tulip_softc_t *)arg; TULIP_LOCK(sc); tulip_init_locked(sc); TULIP_UNLOCK(sc); } static void tulip_init_locked(tulip_softc_t * const sc) { CTR0(KTR_TULIP, "tulip_init_locked"); if (sc->tulip_ifp->if_flags & IFF_UP) { if ((sc->tulip_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { /* initialize the media */ CTR0(KTR_TULIP, "tulip_init_locked: up but not running, reset chip"); tulip_reset(sc); } tulip_addr_filter(sc); sc->tulip_ifp->if_drv_flags |= IFF_DRV_RUNNING; if (sc->tulip_ifp->if_flags & IFF_PROMISC) { sc->tulip_flags |= TULIP_PROMISC; sc->tulip_cmdmode |= TULIP_CMD_PROMISCUOUS; sc->tulip_intrmask |= TULIP_STS_TXINTR; } else { sc->tulip_flags &= ~TULIP_PROMISC; sc->tulip_cmdmode &= ~TULIP_CMD_PROMISCUOUS; if (sc->tulip_flags & TULIP_ALLMULTI) { sc->tulip_cmdmode |= TULIP_CMD_ALLMULTI; } else { sc->tulip_cmdmode &= ~TULIP_CMD_ALLMULTI; } } sc->tulip_cmdmode |= TULIP_CMD_TXRUN; if ((sc->tulip_flags & (TULIP_TXPROBE_ACTIVE|TULIP_WANTSETUP)) == 0) { tulip_rx_intr(sc); sc->tulip_cmdmode |= TULIP_CMD_RXRUN; sc->tulip_intrmask |= TULIP_STS_RXSTOPPED; } else { sc->tulip_ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->tulip_cmdmode &= ~TULIP_CMD_RXRUN; sc->tulip_intrmask &= ~TULIP_STS_RXSTOPPED; } CTR2(KTR_TULIP, "tulip_init_locked: intr mask %08x cmdmode %08x", sc->tulip_intrmask, sc->tulip_cmdmode); TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); CTR1(KTR_TULIP, "tulip_init_locked: status %08x\n", TULIP_CSR_READ(sc, csr_status)); if ((sc->tulip_flags & (TULIP_WANTSETUP|TULIP_TXPROBE_ACTIVE)) == TULIP_WANTSETUP) tulip_txput_setup(sc); 
callout_reset(&sc->tulip_stat_timer, hz, tulip_watchdog, sc); } else { CTR0(KTR_TULIP, "tulip_init_locked: not up, reset chip"); sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; tulip_reset(sc); tulip_addr_filter(sc); callout_stop(&sc->tulip_stat_timer); } } #define DESC_STATUS(di) (((volatile tulip_desc_t *)((di)->di_desc))->d_status) #define DESC_FLAG(di) ((di)->di_desc->d_flag) static void tulip_rx_intr(tulip_softc_t * const sc) { TULIP_PERFSTART(rxintr) tulip_ringinfo_t * const ri = &sc->tulip_rxinfo; struct ifnet * const ifp = sc->tulip_ifp; int fillok = 1; #if defined(TULIP_DEBUG) int cnt = 0; #endif TULIP_LOCK_ASSERT(sc); CTR0(KTR_TULIP, "tulip_rx_intr: start"); for (;;) { TULIP_PERFSTART(rxget) tulip_descinfo_t *eop = ri->ri_nextin, *dip; int total_len = 0, last_offset = 0; struct mbuf *ms = NULL, *me = NULL; int accept = 0; int error; if (fillok && (ri->ri_max - ri->ri_free) < TULIP_RXQ_TARGET) goto queue_mbuf; #if defined(TULIP_DEBUG) if (cnt == ri->ri_max) break; #endif /* * If the TULIP has no descriptors, there can't be any receive * descriptors to process. */ if (eop == ri->ri_nextout) break; /* * 90% of the packets will fit in one descriptor. So we optimize * for that case. */ TULIP_RXDESC_POSTSYNC(ri); if ((DESC_STATUS(eop) & (TULIP_DSTS_OWNER|TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) == (TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) { ms = tulip_dequeue_mbuf(ri, eop, SYNC_RX); CTR2(KTR_TULIP, "tulip_rx_intr: single packet mbuf %p from descriptor %td", ms, eop - ri->ri_first); me = ms; ri->ri_free++; } else { /* * If still owned by the TULIP, don't touch it. */ if (DESC_STATUS(eop) & TULIP_DSTS_OWNER) break; /* * It is possible (though improbable unless MCLBYTES < 1518) for * a received packet to cross more than one receive descriptor. * We first loop through the descriptor ring making sure we have * received a complete packet. If not, we bail until the next * interrupt. */ dip = eop; while ((DESC_STATUS(eop) & TULIP_DSTS_RxLASTDESC) == 0) { if (++eop == ri->ri_last) eop = ri->ri_first; TULIP_RXDESC_POSTSYNC(ri); if (eop == ri->ri_nextout || DESC_STATUS(eop) & TULIP_DSTS_OWNER) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxintrs++; sc->tulip_dbg.dbg_rxpktsperintr[cnt]++; #endif TULIP_PERFEND(rxget); TULIP_PERFEND(rxintr); return; } total_len++; } /* * Dequeue the first buffer for the start of the packet. Hopefully * this will be the only one we need to dequeue. However, if the * packet consumed multiple descriptors, then we need to dequeue * those buffers and chain to the starting mbuf. All buffers but * the last buffer have the same length so we can set that now. * (we add to last_offset instead of multiplying since we normally * won't go into the loop and thereby saving ourselves from * doing a multiplication by 0 in the normal case). */ ms = tulip_dequeue_mbuf(ri, dip, SYNC_RX); CTR2(KTR_TULIP, "tulip_rx_intr: start packet mbuf %p from descriptor %td", ms, dip - ri->ri_first); ri->ri_free++; for (me = ms; total_len > 0; total_len--) { me->m_len = TULIP_RX_BUFLEN; last_offset += TULIP_RX_BUFLEN; if (++dip == ri->ri_last) dip = ri->ri_first; me->m_next = tulip_dequeue_mbuf(ri, dip, SYNC_RX); ri->ri_free++; me = me->m_next; CTR2(KTR_TULIP, "tulip_rx_intr: cont packet mbuf %p from descriptor %td", me, dip - ri->ri_first); } KASSERT(dip == eop, ("mismatched descinfo structs")); } /* * Now get the size of received packet (minus the CRC). 
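 * (The chip reports the frame length, CRC included, in the upper half
 * of the last descriptor's status word, hence the
 * ((status >> 16) & 0x7FFF) - ETHER_CRC_LEN below; me->m_len then
 * picks up whatever remains after the full TULIP_RX_BUFLEN segments
 * already counted in last_offset.)
 */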
*/ total_len = ((DESC_STATUS(eop) >> 16) & 0x7FFF) - ETHER_CRC_LEN; if ((sc->tulip_flags & TULIP_RXIGNORE) == 0 && ((DESC_STATUS(eop) & TULIP_DSTS_ERRSUM) == 0)) { me->m_len = total_len - last_offset; sc->tulip_flags |= TULIP_RXACT; accept = 1; CTR1(KTR_TULIP, "tulip_rx_intr: good packet; length %d", total_len); } else { CTR1(KTR_TULIP, "tulip_rx_intr: bad packet; status %08x", DESC_STATUS(eop)); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); if (DESC_STATUS(eop) & (TULIP_DSTS_RxBADLENGTH|TULIP_DSTS_RxOVERFLOW|TULIP_DSTS_RxWATCHDOG)) { sc->tulip_dot3stats.dot3StatsInternalMacReceiveErrors++; } else { #if defined(TULIP_VERBOSE) const char *error = NULL; #endif if (DESC_STATUS(eop) & TULIP_DSTS_RxTOOLONG) { sc->tulip_dot3stats.dot3StatsFrameTooLongs++; #if defined(TULIP_VERBOSE) error = "frame too long"; #endif } if (DESC_STATUS(eop) & TULIP_DSTS_RxBADCRC) { if (DESC_STATUS(eop) & TULIP_DSTS_RxDRBBLBIT) { sc->tulip_dot3stats.dot3StatsAlignmentErrors++; #if defined(TULIP_VERBOSE) error = "alignment error"; #endif } else { sc->tulip_dot3stats.dot3StatsFCSErrors++; #if defined(TULIP_VERBOSE) error = "bad crc"; #endif } } #if defined(TULIP_VERBOSE) if (error != NULL && (sc->tulip_flags & TULIP_NOMESSAGES) == 0) { device_printf(sc->tulip_dev, "receive: %6D: %s\n", mtod(ms, u_char *) + 6, ":", error); sc->tulip_flags |= TULIP_NOMESSAGES; } #endif } } #if defined(TULIP_DEBUG) cnt++; #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if (++eop == ri->ri_last) eop = ri->ri_first; ri->ri_nextin = eop; queue_mbuf: /* * We have received a good packet that needs to be passed up the * stack. */ if (accept) { struct mbuf *m0; KASSERT(ms != NULL, ("no packet to accept")); #ifndef __NO_STRICT_ALIGNMENT /* * Copy the data into a new mbuf that is properly aligned. If * we fail to allocate a new mbuf, then drop the packet. We will * reuse the same rx buffer ('ms') below for another packet * regardless. */ m0 = m_devget(mtod(ms, caddr_t), total_len, ETHER_ALIGN, ifp, NULL); if (m0 == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto skip_input; } #else /* * Update the header for the mbuf referencing this receive * buffer and pass it up the stack. Allocate a new mbuf cluster * to replace the one we just passed up the stack. * * Note that if this packet crossed multiple descriptors * we don't even try to reallocate all the mbufs here. * Instead we rely on the test at the beginning of * the loop to refill for the extra consumed mbufs. */ ms->m_pkthdr.len = total_len; ms->m_pkthdr.rcvif = ifp; m0 = ms; ms = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); #endif TULIP_UNLOCK(sc); CTR1(KTR_TULIP, "tulip_rx_intr: passing %p to upper layer", m0); (*ifp->if_input)(ifp, m0); TULIP_LOCK(sc); } else if (ms == NULL) /* * If we are priming the TULIP with mbufs, then allocate * a new cluster for the next descriptor. */ ms = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); #ifndef __NO_STRICT_ALIGNMENT skip_input: #endif if (ms == NULL) { /* * Couldn't allocate a new buffer. Don't bother * trying to replenish the receive queue. */ fillok = 0; sc->tulip_flags |= TULIP_RXBUFSLOW; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxlowbufs++; #endif TULIP_PERFEND(rxget); continue; } /* * Now give the buffer(s) to the TULIP and save in our * receive queue. 
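* Each cluster in the chain gets its own descriptor: load the dma map, record the mbuf in di_mbuf, and hand the descriptor back to the chip by setting TULIP_DSTS_OWNER in d_status.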
*/ do { tulip_descinfo_t * const nextout = ri->ri_nextout; M_ASSERTPKTHDR(ms); KASSERT(ms->m_data == ms->m_ext.ext_buf, ("rx mbuf data doesn't point to cluster")); ms->m_len = ms->m_pkthdr.len = TULIP_RX_BUFLEN; error = bus_dmamap_load_mbuf(ri->ri_data_tag, *nextout->di_map, ms, tulip_dma_map_rxbuf, nextout->di_desc, BUS_DMA_NOWAIT); if (error) { device_printf(sc->tulip_dev, "unable to load rx map, error = %d\n", error); panic("tulip_rx_intr"); /* XXX */ } nextout->di_desc->d_status = TULIP_DSTS_OWNER; KASSERT(nextout->di_mbuf == NULL, ("clobbering earlier rx mbuf")); nextout->di_mbuf = ms; CTR2(KTR_TULIP, "tulip_rx_intr: enqueued mbuf %p to descriptor %td", ms, nextout - ri->ri_first); TULIP_RXDESC_POSTSYNC(ri); if (++ri->ri_nextout == ri->ri_last) ri->ri_nextout = ri->ri_first; ri->ri_free--; me = ms->m_next; ms->m_next = NULL; } while ((ms = me) != NULL); if ((ri->ri_max - ri->ri_free) >= TULIP_RXQ_TARGET) sc->tulip_flags &= ~TULIP_RXBUFSLOW; TULIP_PERFEND(rxget); } #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxintrs++; sc->tulip_dbg.dbg_rxpktsperintr[cnt]++; #endif TULIP_PERFEND(rxintr); } static int tulip_tx_intr(tulip_softc_t * const sc) { TULIP_PERFSTART(txintr) tulip_ringinfo_t * const ri = &sc->tulip_txinfo; struct mbuf *m; int xmits = 0; int descs = 0; CTR0(KTR_TULIP, "tulip_tx_intr: start"); TULIP_LOCK_ASSERT(sc); while (ri->ri_free < ri->ri_max) { u_int32_t d_flag; TULIP_TXDESC_POSTSYNC(ri); if (DESC_STATUS(ri->ri_nextin) & TULIP_DSTS_OWNER) break; ri->ri_free++; descs++; d_flag = DESC_FLAG(ri->ri_nextin); if (d_flag & TULIP_DFLAG_TxLASTSEG) { if (d_flag & TULIP_DFLAG_TxSETUPPKT) { CTR2(KTR_TULIP, "tulip_tx_intr: setup packet from descriptor %td: %08x", ri->ri_nextin - ri->ri_first, DESC_STATUS(ri->ri_nextin)); /* * We've just finished processing a setup packet. * Mark that we finished it. If there's not * another pending, start up the TULIP receiver. * Make sure we ack the RXSTOPPED so we won't get * an abnormal interrupt indication. */ bus_dmamap_sync(sc->tulip_setup_tag, sc->tulip_setup_map, BUS_DMASYNC_POSTWRITE); sc->tulip_flags &= ~(TULIP_DOINGSETUP|TULIP_HASHONLY); if (DESC_FLAG(ri->ri_nextin) & TULIP_DFLAG_TxINVRSFILT) sc->tulip_flags |= TULIP_HASHONLY; if ((sc->tulip_flags & (TULIP_WANTSETUP|TULIP_TXPROBE_ACTIVE)) == 0) { tulip_rx_intr(sc); sc->tulip_cmdmode |= TULIP_CMD_RXRUN; sc->tulip_intrmask |= TULIP_STS_RXSTOPPED; CTR2(KTR_TULIP, "tulip_tx_intr: intr mask %08x cmdmode %08x", sc->tulip_intrmask, sc->tulip_cmdmode); TULIP_CSR_WRITE(sc, csr_status, TULIP_STS_RXSTOPPED); TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } } else { const u_int32_t d_status = DESC_STATUS(ri->ri_nextin); m = tulip_dequeue_mbuf(ri, ri->ri_nextin, SYNC_TX); CTR2(KTR_TULIP, "tulip_tx_intr: data packet %p from descriptor %td", m, ri->ri_nextin - ri->ri_first); if (m != NULL) { m_freem(m); #if defined(TULIP_DEBUG) } else { device_printf(sc->tulip_dev, "tx_intr: failed to dequeue mbuf?!?\n"); #endif } if (sc->tulip_flags & TULIP_TXPROBE_ACTIVE) { tulip_mediapoll_event_t event = TULIP_MEDIAPOLL_TXPROBE_OK; if (d_status & (TULIP_DSTS_TxNOCARR|TULIP_DSTS_TxEXCCOLL)) { #if defined(TULIP_DEBUG) if (d_status & TULIP_DSTS_TxNOCARR) sc->tulip_dbg.dbg_txprobe_nocarr++; if (d_status & TULIP_DSTS_TxEXCCOLL) sc->tulip_dbg.dbg_txprobe_exccoll++; #endif event = TULIP_MEDIAPOLL_TXPROBE_FAILED; } (*sc->tulip_boardsw->bd_media_poll)(sc, event); /* * Escape from the loop before media poll has reset the TULIP!
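* (The bd_media_poll callback may end up calling tulip_reset(), which drains and re-initializes the very ring this loop is walking, so none of the local ring state can be trusted afterwards.)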
*/ break; } else { xmits++; if (d_status & TULIP_DSTS_ERRSUM) { CTR1(KTR_TULIP, "tulip_tx_intr: output error: %08x", d_status); if_inc_counter(sc->tulip_ifp, IFCOUNTER_OERRORS, 1); if (d_status & TULIP_DSTS_TxEXCCOLL) sc->tulip_dot3stats.dot3StatsExcessiveCollisions++; if (d_status & TULIP_DSTS_TxLATECOLL) sc->tulip_dot3stats.dot3StatsLateCollisions++; if (d_status & (TULIP_DSTS_TxNOCARR|TULIP_DSTS_TxCARRLOSS)) sc->tulip_dot3stats.dot3StatsCarrierSenseErrors++; if (d_status & (TULIP_DSTS_TxUNDERFLOW|TULIP_DSTS_TxBABBLE)) sc->tulip_dot3stats.dot3StatsInternalMacTransmitErrors++; if (d_status & TULIP_DSTS_TxUNDERFLOW) sc->tulip_dot3stats.dot3StatsInternalTransmitUnderflows++; if (d_status & TULIP_DSTS_TxBABBLE) sc->tulip_dot3stats.dot3StatsInternalTransmitBabbles++; } else { u_int32_t collisions = (d_status & TULIP_DSTS_TxCOLLMASK) >> TULIP_DSTS_V_TxCOLLCNT; CTR2(KTR_TULIP, "tulip_tx_intr: output ok, collisions %d, status %08x", collisions, d_status); if_inc_counter(sc->tulip_ifp, IFCOUNTER_COLLISIONS, collisions); if (collisions == 1) sc->tulip_dot3stats.dot3StatsSingleCollisionFrames++; else if (collisions > 1) sc->tulip_dot3stats.dot3StatsMultipleCollisionFrames++; else if (d_status & TULIP_DSTS_TxDEFERRED) sc->tulip_dot3stats.dot3StatsDeferredTransmissions++; /* * SQE is only valid for 10baseT/BNC/AUI when not * running in full-duplex. In order to speed up the * test, the corresponding bit in tulip_flags needs to * be set as well to get us to count SQE Test Errors. */ if (d_status & TULIP_DSTS_TxNOHRTBT & sc->tulip_flags) sc->tulip_dot3stats.dot3StatsSQETestErrors++; } } } if (++ri->ri_nextin == ri->ri_last) ri->ri_nextin = ri->ri_first; if ((sc->tulip_flags & TULIP_TXPROBE_ACTIVE) == 0) sc->tulip_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* * If nothing is left to transmit, disable the timer. * Else, if we made progress, reset the timer back to 2 ticks.
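* (tulip_txtimer is decremented by tulip_watchdog(); if it reaches zero before another transmit completes, the watchdog reports a transmission timeout and resets the chip.)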
*/ if (ri->ri_free == ri->ri_max || (sc->tulip_flags & TULIP_TXPROBE_ACTIVE)) sc->tulip_txtimer = 0; else if (xmits > 0) sc->tulip_txtimer = TULIP_TXTIMER; if_inc_counter(sc->tulip_ifp, IFCOUNTER_OPACKETS, xmits); TULIP_PERFEND(txintr); return descs; } static void tulip_print_abnormal_interrupt(tulip_softc_t * const sc, u_int32_t csr) { const char * const *msgp = tulip_status_bits; const char *sep; u_int32_t mask; const char thrsh[] = "72|128\0\0\0" "96|256\0\0\0" "128|512\0\0" "160|1024"; TULIP_LOCK_ASSERT(sc); csr &= (1 << (sizeof(tulip_status_bits)/sizeof(tulip_status_bits[0]))) - 1; device_printf(sc->tulip_dev, "abnormal interrupt:"); for (sep = " ", mask = 1; mask <= csr; mask <<= 1, msgp++) { if ((csr & mask) && *msgp != NULL) { printf("%s%s", sep, *msgp); if (mask == TULIP_STS_TXUNDERFLOW && (sc->tulip_flags & TULIP_NEWTXTHRESH)) { sc->tulip_flags &= ~TULIP_NEWTXTHRESH; if (sc->tulip_cmdmode & TULIP_CMD_STOREFWD) { printf(" (switching to store-and-forward mode)"); } else { printf(" (raising TX threshold to %s)", &thrsh[9 * ((sc->tulip_cmdmode & TULIP_CMD_THRESHOLDCTL) >> 14)]); } } sep = ", "; } } printf("\n"); } static void tulip_intr_handler(tulip_softc_t * const sc) { TULIP_PERFSTART(intr) u_int32_t csr; CTR0(KTR_TULIP, "tulip_intr_handler invoked"); TULIP_LOCK_ASSERT(sc); while ((csr = TULIP_CSR_READ(sc, csr_status)) & sc->tulip_intrmask) { TULIP_CSR_WRITE(sc, csr_status, csr); if (csr & TULIP_STS_SYSERROR) { sc->tulip_last_system_error = (csr & TULIP_STS_ERRORMASK) >> TULIP_STS_ERR_SHIFT; if (sc->tulip_flags & TULIP_NOMESSAGES) { sc->tulip_flags |= TULIP_SYSTEMERROR; } else { device_printf(sc->tulip_dev, "system error: %s\n", tulip_system_errors[sc->tulip_last_system_error]); } sc->tulip_flags |= TULIP_NEEDRESET; sc->tulip_system_errors++; break; } if (csr & (TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL) & sc->tulip_intrmask) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_intrs++; #endif if (sc->tulip_boardsw->bd_media_poll != NULL) { (*sc->tulip_boardsw->bd_media_poll)(sc, csr & TULIP_STS_LINKFAIL ? TULIP_MEDIAPOLL_LINKFAIL : TULIP_MEDIAPOLL_LINKPASS); csr &= ~TULIP_STS_ABNRMLINTR; } tulip_media_print(sc); } if (csr & (TULIP_STS_RXINTR|TULIP_STS_RXNOBUF)) { u_int32_t misses = TULIP_CSR_READ(sc, csr_missed_frames); if (csr & TULIP_STS_RXNOBUF) sc->tulip_dot3stats.dot3StatsMissedFrames += misses & 0xFFFF; /* * Pass 2.[012] of the 21140A-A[CDE] may hang and/or corrupt data * on receive overflows. */ if ((misses & 0x0FFE0000) && (sc->tulip_features & TULIP_HAVE_RXBADOVRFLW)) { sc->tulip_dot3stats.dot3StatsInternalMacReceiveErrors++; /* * Stop the receiver process and spin until it's stopped. * Tell rx_intr to drop the packets it dequeues. */ TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode & ~TULIP_CMD_RXRUN); while ((TULIP_CSR_READ(sc, csr_status) & TULIP_STS_RXSTOPPED) == 0) ; TULIP_CSR_WRITE(sc, csr_status, TULIP_STS_RXSTOPPED); sc->tulip_flags |= TULIP_RXIGNORE; } tulip_rx_intr(sc); if (sc->tulip_flags & TULIP_RXIGNORE) { /* * Restart the receiver. 
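* tulip_cmdmode still has TULIP_CMD_RXRUN set (the stop above wrote the CSR with the bit masked out but never cleared it in the softc copy), so rewriting csr_command starts the receive process again.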
*/ sc->tulip_flags &= ~TULIP_RXIGNORE; TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } } if (csr & TULIP_STS_ABNRMLINTR) { u_int32_t tmp = csr & sc->tulip_intrmask & ~(TULIP_STS_NORMALINTR|TULIP_STS_ABNRMLINTR); if (csr & TULIP_STS_TXUNDERFLOW) { if ((sc->tulip_cmdmode & TULIP_CMD_THRESHOLDCTL) != TULIP_CMD_THRSHLD160) { sc->tulip_cmdmode += TULIP_CMD_THRSHLD96; sc->tulip_flags |= TULIP_NEWTXTHRESH; } else if (sc->tulip_features & TULIP_HAVE_STOREFWD) { sc->tulip_cmdmode |= TULIP_CMD_STOREFWD; sc->tulip_flags |= TULIP_NEWTXTHRESH; } } if (sc->tulip_flags & TULIP_NOMESSAGES) { sc->tulip_statusbits |= tmp; } else { tulip_print_abnormal_interrupt(sc, tmp); sc->tulip_flags |= TULIP_NOMESSAGES; } TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } if (sc->tulip_flags & (TULIP_WANTTXSTART|TULIP_TXPROBE_ACTIVE|TULIP_DOINGSETUP|TULIP_PROMISC)) { tulip_tx_intr(sc); if ((sc->tulip_flags & TULIP_TXPROBE_ACTIVE) == 0) tulip_start_locked(sc); } } if (sc->tulip_flags & TULIP_NEEDRESET) { tulip_reset(sc); tulip_init_locked(sc); } TULIP_PERFEND(intr); } static void tulip_intr_shared(void *arg) { tulip_softc_t * sc = arg; for (; sc != NULL; sc = sc->tulip_slaves) { TULIP_LOCK(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_intrs++; #endif tulip_intr_handler(sc); TULIP_UNLOCK(sc); } } static void tulip_intr_normal(void *arg) { tulip_softc_t * sc = (tulip_softc_t *) arg; TULIP_LOCK(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_intrs++; #endif tulip_intr_handler(sc); TULIP_UNLOCK(sc); } static struct mbuf * tulip_txput(tulip_softc_t * const sc, struct mbuf *m) { TULIP_PERFSTART(txput) tulip_ringinfo_t * const ri = &sc->tulip_txinfo; tulip_descinfo_t *eop, *nextout; int segcnt, free; u_int32_t d_status; bus_dma_segment_t segs[TULIP_MAX_TXSEG]; bus_dmamap_t *map; int error, nsegs; struct mbuf *m0; TULIP_LOCK_ASSERT(sc); #if defined(TULIP_DEBUG) if ((sc->tulip_cmdmode & TULIP_CMD_TXRUN) == 0) { device_printf(sc->tulip_dev, "txput%s: tx not running\n", (sc->tulip_flags & TULIP_TXPROBE_ACTIVE) ? "(probe)" : ""); sc->tulip_flags |= TULIP_WANTTXSTART; sc->tulip_dbg.dbg_txput_finishes[0]++; goto finish; } #endif /* * Now we try to fill in our transmit descriptors. This is * a bit reminiscent of going on the Ark two by two * since each descriptor for the TULIP can describe * two buffers. So we advance through the packet, filling * each of the two entries at a time to fill each * descriptor. Clear the first and last segment bits * in each descriptor (actually just clear everything * but the end-of-ring or chain bits) to make sure * we don't get messed up by previously sent packets. * * We may fail to put the entire packet on the ring if * there are either not enough ring entries free or if the * packet has more than MAX_TXSEG segments. In the former * case we will just wait for the ring to empty. In the * latter case we have to recopy. */ #if defined(KTR) && KTR_TULIP segcnt = 1; m0 = m; while (m0->m_next != NULL) { segcnt++; m0 = m0->m_next; } CTR2(KTR_TULIP, "tulip_txput: sending packet %p (%d chunks)", m, segcnt); #endif d_status = 0; eop = nextout = ri->ri_nextout; segcnt = 0; free = ri->ri_free; /* * Reclaim some tx descriptors if we are out since we need at least one * free descriptor so that we have a dma_map to load the mbuf.
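* tulip_tx_intr() returns the number of descriptors it harvested, so "free" can be topped up without re-reading ri->ri_free.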
*/ if (free == 0) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_no_txmaps++; #endif free += tulip_tx_intr(sc); } if (free == 0) { sc->tulip_flags |= TULIP_WANTTXSTART; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[1]++; #endif goto finish; } error = bus_dmamap_load_mbuf_sg(ri->ri_data_tag, *eop->di_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { if (error == EFBIG) { /* * The packet exceeds the number of transmit buffer * entries that we can use for one packet, so we have * to recopy it into one mbuf and then try again. If * we can't recopy it, try again later. */ m0 = m_defrag(m, M_NOWAIT); if (m0 == NULL) { sc->tulip_flags |= TULIP_WANTTXSTART; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[2]++; #endif goto finish; } m = m0; error = bus_dmamap_load_mbuf_sg(ri->ri_data_tag, *eop->di_map, m, segs, &nsegs, BUS_DMA_NOWAIT); } if (error != 0) { device_printf(sc->tulip_dev, "unable to load tx map, error = %d\n", error); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[3]++; #endif goto finish; } } CTR1(KTR_TULIP, "tulip_txput: nsegs %d", nsegs); /* * Each descriptor allows for up to 2 fragments since we don't use * the descriptor chaining mode in this driver. */ if ((free -= (nsegs + 1) / 2) <= 0 /* * See if there's any unclaimed space in the transmit ring. */ && (free += tulip_tx_intr(sc)) <= 0) { /* * There's no more room but since nothing * has been committed at this point, just * show output is active, put back the * mbuf and return. */ sc->tulip_flags |= TULIP_WANTTXSTART; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[4]++; #endif bus_dmamap_unload(ri->ri_data_tag, *eop->di_map); goto finish; } for (; nsegs - segcnt > 1; segcnt += 2) { eop = nextout; eop->di_desc->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; eop->di_desc->d_status = d_status; eop->di_desc->d_addr1 = segs[segcnt].ds_addr & 0xffffffff; eop->di_desc->d_length1 = segs[segcnt].ds_len; eop->di_desc->d_addr2 = segs[segcnt+1].ds_addr & 0xffffffff; eop->di_desc->d_length2 = segs[segcnt+1].ds_len; d_status = TULIP_DSTS_OWNER; if (++nextout == ri->ri_last) nextout = ri->ri_first; } if (segcnt < nsegs) { eop = nextout; eop->di_desc->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; eop->di_desc->d_status = d_status; eop->di_desc->d_addr1 = segs[segcnt].ds_addr & 0xffffffff; eop->di_desc->d_length1 = segs[segcnt].ds_len; eop->di_desc->d_addr2 = 0; eop->di_desc->d_length2 = 0; if (++nextout == ri->ri_last) nextout = ri->ri_first; } /* * tulip_tx_intr() harvests the mbuf from the last descriptor in the * frame. We just used the dmamap in the first descriptor for the * load operation however. Thus, to let the tulip_dequeue_mbuf() call * in tulip_tx_intr() unload the correct dmamap, we swap the dmamap * pointers in the two descriptors if this is a multiple-descriptor * packet. */ if (eop != ri->ri_nextout) { map = eop->di_map; eop->di_map = ri->ri_nextout->di_map; ri->ri_nextout->di_map = map; } /* * bounce a copy to the bpf listener, if any. */ if (!(sc->tulip_flags & TULIP_DEVICEPROBE)) BPF_MTAP(sc->tulip_ifp, m); /* * The descriptors have been filled in. Now get ready * to transmit. 
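* Note the ordering below: the mbuf is recorded and every descriptor except the first is already marked TULIP_DSTS_OWNER, but the first descriptor's OWNER bit is set last, after a final sync, so the chip can never see a partially built chain.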
*/ CTR3(KTR_TULIP, "tulip_txput: enqueued mbuf %p to descriptors %td - %td", m, ri->ri_nextout - ri->ri_first, eop - ri->ri_first); KASSERT(eop->di_mbuf == NULL, ("clobbering earlier tx mbuf")); eop->di_mbuf = m; TULIP_TXMAP_PRESYNC(ri, ri->ri_nextout); m = NULL; /* * Make sure the next descriptor after this packet is owned * by us since it may have been set up above if we ran out * of room in the ring. */ nextout->di_desc->d_status = 0; TULIP_TXDESC_PRESYNC(ri); /* * Mark the last and first segments, indicate we want a transmit * complete interrupt, and tell it to transmit! */ eop->di_desc->d_flag |= TULIP_DFLAG_TxLASTSEG|TULIP_DFLAG_TxWANTINTR; /* * Note that ri->ri_nextout is still the start of the packet * and until we set the OWNER bit, we can still back out of * everything we have done. */ ri->ri_nextout->di_desc->d_flag |= TULIP_DFLAG_TxFIRSTSEG; TULIP_TXDESC_PRESYNC(ri); ri->ri_nextout->di_desc->d_status = TULIP_DSTS_OWNER; TULIP_TXDESC_PRESYNC(ri); /* * This advances the ring for us. */ ri->ri_nextout = nextout; ri->ri_free = free; TULIP_PERFEND(txput); if (sc->tulip_flags & TULIP_TXPROBE_ACTIVE) { TULIP_CSR_WRITE(sc, csr_txpoll, 1); sc->tulip_ifp->if_drv_flags |= IFF_DRV_OACTIVE; TULIP_PERFEND(txput); return NULL; } /* * Switch back to the single queueing ifstart. */ sc->tulip_flags &= ~TULIP_WANTTXSTART; if (sc->tulip_txtimer == 0) sc->tulip_txtimer = TULIP_TXTIMER; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[5]++; #endif /* * If we want a txstart, there must not be enough space in the * transmit ring. So we want to enable transmit done interrupts * so we can immediately reclaim some space. When the transmit * interrupt is posted, the interrupt handler will call tx_intr * to reclaim space and then txstart (since WANTTXSTART is set). * txstart will move the packet into the transmit ring and clear * WANTTXSTART thereby causing TXINTR to be cleared. */ finish: #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[6]++; #endif if (sc->tulip_flags & (TULIP_WANTTXSTART|TULIP_DOINGSETUP)) { sc->tulip_ifp->if_drv_flags |= IFF_DRV_OACTIVE; if ((sc->tulip_intrmask & TULIP_STS_TXINTR) == 0) { sc->tulip_intrmask |= TULIP_STS_TXINTR; TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); } } else if ((sc->tulip_flags & TULIP_PROMISC) == 0) { if (sc->tulip_intrmask & TULIP_STS_TXINTR) { sc->tulip_intrmask &= ~TULIP_STS_TXINTR; TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); } } TULIP_CSR_WRITE(sc, csr_txpoll, 1); TULIP_PERFEND(txput); return m; } static void tulip_txput_setup(tulip_softc_t * const sc) { tulip_ringinfo_t * const ri = &sc->tulip_txinfo; tulip_desc_t *nextout; TULIP_LOCK_ASSERT(sc); /* * We will transmit, at most, one setup packet per call to ifstart. */ #if defined(TULIP_DEBUG) if ((sc->tulip_cmdmode & TULIP_CMD_TXRUN) == 0) { device_printf(sc->tulip_dev, "txput_setup: tx not running\n"); sc->tulip_flags |= TULIP_WANTTXSTART; return; } #endif /* * Try to reclaim some free descriptors. */ if (ri->ri_free < 2) tulip_tx_intr(sc); if ((sc->tulip_flags & TULIP_DOINGSETUP) || ri->ri_free == 1) { sc->tulip_flags |= TULIP_WANTTXSTART; return; } bcopy(sc->tulip_setupdata, sc->tulip_setupbuf, sizeof(sc->tulip_setupdata)); /* * Clear WANTSETUP and set DOINGSETUP. Since we know that WANTSETUP is * set and DOINGSETUP is clear, doing an XOR of the two will DTRT.
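* For example, starting from tulip_flags with only TULIP_WANTSETUP of the pair set, the XOR with (TULIP_WANTSETUP|TULIP_DOINGSETUP) flips WANTSETUP to 0 and DOINGSETUP to 1 in a single operation.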
*/ sc->tulip_flags ^= TULIP_WANTSETUP|TULIP_DOINGSETUP; ri->ri_free--; nextout = ri->ri_nextout->di_desc; nextout->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; nextout->d_flag |= TULIP_DFLAG_TxFIRSTSEG|TULIP_DFLAG_TxLASTSEG |TULIP_DFLAG_TxSETUPPKT|TULIP_DFLAG_TxWANTINTR; if (sc->tulip_flags & TULIP_WANTHASHPERFECT) nextout->d_flag |= TULIP_DFLAG_TxHASHFILT; else if (sc->tulip_flags & TULIP_WANTHASHONLY) nextout->d_flag |= TULIP_DFLAG_TxHASHFILT|TULIP_DFLAG_TxINVRSFILT; nextout->d_length2 = 0; nextout->d_addr2 = 0; nextout->d_length1 = sizeof(sc->tulip_setupdata); nextout->d_addr1 = sc->tulip_setup_dma_addr & 0xffffffff; bus_dmamap_sync(sc->tulip_setup_tag, sc->tulip_setup_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); TULIP_TXDESC_PRESYNC(ri); CTR1(KTR_TULIP, "tulip_txput_setup: using descriptor %td", ri->ri_nextout - ri->ri_first); /* * Advance the ring for the next transmit packet. */ if (++ri->ri_nextout == ri->ri_last) ri->ri_nextout = ri->ri_first; /* * Make sure the next descriptor is owned by us since it * may have been set up above if we ran out of room in the * ring. */ ri->ri_nextout->di_desc->d_status = 0; TULIP_TXDESC_PRESYNC(ri); nextout->d_status = TULIP_DSTS_OWNER; /* * Flush the ownership of the current descriptor */ TULIP_TXDESC_PRESYNC(ri); TULIP_CSR_WRITE(sc, csr_txpoll, 1); if ((sc->tulip_intrmask & TULIP_STS_TXINTR) == 0) { sc->tulip_intrmask |= TULIP_STS_TXINTR; TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); } } static int tulip_ifioctl(struct ifnet * ifp, u_long cmd, caddr_t data) { TULIP_PERFSTART(ifioctl) tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: { TULIP_LOCK(sc); tulip_init_locked(sc); TULIP_UNLOCK(sc); break; } case SIOCSIFMEDIA: case SIOCGIFMEDIA: { error = ifmedia_ioctl(ifp, ifr, &sc->tulip_ifmedia, cmd); break; } case SIOCADDMULTI: case SIOCDELMULTI: { /* * Update multicast listeners */ TULIP_LOCK(sc); tulip_init_locked(sc); TULIP_UNLOCK(sc); error = 0; break; } #ifdef SIOCGADDRROM case SIOCGADDRROM: { error = copyout(sc->tulip_rombuf, ifr->ifr_data, sizeof(sc->tulip_rombuf)); break; } #endif #ifdef SIOCGCHIPID case SIOCGCHIPID: { ifr->ifr_metric = (int) sc->tulip_chipid; break; } #endif default: { error = ether_ioctl(ifp, cmd, data); break; } } TULIP_PERFEND(ifioctl); return error; } static void tulip_start(struct ifnet * const ifp) { TULIP_PERFSTART(ifstart) tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; TULIP_LOCK(sc); tulip_start_locked(sc); TULIP_UNLOCK(sc); TULIP_PERFEND(ifstart); } static void tulip_start_locked(tulip_softc_t * const sc) { struct mbuf *m; TULIP_LOCK_ASSERT(sc); CTR0(KTR_TULIP, "tulip_start_locked invoked"); if ((sc->tulip_flags & (TULIP_WANTSETUP|TULIP_TXPROBE_ACTIVE)) == TULIP_WANTSETUP) tulip_txput_setup(sc); CTR1(KTR_TULIP, "tulip_start_locked: %d tx packets pending", sc->tulip_ifp->if_snd.ifq_len); while (!IFQ_DRV_IS_EMPTY(&sc->tulip_ifp->if_snd)) { IFQ_DRV_DEQUEUE(&sc->tulip_ifp->if_snd, m); if(m == NULL) break; if ((m = tulip_txput(sc, m)) != NULL) { IFQ_DRV_PREPEND(&sc->tulip_ifp->if_snd, m); break; } } } static void tulip_watchdog(void *arg) { TULIP_PERFSTART(stat) tulip_softc_t *sc = arg; #if defined(TULIP_DEBUG) u_int32_t rxintrs; #endif TULIP_LOCK_ASSERT(sc); callout_reset(&sc->tulip_stat_timer, hz, tulip_watchdog, sc); #if defined(TULIP_DEBUG) rxintrs = sc->tulip_dbg.dbg_rxintrs - sc->tulip_dbg.dbg_last_rxintrs; if (rxintrs > sc->tulip_dbg.dbg_high_rxintrs_hz) sc->tulip_dbg.dbg_high_rxintrs_hz =
rxintrs; sc->tulip_dbg.dbg_last_rxintrs = sc->tulip_dbg.dbg_rxintrs; #endif /* TULIP_DEBUG */ /* * These should be rare so do a bulk test up front so we can just skip * them if needed. */ if (sc->tulip_flags & (TULIP_SYSTEMERROR|TULIP_RXBUFSLOW|TULIP_NOMESSAGES)) { /* * If the number of receive buffers is low, try to refill */ if (sc->tulip_flags & TULIP_RXBUFSLOW) tulip_rx_intr(sc); if (sc->tulip_flags & TULIP_SYSTEMERROR) { if_printf(sc->tulip_ifp, "%d system errors: last was %s\n", sc->tulip_system_errors, tulip_system_errors[sc->tulip_last_system_error]); } if (sc->tulip_statusbits) { tulip_print_abnormal_interrupt(sc, sc->tulip_statusbits); sc->tulip_statusbits = 0; } sc->tulip_flags &= ~(TULIP_NOMESSAGES|TULIP_SYSTEMERROR); } if (sc->tulip_txtimer) tulip_tx_intr(sc); if (sc->tulip_txtimer && --sc->tulip_txtimer == 0) { if_printf(sc->tulip_ifp, "transmission timeout\n"); if (TULIP_DO_AUTOSENSE(sc)) { sc->tulip_media = TULIP_MEDIA_UNKNOWN; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; sc->tulip_flags &= ~(TULIP_WANTRXACT|TULIP_LINKUP); } tulip_reset(sc); tulip_init_locked(sc); } TULIP_PERFEND(stat); TULIP_PERFMERGE(sc, perf_intr_cycles); TULIP_PERFMERGE(sc, perf_ifstart_cycles); TULIP_PERFMERGE(sc, perf_ifioctl_cycles); TULIP_PERFMERGE(sc, perf_stat_cycles); TULIP_PERFMERGE(sc, perf_timeout_cycles); TULIP_PERFMERGE(sc, perf_ifstart_one_cycles); TULIP_PERFMERGE(sc, perf_txput_cycles); TULIP_PERFMERGE(sc, perf_txintr_cycles); TULIP_PERFMERGE(sc, perf_rxintr_cycles); TULIP_PERFMERGE(sc, perf_rxget_cycles); TULIP_PERFMERGE(sc, perf_intr); TULIP_PERFMERGE(sc, perf_ifstart); TULIP_PERFMERGE(sc, perf_ifioctl); TULIP_PERFMERGE(sc, perf_stat); TULIP_PERFMERGE(sc, perf_timeout); TULIP_PERFMERGE(sc, perf_ifstart_one); TULIP_PERFMERGE(sc, perf_txput); TULIP_PERFMERGE(sc, perf_txintr); TULIP_PERFMERGE(sc, perf_rxintr); TULIP_PERFMERGE(sc, perf_rxget); } static void tulip_attach(tulip_softc_t * const sc) { struct ifnet *ifp; ifp = sc->tulip_ifp = if_alloc(IFT_ETHER); /* XXX: driver name/unit should be set some other way */ if_initname(ifp, "de", sc->tulip_unit); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST; ifp->if_ioctl = tulip_ifioctl; ifp->if_start = tulip_start; ifp->if_init = tulip_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); device_printf(sc->tulip_dev, "%s%s pass %d.%d%s\n", sc->tulip_boardid, tulip_chipdescs[sc->tulip_chipid], (sc->tulip_revinfo & 0xF0) >> 4, sc->tulip_revinfo & 0x0F, (sc->tulip_features & (TULIP_HAVE_ISVSROM|TULIP_HAVE_OKSROM)) == TULIP_HAVE_ISVSROM ? " (invalid EESPROM checksum)" : ""); TULIP_LOCK(sc); (*sc->tulip_boardsw->bd_media_probe)(sc); ifmedia_init(&sc->tulip_ifmedia, 0, tulip_ifmedia_change, tulip_ifmedia_status); tulip_ifmedia_add(sc); tulip_reset(sc); TULIP_UNLOCK(sc); ether_ifattach(sc->tulip_ifp, sc->tulip_enaddr); TULIP_LOCK(sc); sc->tulip_flags &= ~TULIP_DEVICEPROBE; TULIP_UNLOCK(sc); } /* Release memory for a single descriptor ring. */ static void tulip_busdma_freering(tulip_ringinfo_t *ri) { int i; /* Release the DMA maps and tag for data buffers. */ if (ri->ri_data_maps != NULL) { for (i = 0; i < ri->ri_max; i++) { if (ri->ri_data_maps[i] != NULL) { bus_dmamap_destroy(ri->ri_data_tag, ri->ri_data_maps[i]); ri->ri_data_maps[i] = NULL; } } free(ri->ri_data_maps, M_DEVBUF); ri->ri_data_maps = NULL; } if (ri->ri_data_tag != NULL) { bus_dma_tag_destroy(ri->ri_data_tag); ri->ri_data_tag = NULL; } /* Release the DMA memory and tag for the ring descriptors.
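* Teardown mirrors allocation in reverse order: unload the map, free the descriptor memory, then destroy the tag, with each step guarded so a partially constructed ring can be cleaned up safely.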
*/ if (ri->ri_dma_addr != 0) { bus_dmamap_unload(ri->ri_ring_tag, ri->ri_ring_map); ri->ri_dma_addr = 0; } if (ri->ri_descs != NULL) { bus_dmamem_free(ri->ri_ring_tag, ri->ri_descs, ri->ri_ring_map); ri->ri_descs = NULL; } if (ri->ri_ring_tag != NULL) { bus_dma_tag_destroy(ri->ri_ring_tag); ri->ri_ring_tag = NULL; } } /* Allocate memory for a single descriptor ring. */ static int tulip_busdma_allocring(device_t dev, tulip_softc_t * const sc, size_t count, bus_size_t align, int nsegs, tulip_ringinfo_t *ri, const char *name) { size_t size; int error, i; /* First, setup a tag. */ ri->ri_max = count; size = count * sizeof(tulip_desc_t); error = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &ri->ri_ring_tag); if (error) { device_printf(dev, "failed to allocate %s descriptor ring dma tag\n", name); return (error); } /* Next, allocate memory for the descriptors. */ error = bus_dmamem_alloc(ri->ri_ring_tag, (void **)&ri->ri_descs, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ri->ri_ring_map); if (error) { device_printf(dev, "failed to allocate memory for %s descriptor ring\n", name); return (error); } /* Map the descriptors. */ error = bus_dmamap_load(ri->ri_ring_tag, ri->ri_ring_map, ri->ri_descs, size, tulip_dma_map_addr, &ri->ri_dma_addr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "failed to get dma address for %s descriptor ring\n", name); return (error); } /* Allocate a tag for the data buffers. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * nsegs, nsegs, MCLBYTES, 0, NULL, NULL, &ri->ri_data_tag); if (error) { device_printf(dev, "failed to allocate %s buffer dma tag\n", name); return (error); } /* Allocate maps for the data buffers. */ ri->ri_data_maps = malloc(sizeof(bus_dmamap_t) * count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) { error = bus_dmamap_create(ri->ri_data_tag, 0, &ri->ri_data_maps[i]); if (error) { device_printf(dev, "failed to create map for %s buffer %d\n", name, i); return (error); } } return (0); } /* Release busdma maps, tags, and memory. */ static void tulip_busdma_cleanup(tulip_softc_t * const sc) { /* Release resources for the setup descriptor. */ if (sc->tulip_setup_dma_addr != 0) { bus_dmamap_unload(sc->tulip_setup_tag, sc->tulip_setup_map); sc->tulip_setup_dma_addr = 0; } if (sc->tulip_setupbuf != NULL) { bus_dmamem_free(sc->tulip_setup_tag, sc->tulip_setupbuf, sc->tulip_setup_map); sc->tulip_setupbuf = NULL; } if (sc->tulip_setup_tag != NULL) { bus_dma_tag_destroy(sc->tulip_setup_tag); sc->tulip_setup_tag = NULL; } /* Release the transmit ring. */ tulip_busdma_freering(&sc->tulip_txinfo); /* Release the receive ring. */ tulip_busdma_freering(&sc->tulip_rxinfo); } static int tulip_busdma_init(device_t dev, tulip_softc_t * const sc) { int error; /* * Allocate space and dmamap for transmit ring. */ error = tulip_busdma_allocring(dev, sc, TULIP_TXDESCS, 1, TULIP_MAX_TXSEG, &sc->tulip_txinfo, "transmit"); if (error) return (error); /* * Allocate space and dmamap for receive ring. We tell bus_dma that * we can map MCLBYTES so that it will accept a full MCLBYTES cluster, * but we will only map the first TULIP_RX_BUFLEN bytes. This is not * a waste in practice though as an ethernet frame can easily fit * in TULIP_RX_BUFLEN bytes. 
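* (The receive ring is therefore created with 4-byte alignment and a single segment per buffer in the tulip_busdma_allocring() call below.)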
*/ error = tulip_busdma_allocring(dev, sc, TULIP_RXDESCS, 4, 1, &sc->tulip_rxinfo, "receive"); if (error) return (error); /* * Allocate a DMA tag, memory, and map for setup descriptor */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(sc->tulip_setupdata), 1, sizeof(sc->tulip_setupdata), 0, NULL, NULL, &sc->tulip_setup_tag); if (error) { device_printf(dev, "failed to allocate setup descriptor dma tag\n"); return (error); } error = bus_dmamem_alloc(sc->tulip_setup_tag, (void **)&sc->tulip_setupbuf, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tulip_setup_map); if (error) { device_printf(dev, "failed to allocate memory for setup descriptor\n"); return (error); } error = bus_dmamap_load(sc->tulip_setup_tag, sc->tulip_setup_map, sc->tulip_setupbuf, sizeof(sc->tulip_setupdata), tulip_dma_map_addr, &sc->tulip_setup_dma_addr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "failed to get dma address for setup descriptor\n"); return (error); } return error; } static void tulip_initcsrs(tulip_softc_t * const sc, tulip_csrptr_t csr_base, size_t csr_size) { sc->tulip_csrs.csr_busmode = csr_base + 0 * csr_size; sc->tulip_csrs.csr_txpoll = csr_base + 1 * csr_size; sc->tulip_csrs.csr_rxpoll = csr_base + 2 * csr_size; sc->tulip_csrs.csr_rxlist = csr_base + 3 * csr_size; sc->tulip_csrs.csr_txlist = csr_base + 4 * csr_size; sc->tulip_csrs.csr_status = csr_base + 5 * csr_size; sc->tulip_csrs.csr_command = csr_base + 6 * csr_size; sc->tulip_csrs.csr_intr = csr_base + 7 * csr_size; sc->tulip_csrs.csr_missed_frames = csr_base + 8 * csr_size; sc->tulip_csrs.csr_9 = csr_base + 9 * csr_size; sc->tulip_csrs.csr_10 = csr_base + 10 * csr_size; sc->tulip_csrs.csr_11 = csr_base + 11 * csr_size; sc->tulip_csrs.csr_12 = csr_base + 12 * csr_size; sc->tulip_csrs.csr_13 = csr_base + 13 * csr_size; sc->tulip_csrs.csr_14 = csr_base + 14 * csr_size; sc->tulip_csrs.csr_15 = csr_base + 15 * csr_size; } static int tulip_initring( device_t dev, tulip_softc_t * const sc, tulip_ringinfo_t * const ri, int ndescs) { int i; ri->ri_descinfo = malloc(sizeof(tulip_descinfo_t) * ndescs, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < ndescs; i++) { ri->ri_descinfo[i].di_desc = &ri->ri_descs[i]; ri->ri_descinfo[i].di_map = &ri->ri_data_maps[i]; } ri->ri_first = ri->ri_descinfo; ri->ri_max = ndescs; ri->ri_last = ri->ri_first + ri->ri_max; bzero(ri->ri_descs, sizeof(tulip_desc_t) * ri->ri_max); ri->ri_last[-1].di_desc->d_flag = TULIP_DFLAG_ENDRING; return (0); } /* * This is the PCI configuration support. 
*/ #define PCI_CBIO PCIR_BAR(0) /* Configuration Base IO Address */ #define PCI_CBMA PCIR_BAR(1) /* Configuration Base Memory Address */ #define PCI_CFDA 0x40 /* Configuration Driver Area */ static int tulip_pci_probe(device_t dev) { const char *name = NULL; if (pci_get_vendor(dev) != DEC_VENDORID) return ENXIO; /* * Some LanMedia WAN cards use the Tulip chip, but they have * their own driver, and we should not recognize them */ if (pci_get_subvendor(dev) == 0x1376) return ENXIO; switch (pci_get_device(dev)) { case CHIPID_21040: name = "Digital 21040 Ethernet"; break; case CHIPID_21041: name = "Digital 21041 Ethernet"; break; case CHIPID_21140: if (pci_get_revid(dev) >= 0x20) name = "Digital 21140A Fast Ethernet"; else name = "Digital 21140 Fast Ethernet"; break; case CHIPID_21142: if (pci_get_revid(dev) >= 0x20) name = "Digital 21143 Fast Ethernet"; else name = "Digital 21142 Fast Ethernet"; break; } if (name) { device_set_desc(dev, name); return BUS_PROBE_LOW_PRIORITY; } return ENXIO; } static int tulip_shutdown(device_t dev) { tulip_softc_t * const sc = device_get_softc(dev); TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(10); /* Wait 10 microseconds (actually 50 PCI cycles, which at 33MHz comes to about two microseconds, but wait a bit longer anyway) */ return 0; } static int tulip_pci_attach(device_t dev) { tulip_softc_t *sc; int retval, idx; u_int32_t revinfo, cfdainfo; unsigned csroffset = TULIP_PCI_CSROFFSET; unsigned csrsize = TULIP_PCI_CSRSIZE; tulip_csrptr_t csr_base; tulip_chipid_t chipid = TULIP_CHIPID_UNKNOWN; struct resource *res; int rid, unit; unit = device_get_unit(dev); if (unit >= TULIP_MAX_DEVICES) { device_printf(dev, "not configured; limit of %d reached or exceeded\n", TULIP_MAX_DEVICES); return ENXIO; } revinfo = pci_get_revid(dev); cfdainfo = pci_read_config(dev, PCI_CFDA, 4); /* turn busmaster on in case BIOS doesn't set it */ pci_enable_busmaster(dev); if (pci_get_vendor(dev) == DEC_VENDORID) { if (pci_get_device(dev) == CHIPID_21040) chipid = TULIP_21040; else if (pci_get_device(dev) == CHIPID_21041) chipid = TULIP_21041; else if (pci_get_device(dev) == CHIPID_21140) chipid = (revinfo >= 0x20) ? TULIP_21140A : TULIP_21140; else if (pci_get_device(dev) == CHIPID_21142) chipid = (revinfo >= 0x20) ?
TULIP_21143 : TULIP_21142; } if (chipid == TULIP_CHIPID_UNKNOWN) return ENXIO; if (chipid == TULIP_21040 && revinfo < 0x20) { device_printf(dev, "not configured; 21040 pass 2.0 required (%d.%d found)\n", revinfo >> 4, revinfo & 0x0f); return ENXIO; } else if (chipid == TULIP_21140 && revinfo < 0x11) { device_printf(dev, "not configured; 21140 pass 1.1 required (%d.%d found)\n", revinfo >> 4, revinfo & 0x0f); return ENXIO; } sc = device_get_softc(dev); sc->tulip_dev = dev; sc->tulip_pci_busno = pci_get_bus(dev); sc->tulip_pci_devno = pci_get_slot(dev); sc->tulip_chipid = chipid; sc->tulip_flags |= TULIP_DEVICEPROBE; if (chipid == TULIP_21140 || chipid == TULIP_21140A) sc->tulip_features |= TULIP_HAVE_GPR|TULIP_HAVE_STOREFWD; if (chipid == TULIP_21140A && revinfo <= 0x22) sc->tulip_features |= TULIP_HAVE_RXBADOVRFLW; if (chipid == TULIP_21140) sc->tulip_features |= TULIP_HAVE_BROKEN_HASH; if (chipid != TULIP_21040 && chipid != TULIP_21140) sc->tulip_features |= TULIP_HAVE_POWERMGMT; if (chipid == TULIP_21041 || chipid == TULIP_21142 || chipid == TULIP_21143) { sc->tulip_features |= TULIP_HAVE_DUALSENSE; if (chipid != TULIP_21041 || revinfo >= 0x20) sc->tulip_features |= TULIP_HAVE_SIANWAY; if (chipid != TULIP_21041) sc->tulip_features |= TULIP_HAVE_SIAGP|TULIP_HAVE_RXBADOVRFLW|TULIP_HAVE_STOREFWD; if (chipid != TULIP_21041 && revinfo >= 0x20) sc->tulip_features |= TULIP_HAVE_SIA100; } if (sc->tulip_features & TULIP_HAVE_POWERMGMT && (cfdainfo & (TULIP_CFDA_SLEEP|TULIP_CFDA_SNOOZE))) { cfdainfo &= ~(TULIP_CFDA_SLEEP|TULIP_CFDA_SNOOZE); pci_write_config(dev, PCI_CFDA, cfdainfo, 4); DELAY(11*1000); } sc->tulip_unit = unit; sc->tulip_revinfo = revinfo; #if defined(TULIP_IOMAPPED) rid = PCI_CBIO; res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); #else rid = PCI_CBMA; res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); #endif if (!res) return ENXIO; sc->tulip_csrs_bst = rman_get_bustag(res); sc->tulip_csrs_bsh = rman_get_bushandle(res); csr_base = 0; mtx_init(TULIP_MUTEX(sc), MTX_NETWORK_LOCK, device_get_nameunit(dev), MTX_DEF); callout_init_mtx(&sc->tulip_callout, TULIP_MUTEX(sc), 0); callout_init_mtx(&sc->tulip_stat_timer, TULIP_MUTEX(sc), 0); tulips[unit] = sc; tulip_initcsrs(sc, csr_base + csroffset, csrsize); if ((retval = tulip_busdma_init(dev, sc)) != 0) { device_printf(dev, "error initing bus_dma: %d\n", retval); tulip_busdma_cleanup(sc); mtx_destroy(TULIP_MUTEX(sc)); return ENXIO; } retval = tulip_initring(dev, sc, &sc->tulip_rxinfo, TULIP_RXDESCS); if (retval == 0) retval = tulip_initring(dev, sc, &sc->tulip_txinfo, TULIP_TXDESCS); if (retval) { tulip_busdma_cleanup(sc); mtx_destroy(TULIP_MUTEX(sc)); return retval; } /* * Make sure there won't be any interrupts or such... 
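* The software reset through the busmode register quiesces the chip before tulip_read_macaddr() goes poking at the address ROM.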
*/ TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(100); /* Wait 10 microseconds (actually 50 PCI cycles, which at 33MHz comes to about two microseconds, but wait a bit longer anyway) */ TULIP_LOCK(sc); retval = tulip_read_macaddr(sc); TULIP_UNLOCK(sc); if (retval < 0) { device_printf(dev, "can't read ENET ROM (why=%d) (", retval); for (idx = 0; idx < 32; idx++) printf("%02x", sc->tulip_rombuf[idx]); printf("\n"); device_printf(dev, "%s%s pass %d.%d\n", sc->tulip_boardid, tulip_chipdescs[sc->tulip_chipid], (sc->tulip_revinfo & 0xF0) >> 4, sc->tulip_revinfo & 0x0F); device_printf(dev, "address unknown\n"); } else { void (*intr_rtn)(void *) = tulip_intr_normal; if (sc->tulip_features & TULIP_HAVE_SHAREDINTR) intr_rtn = tulip_intr_shared; tulip_attach(sc); /* Setup interrupt last. */ if ((sc->tulip_features & TULIP_HAVE_SLAVEDINTR) == 0) { void *ih; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); - if (res == 0 || bus_setup_intr(dev, res, INTR_TYPE_NET | - INTR_MPSAFE, NULL, intr_rtn, sc, &ih)) { + if (res == NULL || bus_setup_intr(dev, res, INTR_TYPE_NET | + INTR_MPSAFE, NULL, intr_rtn, sc, &ih)) { device_printf(dev, "couldn't map interrupt\n"); tulip_busdma_cleanup(sc); ether_ifdetach(sc->tulip_ifp); if_free(sc->tulip_ifp); mtx_destroy(TULIP_MUTEX(sc)); return ENXIO; } } } return 0; } static device_method_t tulip_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tulip_pci_probe), DEVMETHOD(device_attach, tulip_pci_attach), DEVMETHOD(device_shutdown, tulip_shutdown), { 0, 0 } }; static driver_t tulip_pci_driver = { "de", tulip_pci_methods, sizeof(tulip_softc_t), }; static devclass_t tulip_devclass; DRIVER_MODULE(de, pci, tulip_pci_driver, tulip_devclass, 0, 0); #ifdef DDB void tulip_dumpring(int unit, int ring); void tulip_dumpdesc(int unit, int ring, int desc); void tulip_status(int unit); void tulip_dumpring(int unit, int ring) { tulip_softc_t *sc; tulip_ringinfo_t *ri; tulip_descinfo_t *di; if (unit < 0 || unit >= TULIP_MAX_DEVICES) { db_printf("invalid unit %d\n", unit); return; } sc = tulips[unit]; if (sc == NULL) { db_printf("unit %d not present\n", unit); return; } switch (ring) { case 0: db_printf("receive ring:\n"); ri = &sc->tulip_rxinfo; break; case 1: db_printf("transmit ring:\n"); ri = &sc->tulip_txinfo; break; default: db_printf("invalid ring %d\n", ring); return; } db_printf(" nextin: %td, nextout: %td, max: %d, free: %d\n", ri->ri_nextin - ri->ri_first, ri->ri_nextout - ri->ri_first, ri->ri_max, ri->ri_free); for (di = ri->ri_first; di != ri->ri_last; di++) { if (di->di_mbuf != NULL) db_printf(" descriptor %td: mbuf %p\n", di - ri->ri_first, di->di_mbuf); else if (di->di_desc->d_flag & TULIP_DFLAG_TxSETUPPKT) db_printf(" descriptor %td: setup packet\n", di - ri->ri_first); } } void tulip_dumpdesc(int unit, int ring, int desc) { tulip_softc_t *sc; tulip_ringinfo_t *ri; tulip_descinfo_t *di; char *s; if (unit < 0 || unit >= TULIP_MAX_DEVICES) { db_printf("invalid unit %d\n", unit); return; } sc = tulips[unit]; if (sc == NULL) { db_printf("unit %d not present\n", unit); return; } switch (ring) { case 0: s = "receive"; ri = &sc->tulip_rxinfo; break; case 1: s = "transmit"; ri = &sc->tulip_txinfo; break; default: db_printf("invalid ring %d\n", ring); return; } if (desc < 0 || desc >= ri->ri_max) { db_printf("invalid descriptor %d\n", desc); return; } db_printf("%s descriptor %d:\n", s, desc); di = &ri->ri_first[desc]; db_printf(" mbuf: %p\n", di->di_mbuf); db_printf(" status: %08x flag: %03x\n",
di->di_desc->d_status, di->di_desc->d_flag); db_printf(" addr1: %08x len1: %03x\n", di->di_desc->d_addr1, di->di_desc->d_length1); db_printf(" addr2: %08x len2: %03x\n", di->di_desc->d_addr2, di->di_desc->d_length2); } #endif Index: head/sys/dev/ed/if_ed.c =================================================================== --- head/sys/dev/ed/if_ed.c (revision 313981) +++ head/sys/dev/ed/if_ed.c (revision 313982) @@ -1,1855 +1,1855 @@ /*- * Copyright (c) 1995, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Device driver for National Semiconductor DS8390/WD83C690 based ethernet * adapters. By David Greenman, 29-April-1993 * * Currently supports the Western Digital/SMC 8003 and 8013 series, * the SMC Elite Ultra (8216), the 3Com 3c503, the NE1000 and NE2000, * and a variety of similar clones. * */ #include "opt_ed.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t ed_devclass; static void ed_init(void *); static void ed_init_locked(struct ed_softc *); static int ed_ioctl(struct ifnet *, u_long, caddr_t); static void ed_start(struct ifnet *); static void ed_start_locked(struct ifnet *); static void ed_reset(struct ifnet *); static void ed_tick(void *); static void ed_watchdog(struct ed_softc *); static void ed_ds_getmcaf(struct ed_softc *, uint32_t *); static void ed_get_packet(struct ed_softc *, bus_size_t, u_short); static void ed_stop_hw(struct ed_softc *sc); static __inline void ed_rint(struct ed_softc *); static __inline void ed_xmit(struct ed_softc *); static __inline void ed_ring_copy(struct ed_softc *, bus_size_t, char *, u_short); static void ed_setrcr(struct ed_softc *); /* * Generic probe routine for testing for the existence of a DS8390. * Must be called after the NIC has just been reset. This routine * works by looking at certain register values that are guaranteed * to be initialized a certain way after power-up or reset. Seems * not to currently work on the 83C690.
* * Specifically: * * Register reset bits set bits * Command Register (CR) TXP, STA RD2, STP * Interrupt Status (ISR) RST * Interrupt Mask (IMR) All bits * Data Control (DCR) LAS * Transmit Config. (TCR) LB1, LB0 * * We only look at the CR and ISR registers, however, because looking at * the others would require changing register pages (which would be * intrusive if this isn't an 8390). * * Return 1 if 8390 was found, 0 if not. */ int ed_probe_generic8390(struct ed_softc *sc) { if ((ed_nic_inb(sc, ED_P0_CR) & (ED_CR_RD2 | ED_CR_TXP | ED_CR_STA | ED_CR_STP)) != (ED_CR_RD2 | ED_CR_STP)) return (0); if ((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) != ED_ISR_RST) return (0); return (1); } void ed_disable_16bit_access(struct ed_softc *sc) { /* * Disable 16 bit access to shared memory */ if (sc->isa16bit && sc->vendor == ED_VENDOR_WD_SMC) { if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, 0x00); ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto & ~ED_WD_LAAR_M16EN); } } void ed_enable_16bit_access(struct ed_softc *sc) { if (sc->isa16bit && sc->vendor == ED_VENDOR_WD_SMC) { ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto | ED_WD_LAAR_M16EN); if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, ED_WD_MSR_MENB); } } /* * Allocate a port resource with the given resource id. */ int ed_alloc_port(device_t dev, int rid, int size) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &rid, size, RF_ACTIVE); if (res) { sc->port_res = res; sc->port_used = size; sc->port_bst = rman_get_bustag(res); sc->port_bsh = rman_get_bushandle(res); return (0); } return (ENOENT); } /* * Allocate a memory resource with the given resource id. */ int ed_alloc_memory(device_t dev, int rid, int size) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY, &rid, size, RF_ACTIVE); if (res) { sc->mem_res = res; sc->mem_used = size; sc->mem_bst = rman_get_bustag(res); sc->mem_bsh = rman_get_bushandle(res); return (0); } return (ENOENT); } /* * Allocate an irq resource with the given resource id. 
*/ int ed_alloc_irq(device_t dev, int rid, int flags) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | flags); if (res) { sc->irq_res = res; return (0); } return (ENOENT); } /* * Release all resources */ void ed_release_resources(device_t dev) { struct ed_softc *sc = device_get_softc(dev); if (sc->port_res) bus_free_resource(dev, SYS_RES_IOPORT, sc->port_res); if (sc->port_res2) bus_free_resource(dev, SYS_RES_IOPORT, sc->port_res2); if (sc->mem_res) bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res); if (sc->irq_res) bus_free_resource(dev, SYS_RES_IRQ, sc->irq_res); sc->port_res = 0; sc->port_res2 = 0; sc->mem_res = 0; sc->irq_res = 0; if (sc->ifp) if_free(sc->ifp); } /* * Install interface into kernel networking data structures */ int ed_attach(device_t dev) { struct ed_softc *sc = device_get_softc(dev); struct ifnet *ifp; sc->dev = dev; ED_LOCK_INIT(sc); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); ED_LOCK_DESTROY(sc); return (ENOSPC); } if (sc->readmem == NULL) { if (sc->mem_shared) { if (sc->isa16bit) sc->readmem = ed_shmem_readmem16; else sc->readmem = ed_shmem_readmem8; } else { sc->readmem = ed_pio_readmem; } } if (sc->sc_write_mbufs == NULL) { device_printf(dev, "No write mbufs routine set\n"); return (ENXIO); } callout_init_mtx(&sc->tick_ch, ED_MUTEX(sc), 0); /* * Set interface to stopped condition (reset) */ ed_stop_hw(sc); /* * Initialize ifnet structure */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start = ed_start; ifp->if_ioctl = ed_ioctl; ifp->if_init = ed_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof sc->mibdata; /* * XXX - should do a better job. */ if (sc->chip_type == ED_CHIP_TYPE_WD790) sc->mibdata.dot3StatsEtherChipSet = DOT3CHIPSET(dot3VendorWesternDigital, dot3ChipSetWesternDigital83C790); else sc->mibdata.dot3StatsEtherChipSet = DOT3CHIPSET(dot3VendorNational, dot3ChipSetNational8390); sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* * Set default state for LINK2 flag (used to disable the * transceiver for AUI operation), based on config option. * We only set this flag before we attach the device, so there's * no race. It is convenient to allow users to turn this off * by default in the kernel config, but given our more advanced * boot time configuration options, this might no longer be needed.
if (device_get_flags(dev) & ED_FLAGS_DISABLE_TRANCEIVER) ifp->if_flags |= IFF_LINK2; /* * Attach the interface */ ether_ifattach(ifp, sc->enaddr); /* device attach does transition from UNCONFIGURED to IDLE state */ sc->tx_mem = sc->txb_cnt * ED_PAGE_SIZE * ED_TXBUF_SIZE; sc->rx_mem = (sc->rec_page_stop - sc->rec_page_start) * ED_PAGE_SIZE; SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 0, "type", CTLFLAG_RD, sc->type_str, 0, "Type of chip in card"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1, "TxMem", CTLFLAG_RD, &sc->tx_mem, 0, "Memory set aside for transmitting packets"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 2, "RxMem", CTLFLAG_RD, &sc->rx_mem, 0, "Memory set aside for receiving packets"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 3, "Mem", CTLFLAG_RD, &sc->mem_size, 0, "Total Card Memory"); if (bootverbose) { if (sc->type_str && (*sc->type_str != 0)) device_printf(dev, "type %s ", sc->type_str); else device_printf(dev, "type unknown (0x%x) ", sc->type); #ifdef ED_HPP if (sc->vendor == ED_VENDOR_HP) printf("(%s %s IO)", (sc->hpp_id & ED_HPP_ID_16_BIT_ACCESS) ? "16-bit" : "32-bit", sc->hpp_mem_start ? "memory mapped" : "regular"); else #endif printf("%s", sc->isa16bit ? "(16 bit)" : "(8 bit)"); #if defined(ED_HPP) || defined(ED_3C503) printf("%s", (((sc->vendor == ED_VENDOR_3COM) || (sc->vendor == ED_VENDOR_HP)) && (ifp->if_flags & IFF_LINK2)) ? " tranceiver disabled" : ""); #endif printf("\n"); } return (0); } /* * Detach the driver from the hardware and other systems in the kernel. */ int ed_detach(device_t dev) { struct ed_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; if (mtx_initialized(ED_MUTEX(sc))) ED_ASSERT_UNLOCKED(sc); if (ifp) { ED_LOCK(sc); if (bus_child_present(dev)) ed_stop(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ED_UNLOCK(sc); ether_ifdetach(ifp); callout_drain(&sc->tick_ch); } if (sc->irq_res != NULL && sc->irq_handle) bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); ed_release_resources(dev); if (sc->miibus) device_delete_child(dev, sc->miibus); if (mtx_initialized(ED_MUTEX(sc))) ED_LOCK_DESTROY(sc); bus_generic_detach(dev); return (0); } /* * Reset interface. */ static void ed_reset(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; ED_ASSERT_LOCKED(sc); /* * Stop interface and re-initialize. */ ed_stop(sc); ed_init_locked(sc); } static void ed_stop_hw(struct ed_softc *sc) { int n = 5000; /* * Stop everything on the interface, and select page 0 registers. */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * Wait for interface to enter stopped state, but limit # of checks to * 'n' (about 5ms). It shouldn't even take 5us on modern DS8390's, but * just in case it's an old one. * * The AX88x90 chips don't seem to implement this behavior. The * datasheets say it is only turned on when the chip enters a RESET * state and are silent about behavior for the stopped state we just * entered. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190 || sc->chip_type == ED_CHIP_TYPE_AX88790) return; while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) == 0) && --n) continue; if (n <= 0) device_printf(sc->dev, "ed_stop_hw RST never set\n"); } /* * Take interface offline.
*/ void ed_stop(struct ed_softc *sc) { ED_ASSERT_LOCKED(sc); callout_stop(&sc->tick_ch); ed_stop_hw(sc); } /* * Periodic timer used to drive the watchdog and attachment-specific * tick handler. */ static void ed_tick(void *arg) { struct ed_softc *sc; sc = arg; ED_ASSERT_LOCKED(sc); if (sc->sc_tick) sc->sc_tick(sc); if (sc->tx_timer != 0 && --sc->tx_timer == 0) ed_watchdog(sc); callout_reset(&sc->tick_ch, hz, ed_tick, sc); } /* * Device timeout/watchdog routine. Entered if the device neglects to * generate an interrupt after a transmit has been started on it. */ static void ed_watchdog(struct ed_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; log(LOG_ERR, "%s: device timeout\n", ifp->if_xname); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ed_reset(ifp); } /* * Initialize device. */ static void ed_init(void *xsc) { struct ed_softc *sc = xsc; ED_ASSERT_UNLOCKED(sc); ED_LOCK(sc); ed_init_locked(sc); ED_UNLOCK(sc); } static void ed_init_locked(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; int i; ED_ASSERT_LOCKED(sc); /* * Initialize the NIC in the exact order outlined in the NS manual. * This init procedure is "mandatory"...don't change what or when * things happen. */ /* reset transmitter flags */ sc->xmit_busy = 0; sc->tx_timer = 0; sc->txb_inuse = 0; sc->txb_new = 0; sc->txb_next_tx = 0; /* This variable is used below - don't move this assignment */ sc->next_packet = sc->rec_page_start + 1; /* * Set interface for page 0, Remote DMA complete, Stopped */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (sc->isa16bit) /* * Set FIFO threshold to 8, No auto-init Remote DMA, byte * order=80x86, word-wide DMA xfers. */ ed_nic_outb(sc, ED_P0_DCR, ED_DCR_FT1 | ED_DCR_WTS | ED_DCR_LS); else /* * Same as above, but byte-wide DMA xfers */ ed_nic_outb(sc, ED_P0_DCR, ED_DCR_FT1 | ED_DCR_LS); /* * Clear Remote Byte Count Registers */ ed_nic_outb(sc, ED_P0_RBCR0, 0); ed_nic_outb(sc, ED_P0_RBCR1, 0); /* * For the moment, don't store incoming packets in memory. */ ed_nic_outb(sc, ED_P0_RCR, ED_RCR_MON); /* * Place NIC in internal loopback mode */ ed_nic_outb(sc, ED_P0_TCR, ED_TCR_LB0); /* * Initialize transmit/receive (ring-buffer) Page Start */ ed_nic_outb(sc, ED_P0_TPSR, sc->tx_page_start); ed_nic_outb(sc, ED_P0_PSTART, sc->rec_page_start); /* Set lower bits of byte addressable framing to 0 */ if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_nic_outb(sc, 0x09, 0); /* * Initialize Receiver (ring-buffer) Page Stop and Boundary */ ed_nic_outb(sc, ED_P0_PSTOP, sc->rec_page_stop); ed_nic_outb(sc, ED_P0_BNRY, sc->rec_page_start); /* * Clear all interrupts. A '1' in each bit position clears the * corresponding flag. */ ed_nic_outb(sc, ED_P0_ISR, 0xff); /* * Enable the following interrupts: receive/transmit complete, * receive/transmit error, and Receiver OverWrite. * * Counter overflow and Remote DMA complete are *not* enabled.
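* (That is, ED_IMR_CNTE and ED_IMR_RDCE are left clear in the mask written below.)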
*/ ed_nic_outb(sc, ED_P0_IMR, ED_IMR_PRXE | ED_IMR_PTXE | ED_IMR_RXEE | ED_IMR_TXEE | ED_IMR_OVWE); /* * Program Command Register for page 1 */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * Copy out our station address */ for (i = 0; i < ETHER_ADDR_LEN; ++i) ed_nic_outb(sc, ED_P1_PAR(i), IF_LLADDR(sc->ifp)[i]); /* * Set Current Page pointer to next_packet (initialized above) */ ed_nic_outb(sc, ED_P1_CURR, sc->next_packet); /* * Program Receiver Configuration Register and multicast filter. CR is * set to page 0 on return. */ ed_setrcr(sc); /* * Take interface out of loopback */ ed_nic_outb(sc, ED_P0_TCR, 0); if (sc->sc_mediachg) sc->sc_mediachg(sc); /* * Set 'running' flag, and clear output active flag. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * ...and attempt to start output */ ed_start_locked(ifp); callout_reset(&sc->tick_ch, hz, ed_tick, sc); } /* * This routine actually starts the transmission on the interface */ static __inline void ed_xmit(struct ed_softc *sc) { unsigned short len; len = sc->txb_len[sc->txb_next_tx]; /* * Set NIC for page 0 register access */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * Set TX buffer start page */ ed_nic_outb(sc, ED_P0_TPSR, sc->tx_page_start + sc->txb_next_tx * ED_TXBUF_SIZE); /* * Set TX length */ ed_nic_outb(sc, ED_P0_TBCR0, len); ed_nic_outb(sc, ED_P0_TBCR1, len >> 8); /* * Set page 0, Remote DMA complete, Transmit Packet, and *Start* */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_TXP | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); sc->xmit_busy = 1; /* * Point to next transmit buffer slot and wrap if necessary. */ sc->txb_next_tx++; if (sc->txb_next_tx == sc->txb_cnt) sc->txb_next_tx = 0; /* * Set a timer just in case we never hear from the board again */ sc->tx_timer = 2; } /* * Start output on interface. * We make two assumptions here: * 1) that the current priority is set to splimp _before_ this code * is called *and* is returned to the appropriate priority after * return * 2) that the IFF_DRV_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) */ static void ed_start(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; ED_ASSERT_UNLOCKED(sc); ED_LOCK(sc); ed_start_locked(ifp); ED_UNLOCK(sc); } static void ed_start_locked(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; struct mbuf *m0, *m; bus_size_t buffer; int len; ED_ASSERT_LOCKED(sc); outloop: /* * First, see if there are buffered packets and an idle transmitter - * should never happen at this point. */ if (sc->txb_inuse && (sc->xmit_busy == 0)) { printf("ed: packets buffered, but transmitter idle\n"); ed_xmit(sc); } /* * See if there is room to put another packet in the buffer. */ if (sc->txb_inuse == sc->txb_cnt) { /* * No room. Indicate this to the outside world and exit. 
 */
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                return;
        }
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
-       if (m == 0) {
+       if (m == NULL) {
                /*
                 * We are using the !OACTIVE flag to indicate to the outside
                 * world that we can accept an additional packet rather than
                 * that the transmitter is _actually_ active. Indeed, the
                 * transmitter may be active, but if we haven't filled all the
                 * buffers with data then we still want to accept more.
                 */
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                return;
        }

        /*
         * Copy the mbuf chain into the transmit buffer
         */
        m0 = m;

        /* txb_new points to next open buffer slot */
        buffer = sc->mem_start +
            (sc->txb_new * ED_TXBUF_SIZE * ED_PAGE_SIZE);

        len = sc->sc_write_mbufs(sc, m, buffer);
        if (len == 0) {
                m_freem(m0);
                goto outloop;
        }
        sc->txb_len[sc->txb_new] = max(len, (ETHER_MIN_LEN-ETHER_CRC_LEN));

        sc->txb_inuse++;

        /*
         * Point to next buffer slot and wrap if necessary.
         */
        sc->txb_new++;
        if (sc->txb_new == sc->txb_cnt)
                sc->txb_new = 0;

        if (sc->xmit_busy == 0)
                ed_xmit(sc);

        /*
         * Tap off here if there is a bpf listener.
         */
        BPF_MTAP(ifp, m0);

        m_freem(m0);

        /*
         * Loop back to the top to possibly buffer more packets
         */
        goto outloop;
}

/*
 * Ethernet interface receiver interrupt.
 */
static __inline void
ed_rint(struct ed_softc *sc)
{
        struct ifnet *ifp = sc->ifp;
        u_char boundry;
        u_short len;
        struct ed_ring packet_hdr;
        bus_size_t packet_ptr;

        ED_ASSERT_LOCKED(sc);

        /*
         * Set NIC to page 1 registers to get 'current' pointer
         */
        ed_nic_barrier(sc, ED_P0_CR, 1,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA);
        ed_nic_barrier(sc, ED_P0_CR, 1,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

        /*
         * 'sc->next_packet' is the logical beginning of the ring-buffer -
         * i.e. it points to where new data has been buffered. The 'CURR'
         * (current) register points to the logical end of the ring-buffer -
         * i.e. it points to where additional new data will be added. We loop
         * here until the logical beginning equals the logical end (or in
         * other words, until the ring-buffer is empty).
         */
        while (sc->next_packet != ed_nic_inb(sc, ED_P1_CURR)) {
                /* get pointer to this buffer's header structure */
                packet_ptr = sc->mem_ring +
                    (sc->next_packet - sc->rec_page_start) * ED_PAGE_SIZE;

                /*
                 * The byte count includes a 4 byte header that was added by
                 * the NIC.
                 */
                sc->readmem(sc, packet_ptr, (char *) &packet_hdr,
                    sizeof(packet_hdr));
                len = packet_hdr.count;
                if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN +
                    sizeof(struct ed_ring)) ||
                    len < (ETHER_MIN_LEN - ETHER_CRC_LEN +
                    sizeof(struct ed_ring))) {
                        /*
                         * Length is a wild value. There's a good chance that
                         * this was caused by the NIC being old and buggy.
                         * The bug is that the length low byte is duplicated
                         * in the high byte. Try to recalculate the length
                         * based on the pointer to the next packet. Also,
                         * need to preserve offset into page.
                         *
                         * NOTE: sc->next_packet is pointing at the current
                         * packet.
                         */
                        len &= ED_PAGE_SIZE - 1;
                        if (packet_hdr.next_packet >= sc->next_packet)
                                len += (packet_hdr.next_packet -
                                    sc->next_packet) * ED_PAGE_SIZE;
                        else
                                len += ((packet_hdr.next_packet -
                                    sc->rec_page_start) + (sc->rec_page_stop -
                                    sc->next_packet)) * ED_PAGE_SIZE;
                        /*
                         * because buffers are aligned on 256-byte boundary,
                         * the length computed above is off by 256 in almost
                         * all cases. Fix it...
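                         *
                         * A worked example with invented values: with
                         * ED_PAGE_SIZE = 256, suppose next_packet = 0x46,
                         * the corrupt count reads 0x0505 and the header's
                         * next_packet is 0x48. Then:
                         *
                         *      len = 0x0505 & 0xff;            -> 0x005
                         *      len += (0x48 - 0x46) * 256;     -> 0x205
                         *
                         * i.e. two pages plus the in-page offset; the
                         * adjustment below then trims one page whenever the
                         * in-page offset is non-zero, giving 0x105 (261).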
*/ if (len & 0xff) len -= 256; if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring))) sc->mibdata.dot3StatsFrameTooLongs++; } /* * Be fairly liberal about what we allow as a "reasonable" * length so that a [crufty] packet will make it to BPF (and * can thus be analyzed). Note that all that is really * important is that we have a length that will fit into one * mbuf cluster or less; the upper layer protocols can then * figure out the length from their own length field(s). But * make sure that we have at least a full ethernet header or * we would be unable to call ether_input() later. */ if ((len >= sizeof(struct ed_ring) + ETHER_HDR_LEN) && (len <= MCLBYTES) && (packet_hdr.next_packet >= sc->rec_page_start) && (packet_hdr.next_packet < sc->rec_page_stop)) { /* * Go get packet. */ ed_get_packet(sc, packet_ptr + sizeof(struct ed_ring), len - sizeof(struct ed_ring)); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } else { /* * Really BAD. The ring pointers are corrupted. */ log(LOG_ERR, "%s: NIC memory corrupt - invalid packet length %d\n", ifp->if_xname, len); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ed_reset(ifp); return; } /* * Update next packet pointer */ sc->next_packet = packet_hdr.next_packet; /* * Update NIC boundry pointer - being careful to keep it one * buffer behind. (as recommended by NS databook) */ boundry = sc->next_packet - 1; if (boundry < sc->rec_page_start) boundry = sc->rec_page_stop - 1; /* * Set NIC to page 0 registers to update boundry register */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_BNRY, boundry); /* * Set NIC to page 1 registers before looping to top (prepare * to get 'CURR' current pointer) */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } } /* * Ethernet interface interrupt processor */ void edintr(void *arg) { struct ed_softc *sc = (struct ed_softc*) arg; struct ifnet *ifp = sc->ifp; u_char isr; int count; ED_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ED_UNLOCK(sc); return; } /* * Set NIC to page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * loop until there are no more new interrupts. When the card goes * away, the hardware will read back 0xff. Looking at the interrupts, * it would appear that 0xff is impossible as ED_ISR_RST is normally * clear. ED_ISR_RDC is also normally clear and only set while * we're transferring memory to the card and we're holding the * ED_LOCK (so we can't get into here). */ while ((isr = ed_nic_inb(sc, ED_P0_ISR)) != 0 && isr != 0xff) { /* * reset all the bits that we are 'acknowledging' by writing a * '1' to each bit position that was set (writing a '1' * *clears* the bit) */ ed_nic_outb(sc, ED_P0_ISR, isr); /* * The AX88190 and AX88190A has problems acking an interrupt * and having them clear. This interferes with top-level loop * here. Wait for all the bits to clear. * * We limit this to 5000 iterations. 
At 1us per inb/outb, * this translates to about 15ms, which should be plenty of * time, and also gives protection in the card eject case. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190) { count = 5000; /* 15ms */ while (count-- && (ed_nic_inb(sc, ED_P0_ISR) & isr)) { ed_nic_outb(sc, ED_P0_ISR,0); ed_nic_outb(sc, ED_P0_ISR,isr); } if (count == 0) break; } /* * Handle transmitter interrupts. Handle these first because * the receiver will reset the board under some conditions. */ if (isr & (ED_ISR_PTX | ED_ISR_TXE)) { u_char collisions = ed_nic_inb(sc, ED_P0_NCR) & 0x0f; /* * Check for transmit error. If a TX completed with an * error, we end up throwing the packet away. Really * the only error that is possible is excessive * collisions, and in this case it is best to allow * the automatic mechanisms of TCP to backoff the * flow. Of course, with UDP we're screwed, but this * is expected when a network is heavily loaded. */ (void) ed_nic_inb(sc, ED_P0_TSR); if (isr & ED_ISR_TXE) { u_char tsr; /* * Excessive collisions (16) */ tsr = ed_nic_inb(sc, ED_P0_TSR); if ((tsr & ED_TSR_ABT) && (collisions == 0)) { /* * When collisions total 16, the * P0_NCR will indicate 0, and the * TSR_ABT is set. */ collisions = 16; sc->mibdata.dot3StatsExcessiveCollisions++; sc->mibdata.dot3StatsCollFrequencies[15]++; } if (tsr & ED_TSR_OWC) sc->mibdata.dot3StatsLateCollisions++; if (tsr & ED_TSR_CDH) sc->mibdata.dot3StatsSQETestErrors++; if (tsr & ED_TSR_CRS) sc->mibdata.dot3StatsCarrierSenseErrors++; if (tsr & ED_TSR_FU) sc->mibdata.dot3StatsInternalMacTransmitErrors++; /* * update output errors counter */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { /* * Update total number of successfully * transmitted packets. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } /* * reset tx busy and output active flags */ sc->xmit_busy = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * clear watchdog timer */ sc->tx_timer = 0; /* * Add in total number of collisions on last * transmission. */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, collisions); switch(collisions) { case 0: case 16: break; case 1: sc->mibdata.dot3StatsSingleCollisionFrames++; sc->mibdata.dot3StatsCollFrequencies[0]++; break; default: sc->mibdata.dot3StatsMultipleCollisionFrames++; sc->mibdata. dot3StatsCollFrequencies[collisions-1] ++; break; } /* * Decrement buffer in-use count if not zero (can only * be zero if a transmitter interrupt occured while * not actually transmitting). If data is ready to * transmit, start it transmitting, otherwise defer * until after handling receiver */ if (sc->txb_inuse && --sc->txb_inuse) ed_xmit(sc); } /* * Handle receiver interrupts */ if (isr & (ED_ISR_PRX | ED_ISR_RXE | ED_ISR_OVW)) { /* * Overwrite warning. In order to make sure that a * lockup of the local DMA hasn't occurred, we reset * and re-init the NIC. The NSC manual suggests only a * partial reset/re-init is necessary - but some chips * seem to want more. The DMA lockup has been seen * only with early rev chips - Methinks this bug was * fixed in later revs. -DG */ if (isr & ED_ISR_OVW) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #ifdef DIAGNOSTIC log(LOG_WARNING, "%s: warning - receiver ring buffer overrun\n", ifp->if_xname); #endif /* * Stop/reset/re-init NIC */ ed_reset(ifp); } else { /* * Receiver Error. One or more of: CRC error, * frame alignment error FIFO overrun, or * missed packet. 
*/ if (isr & ED_ISR_RXE) { u_char rsr; rsr = ed_nic_inb(sc, ED_P0_RSR); if (rsr & ED_RSR_CRC) sc->mibdata.dot3StatsFCSErrors++; if (rsr & ED_RSR_FAE) sc->mibdata.dot3StatsAlignmentErrors++; if (rsr & ED_RSR_FO) sc->mibdata.dot3StatsInternalMacReceiveErrors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #ifdef ED_DEBUG if_printf(ifp, "receive error %x\n", ed_nic_inb(sc, ED_P0_RSR)); #endif } /* * Go get the packet(s) XXX - Doing this on an * error is dubious because there shouldn't be * any data to get (we've configured the * interface to not accept packets with * errors). */ /* * Enable 16bit access to shared memory first * on WD/SMC boards. */ ed_enable_16bit_access(sc); ed_rint(sc); ed_disable_16bit_access(sc); } } /* * If it looks like the transmitter can take more data, * attempt to start output on the interface. This is done * after handling the receiver to give the receiver priority. */ if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) ed_start_locked(ifp); /* * return NIC CR to standard state: page 0, remote DMA * complete, start (toggling the TXP bit off, even if was just * set in the transmit routine, is *okay* - it is 'edge' * triggered from low to high) */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* * If the Network Talley Counters overflow, read them to reset * them. It appears that old 8390's won't clear the ISR flag * otherwise - resulting in an infinite loop. */ if (isr & ED_ISR_CNT) { (void) ed_nic_inb(sc, ED_P0_CNTR0); (void) ed_nic_inb(sc, ED_P0_CNTR1); (void) ed_nic_inb(sc, ED_P0_CNTR2); } } ED_UNLOCK(sc); } /* * Process an ioctl request. */ static int ed_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ed_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If we're up and already running, then it may be a mediachg. * If it is marked down and running, then stop it. */ ED_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ed_init_locked(sc); else if (sc->sc_mediachg) sc->sc_mediachg(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ed_stop(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } } /* * Promiscuous flag may have changed, so reprogram the RCR. */ ed_setrcr(sc); ED_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ ED_LOCK(sc); ed_setrcr(sc); ED_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->sc_media_ioctl == NULL) { error = EINVAL; break; } sc->sc_media_ioctl(sc, ifr, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Given a source and destination address, copy 'amount' of a packet from * the ring buffer into a linear destination buffer. Takes into account * ring-wrap. */ static __inline void ed_ring_copy(struct ed_softc *sc, bus_size_t src, char *dst, u_short amount) { u_short tmp_amount; /* does copy wrap to lower addr in ring buffer? 
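         *
         * For example (illustrative numbers only): with mem_ring = 0x2000
         * and mem_end = 0x4000, a copy of amount = 0x300 starting at
         * src = 0x3f00 is split into 0x100 bytes at 0x3f00 and the
         * remaining 0x200 bytes restarting at mem_ring (0x2000).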
*/ if (src + amount > sc->mem_end) { tmp_amount = sc->mem_end - src; /* copy amount up to end of NIC memory */ sc->readmem(sc, src, dst, tmp_amount); amount -= tmp_amount; src = sc->mem_ring; dst += tmp_amount; } sc->readmem(sc, src, dst, amount); } /* * Retreive packet from shared memory and send to the next level up via * ether_input(). */ static void ed_get_packet(struct ed_softc *sc, bus_size_t buf, u_short len) { struct ifnet *ifp = sc->ifp; struct ether_header *eh; struct mbuf *m; /* Allocate a header mbuf */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* * We always put the received packet in a single buffer - * either with just an mbuf header or in a cluster attached * to the header. The +2 is to compensate for the alignment * fixup below. */ if ((len + 2) > MHLEN) { /* Attach an mbuf cluster */ if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return; } } /* * The +2 is to longword align the start of the real packet. * This is important for NFS. */ m->m_data += 2; eh = mtod(m, struct ether_header *); /* * Get packet, including link layer address, from interface. */ ed_ring_copy(sc, buf, (char *)eh, len); m->m_pkthdr.len = m->m_len = len; ED_UNLOCK(sc); (*ifp->if_input)(ifp, m); ED_LOCK(sc); } /* * Supporting routines */ /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using shared memory. * The 'amount' is rounded up to a word - okay as long as mbufs * are word sized. That's what the +1 is below. * This routine accesses things as 16 bit quantities. */ void ed_shmem_readmem16(struct ed_softc *sc, bus_size_t src, uint8_t *dst, uint16_t amount) { bus_space_read_region_2(sc->mem_bst, sc->mem_bsh, src, (uint16_t *)dst, (amount + 1) / 2); } /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using shared memory. * This routine accesses things as 8 bit quantities. */ void ed_shmem_readmem8(struct ed_softc *sc, bus_size_t src, uint8_t *dst, uint16_t amount) { bus_space_read_region_1(sc->mem_bst, sc->mem_bsh, src, dst, amount); } /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using Programmed I/O. * The 'amount' is rounded up to a word - okay as long as mbufs * are word sized. * This routine is currently Novell-specific. */ void ed_pio_readmem(struct ed_softc *sc, bus_size_t src, uint8_t *dst, uint16_t amount) { /* Regular Novell cards */ /* select page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* round up to a word */ if (amount & 1) ++amount; /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, amount); ed_nic_outb(sc, ED_P0_RBCR1, amount >> 8); /* set up source address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, src); ed_nic_outb(sc, ED_P0_RSAR1, src >> 8); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD0 | ED_CR_STA); if (sc->isa16bit) ed_asic_insw(sc, ED_NOVELL_DATA, dst, amount / 2); else ed_asic_insb(sc, ED_NOVELL_DATA, dst, amount); } /* * Stripped down routine for writing a linear buffer to NIC memory. * Only used in the probe routine to test the memory. 'len' must * be even. 
*/ void ed_pio_writemem(struct ed_softc *sc, uint8_t *src, uint16_t dst, uint16_t len) { int maxwait = 200; /* about 240us */ /* select page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* reset remote DMA complete flag */ ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RDC); /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, len); ed_nic_outb(sc, ED_P0_RBCR1, len >> 8); /* set up destination address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, dst); ed_nic_outb(sc, ED_P0_RSAR1, dst >> 8); /* set remote DMA write */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD1 | ED_CR_STA); if (sc->isa16bit) ed_asic_outsw(sc, ED_NOVELL_DATA, src, len / 2); else ed_asic_outsb(sc, ED_NOVELL_DATA, src, len); /* * Wait for remote DMA complete. This is necessary because on the * transmit side, data is handled internally by the NIC in bursts and * we can't start another remote DMA until this one completes. Not * waiting causes really bad things to happen - like the NIC * irrecoverably jamming the ISA bus. */ while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC) && --maxwait) continue; } /* * Write an mbuf chain to the destination NIC memory address using * programmed I/O. */ u_short ed_pio_write_mbufs(struct ed_softc *sc, struct mbuf *m, bus_size_t dst) { struct ifnet *ifp = sc->ifp; unsigned short total_len, dma_len; struct mbuf *mp; int maxwait = 200; /* about 240us */ ED_ASSERT_LOCKED(sc); /* Regular Novell cards */ /* First, count up the total number of bytes to copy */ for (total_len = 0, mp = m; mp; mp = mp->m_next) total_len += mp->m_len; dma_len = total_len; if (sc->isa16bit && (dma_len & 1)) dma_len++; /* select page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* reset remote DMA complete flag */ ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RDC); /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, dma_len); ed_nic_outb(sc, ED_P0_RBCR1, dma_len >> 8); /* set up destination address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, dst); ed_nic_outb(sc, ED_P0_RSAR1, dst >> 8); /* set remote DMA write */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD1 | ED_CR_STA); /* * Transfer the mbuf chain to the NIC memory. * 16-bit cards require that data be transferred as words, and only words. * So that case requires some extra code to patch over odd-length mbufs. */ if (!sc->isa16bit) { /* NE1000s are easy */ while (m) { if (m->m_len) ed_asic_outsb(sc, ED_NOVELL_DATA, m->m_data, m->m_len); m = m->m_next; } } else { /* NE2000s are a pain */ uint8_t *data; int len, wantbyte; union { uint16_t w; uint8_t b[2]; } saveword; wantbyte = 0; while (m) { len = m->m_len; if (len) { data = mtod(m, caddr_t); /* finish the last word */ if (wantbyte) { saveword.b[1] = *data; ed_asic_outw(sc, ED_NOVELL_DATA, saveword.w); data++; len--; wantbyte = 0; } /* output contiguous words */ if (len > 1) { ed_asic_outsw(sc, ED_NOVELL_DATA, data, len >> 1); data += len & ~1; len &= 1; } /* save last byte, if necessary */ if (len == 1) { saveword.b[0] = *data; wantbyte = 1; } } m = m->m_next; } /* spit last byte */ if (wantbyte) ed_asic_outw(sc, ED_NOVELL_DATA, saveword.w); } /* * Wait for remote DMA complete. 
This is necessary because on the * transmit side, data is handled internally by the NIC in bursts and * we can't start another remote DMA until this one completes. Not * waiting causes really bad things to happen - like the NIC * irrecoverably jamming the ISA bus. */ while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC) && --maxwait) continue; if (!maxwait) { log(LOG_WARNING, "%s: remote transmit DMA failed to complete\n", ifp->if_xname); ed_reset(ifp); return(0); } return (total_len); } static void ed_setrcr(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; int i; u_char reg1; ED_ASSERT_LOCKED(sc); /* Bit 6 in AX88190 RCR register must be set. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190 || sc->chip_type == ED_CHIP_TYPE_AX88790) reg1 = ED_RCR_INTT; else reg1 = 0x00; /* set page 1 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (ifp->if_flags & IFF_PROMISC) { /* * Reconfigure the multicast filter. */ for (i = 0; i < 8; i++) ed_nic_outb(sc, ED_P1_MAR(i), 0xff); /* * And turn on promiscuous mode. Also enable reception of * runts and packets with CRC & alignment errors. */ /* Set page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_PRO | ED_RCR_AM | ED_RCR_AB | ED_RCR_AR | ED_RCR_SEP | reg1); } else { /* set up multicast addresses and filter modes */ if (ifp->if_flags & IFF_MULTICAST) { uint32_t mcaf[2]; if (ifp->if_flags & IFF_ALLMULTI) { mcaf[0] = 0xffffffff; mcaf[1] = 0xffffffff; } else ed_ds_getmcaf(sc, mcaf); /* * Set multicast filter on chip. */ for (i = 0; i < 8; i++) ed_nic_outb(sc, ED_P1_MAR(i), ((u_char *) mcaf)[i]); /* Set page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_AM | ED_RCR_AB | reg1); } else { /* * Initialize multicast address hashing registers to * not accept multicasts. */ for (i = 0; i < 8; ++i) ed_nic_outb(sc, ED_P1_MAR(i), 0x00); /* Set page 0 registers */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_AB | reg1); } } /* * Start interface. */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); } /* * Compute the multicast address filter from the * list of multicast addresses we need to listen to. 
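 *
 * This is the standard DP8390 64-bit hash filter: the big-endian CRC32
 * of each address selects one of 64 filter bits via its top six bits.
 * With an invented CRC of 0xd4000000, for example,
 * index = 0xd4000000 >> 26 = 53, so the code below sets bit 5 of
 * filter byte 6:
 *
 *      af[53 >> 3] |= 1 << (53 & 7);   i.e. af[6] |= 0x20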
*/ static void ed_ds_getmcaf(struct ed_softc *sc, uint32_t *mcaf) { uint32_t index; u_char *af = (u_char *) mcaf; struct ifmultiaddr *ifma; mcaf[0] = 0; mcaf[1] = 0; if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; index = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; af[index >> 3] |= 1 << (index & 7); } if_maddr_runlock(sc->ifp); } int ed_isa_mem_ok(device_t dev, u_long pmem, u_int memsize) { if (pmem < 0xa0000 || pmem + memsize > 0x1000000) { device_printf(dev, "Invalid ISA memory address range " "configured: 0x%lx - 0x%lx\n", pmem, pmem + memsize); return (ENXIO); } return (0); } int ed_clear_memory(device_t dev) { struct ed_softc *sc = device_get_softc(dev); bus_size_t i; bus_space_set_region_1(sc->mem_bst, sc->mem_bsh, sc->mem_start, 0, sc->mem_size); for (i = 0; i < sc->mem_size; i++) { if (bus_space_read_1(sc->mem_bst, sc->mem_bsh, sc->mem_start + i)) { device_printf(dev, "failed to clear shared memory at " "0x%jx - check configuration\n", (uintmax_t)rman_get_start(sc->mem_res) + i); return (ENXIO); } } return (0); } u_short ed_shmem_write_mbufs(struct ed_softc *sc, struct mbuf *m, bus_size_t dst) { u_short len; /* * Special case setup for 16 bit boards... */ if (sc->isa16bit) { switch (sc->vendor) { #ifdef ED_3C503 /* * For 16bit 3Com boards (which have 16k of * memory), we have the xmit buffers in a * different page of memory ('page 0') - so * change pages. */ case ED_VENDOR_3COM: ed_asic_outb(sc, ED_3COM_GACFR, ED_3COM_GACFR_RSEL); break; #endif /* * Enable 16bit access to shared memory on * WD/SMC boards. * * XXX - same as ed_enable_16bit_access() */ case ED_VENDOR_WD_SMC: ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto | ED_WD_LAAR_M16EN); if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, ED_WD_MSR_MENB); break; } } for (len = 0; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; if (sc->isa16bit) { if (m->m_len > 1) bus_space_write_region_2(sc->mem_bst, sc->mem_bsh, dst, mtod(m, uint16_t *), m->m_len / 2); if ((m->m_len & 1) != 0) bus_space_write_1(sc->mem_bst, sc->mem_bsh, dst + m->m_len - 1, *(mtod(m, uint8_t *) + m->m_len - 1)); } else bus_space_write_region_1(sc->mem_bst, sc->mem_bsh, dst, mtod(m, uint8_t *), m->m_len); dst += m->m_len; len += m->m_len; } /* * Restore previous shared memory access */ if (sc->isa16bit) { switch (sc->vendor) { #ifdef ED_3C503 case ED_VENDOR_3COM: ed_asic_outb(sc, ED_3COM_GACFR, ED_3COM_GACFR_RSEL | ED_3COM_GACFR_MBS0); break; #endif case ED_VENDOR_WD_SMC: /* XXX - same as ed_disable_16bit_access() */ if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, 0x00); ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto & ~ED_WD_LAAR_M16EN); break; } } return (len); } /* * Generic ifmedia support. By default, the DP8390-based cards don't know * what their network attachment really is, or even if it is valid (except * upon successful transmission of a packet). To play nicer with dhclient, as * well as to fit in with a framework where some cards can provde more * detailed information, make sure that we use this as a fallback. 
*/ static int ed_gen_ifmedia_ioctl(struct ed_softc *sc, struct ifreq *ifr, u_long command) { return (ifmedia_ioctl(sc->ifp, ifr, &sc->ifmedia, command)); } static int ed_gen_ifmedia_upd(struct ifnet *ifp) { return 0; } static void ed_gen_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_active = IFM_ETHER | IFM_AUTO; ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; } void ed_gen_ifmedia_init(struct ed_softc *sc) { sc->sc_media_ioctl = &ed_gen_ifmedia_ioctl; ifmedia_init(&sc->ifmedia, 0, ed_gen_ifmedia_upd, ed_gen_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, 0); ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO); } Index: head/sys/dev/fatm/if_fatm.c =================================================================== --- head/sys/dev/fatm/if_fatm.c (revision 313981) +++ head/sys/dev/fatm/if_fatm.c (revision 313982) @@ -1,3091 +1,3091 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Author: Hartmut Brandt * * Fore PCA200E driver for NATM */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #ifdef INET #include #include #endif #include #include #include #include #include #include #include #include #include #include devclass_t fatm_devclass; static const struct { uint16_t vid; uint16_t did; const char *name; } fatm_devs[] = { { 0x1127, 0x300, "FORE PCA200E" }, { 0, 0, NULL } }; static const struct rate { uint32_t ratio; uint32_t cell_rate; } rate_table[] = { #include }; #define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0])) SYSCTL_DECL(_hw_atm); MODULE_DEPEND(fatm, utopia, 1, 1, 1); static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *); static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int); static const struct utopia_methods fatm_utopia_methods = { fatm_utopia_readregs, fatm_utopia_writereg }; #define VC_OK(SC, VPI, VCI) \ (rounddown2(VPI, 1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) == 0 && \ (VCI) != 0 && rounddown2(VCI, 1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) == 0) static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc); /* * Probing is easy: step trough the list of known vendor and device * ids and compare. If one is found - it's our. */ static int fatm_probe(device_t dev) { int i; for (i = 0; fatm_devs[i].name; i++) if (pci_get_vendor(dev) == fatm_devs[i].vid && pci_get_device(dev) == fatm_devs[i].did) { device_set_desc(dev, fatm_devs[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Function called at completion of a SUNI writeregs/readregs command. * This is called from the interrupt handler while holding the softc lock. * We use the queue entry as the randevouze point. */ static void fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if(H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.suni_reg_errors++; q->error = EIO; } wakeup(q); } /* * Write a SUNI register. The bits that are 1 in mask are written from val * into register reg. We wait for the command to complete by sleeping on * the register memory. * * We assume, that we already hold the softc mutex. 
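 *
 * The write path below follows the command-queue rendezvous pattern
 * used for all card commands (a sketch, with 'op' standing in for
 * FATM_MAKE_SETOC3(reg, val, mask)):
 *
 *      q = GET_QUEUE(sc->cmdqueue, ...);       reserve a FREE entry
 *      NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN);
 *      WRITE4(sc, q->q.card + FATMOC_OP, op | FATM_OP_INTERRUPT_SEL);
 *      error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz);
 *
 * The interrupt handler later runs q->cb and wakeup()s the sleeper.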
*/ static int fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) { int error; struct cmdqueue *q; struct fatm_softc *sc; sc = ifatm->ifp->if_softc; FATM_CHECKLOCK(sc); if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING)) return (EIO); /* get queue element and fill it */ q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_utopia_writeregs_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz); switch(error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: error = q->error; break; } return (error); } /* * Function called at completion of a SUNI readregs command. * This is called from the interrupt handler while holding the softc lock. * We use reg_mem as the randevouze point. */ static void fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.suni_reg_errors++; q->error = EIO; } wakeup(&sc->reg_mem); } /* * Read SUNI registers * * We use a preallocated buffer to read the registers. Therefor we need * to protect against multiple threads trying to read registers. We do this * with a condition variable and a flag. We wait for the command to complete by sleeping on * the register memory. * * We assume, that we already hold the softc mutex. */ static int fatm_utopia_readregs_internal(struct fatm_softc *sc) { int error, i; uint32_t *ptr; struct cmdqueue *q; /* get the buffer */ for (;;) { if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) return (EIO); if (!(sc->flags & FATM_REGS_INUSE)) break; cv_wait(&sc->cv_regs, &sc->mtx); } sc->flags |= FATM_REGS_INUSE; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_utopia_readregs_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH, "fatm_getreg", hz); switch(error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_POSTREAD); error = q->error; break; } if (error != 0) { /* declare buffer to be free */ sc->flags &= ~FATM_REGS_INUSE; cv_signal(&sc->cv_regs); return (error); } /* swap if needed */ ptr = (uint32_t *)sc->reg_mem.mem; for (i = 0; i < FATM_NREGS; i++) ptr[i] = le32toh(ptr[i]) & 0xff; return (0); } /* * Read SUNI registers for the SUNI module. * * We assume, that we already hold the mutex. 
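 *
 * The preallocated buffer is guarded by the usual flag-plus-condvar
 * shape, acquired in fatm_utopia_readregs_internal() above and
 * released below; as a sketch:
 *
 *      while (sc->flags & FATM_REGS_INUSE)
 *              cv_wait(&sc->cv_regs, &sc->mtx);
 *      sc->flags |= FATM_REGS_INUSE;
 *      ... issue the command and sleep for completion ...
 *      sc->flags &= ~FATM_REGS_INUSE;
 *      cv_signal(&sc->cv_regs);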
 */
static int
fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np)
{
        int err;
        int i;
        struct fatm_softc *sc;

        if (reg >= FATM_NREGS)
                return (EINVAL);
        if (reg + *np > FATM_NREGS)
                *np = FATM_NREGS - reg;
        sc = ifatm->ifp->if_softc;
        FATM_CHECKLOCK(sc);

        err = fatm_utopia_readregs_internal(sc);
        if (err != 0)
                return (err);

        for (i = 0; i < *np; i++)
                valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i];

        /* declare buffer to be free */
        sc->flags &= ~FATM_REGS_INUSE;
        cv_signal(&sc->cv_regs);

        return (0);
}

/*
 * Check whether the heart is beating. We remember the last heart beat and
 * compare it to the current one. If it appears stuck for 10 times, we have
 * a problem.
 *
 * Assume we hold the lock.
 */
static void
fatm_check_heartbeat(struct fatm_softc *sc)
{
        uint32_t h;

        FATM_CHECKLOCK(sc);

        h = READ4(sc, FATMO_HEARTBEAT);
        DBG(sc, BEAT, ("heartbeat %08x", h));

        if (sc->stop_cnt == 10)
                return;

        if (h == sc->heartbeat) {
                if (++sc->stop_cnt == 10) {
                        log(LOG_ERR, "i960 stopped???\n");
                        WRITE4(sc, FATMO_HIMR, 1);
                }
                return;
        }
        sc->stop_cnt = 0;
        sc->heartbeat = h;
}

/*
 * Ensure that the heart is still beating.
 */
static void
fatm_watchdog(void *arg)
{
        struct fatm_softc *sc;

        sc = arg;
        FATM_CHECKLOCK(sc);
        fatm_check_heartbeat(sc);
        callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc);
}

/*
 * Hard reset the i960 on the board. This is done by initializing registers,
 * clearing interrupts and waiting for the selftest to finish. Not sure
 * whether all these barriers are actually needed.
 *
 * Assumes that we hold the lock.
 */
static int
fatm_reset(struct fatm_softc *sc)
{
        int w;
        uint32_t val;

        FATM_CHECKLOCK(sc);

        WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN);
        BARRIER_W(sc);

        WRITE4(sc, FATMO_UART_TO_960, XMIT_READY);
        BARRIER_W(sc);

        WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY);
        BARRIER_W(sc);

        WRITE4(sc, FATMO_BOOT_STATUS, COLD_START);
        BARRIER_W(sc);

        WRITE1(sc, FATMO_HCR, FATM_HCR_RESET);
        BARRIER_W(sc);

        DELAY(1000);

        WRITE1(sc, FATMO_HCR, 0);
        BARRIER_RW(sc);

        DELAY(1000);

        for (w = 100; w; w--) {
                BARRIER_R(sc);
                val = READ4(sc, FATMO_BOOT_STATUS);
                switch (val) {
                case SELF_TEST_OK:
                        return (0);
                case SELF_TEST_FAIL:
                        return (EIO);
                }
                DELAY(1000);
        }
        return (EIO);
}

/*
 * Stop the card. Must be called WITH the lock held.
 * Reset, free transmit and receive buffers. Wake up everybody who may sleep.
*/ static void fatm_stop(struct fatm_softc *sc) { int i; struct cmdqueue *q; struct rbuf *rb; struct txqueue *tx; uint32_t stat; FATM_CHECKLOCK(sc); /* Stop the board */ utopia_stop(&sc->utopia); (void)fatm_reset(sc); /* stop watchdog */ callout_stop(&sc->watchdog_timer); if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp), sc->utopia.carrier == UTP_CARR_OK); /* * Collect transmit mbufs, partial receive mbufs and * supplied mbufs */ for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); if (tx->m) { bus_dmamap_unload(sc->tx_tag, tx->map); m_freem(tx->m); tx->m = NULL; } } /* Collect supplied mbufs */ while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) { LIST_REMOVE(rb, link); bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } /* Unwait any waiters */ wakeup(&sc->sadi_mem); /* wakeup all threads waiting for STAT or REG buffers */ cv_broadcast(&sc->cv_stat); cv_broadcast(&sc->cv_regs); sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE); /* wakeup all threads waiting on commands */ for (i = 0; i < FATM_CMD_QLEN; i++) { q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) { H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR); H_SYNCSTAT_PREWRITE(sc, q->q.statp); wakeup(q); } } utopia_reset_media(&sc->utopia); } sc->small_cnt = sc->large_cnt = 0; /* Reset vcc info */ if (sc->vccs != NULL) { sc->open_vccs = 0; for (i = 0; i < FORE_MAX_VCC + 1; i++) { if (sc->vccs[i] != NULL) { if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN)) == 0) { uma_zfree(sc->vcc_zone, sc->vccs[i]); sc->vccs[i] = NULL; } else { sc->vccs[i]->vflags = 0; sc->open_vccs++; } } } } } /* * Load the firmware into the board and save the entry point. */ static uint32_t firmware_load(struct fatm_softc *sc) { struct firmware *fw = (struct firmware *)firmware; DBG(sc, INIT, ("loading - entry=%x", fw->entry)); bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware, sizeof(firmware) / sizeof(firmware[0])); BARRIER_RW(sc); return (fw->entry); } /* * Read a character from the virtual UART. The availability of a character * is signaled by a non-null value of the 32 bit register. The eating of * the character by us is signalled to the card by setting that register * to zero. */ static int rx_getc(struct fatm_softc *sc) { int w = 50; int c; while (w--) { c = READ4(sc, FATMO_UART_TO_HOST); BARRIER_RW(sc); if (c != 0) { WRITE4(sc, FATMO_UART_TO_HOST, 0); DBGC(sc, UART, ("%c", c & 0xff)); return (c & 0xff); } DELAY(1000); } return (-1); } /* * Eat up characters from the board and stuff them in the bit-bucket. */ static void rx_flush(struct fatm_softc *sc) { int w = 10000; while (w-- && rx_getc(sc) >= 0) ; } /* * Write a character to the card. The UART is available if the register * is zero. */ static int tx_putc(struct fatm_softc *sc, u_char c) { int w = 10; int c1; while (w--) { c1 = READ4(sc, FATMO_UART_TO_960); BARRIER_RW(sc); if (c1 == 0) { WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL); DBGC(sc, UART, ("%c", c & 0xff)); return (0); } DELAY(1000); } return (-1); } /* * Start the firmware. This is doing by issuing a 'go' command with * the hex entry address of the firmware. Then we wait for the self-test to * succeed. 
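 *
 * Only the low 16 bits of the entry point are sent, one hex nibble per
 * character. For an invented entry point of 0x4000 the exchange over
 * the virtual UART is simply:
 *
 *      'g' 'o' ' ' '4' '0' '0' '0' '\r'
 *
 * with rx_getc() consuming the card's reply after each character.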
*/ static int fatm_start_firmware(struct fatm_softc *sc, uint32_t start) { static char hex[] = "0123456789abcdef"; u_int w, val; DBG(sc, INIT, ("starting")); rx_flush(sc); tx_putc(sc, '\r'); DELAY(1000); rx_flush(sc); tx_putc(sc, 'g'); (void)rx_getc(sc); tx_putc(sc, 'o'); (void)rx_getc(sc); tx_putc(sc, ' '); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 12) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 8) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 4) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 0) & 0xf]); (void)rx_getc(sc); tx_putc(sc, '\r'); rx_flush(sc); for (w = 100; w; w--) { BARRIER_R(sc); val = READ4(sc, FATMO_BOOT_STATUS); switch (val) { case CP_RUNNING: return (0); case SELF_TEST_FAIL: return (EIO); } DELAY(1000); } return (EIO); } /* * Initialize one card and host queue. */ static void init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen, size_t qel_size, size_t desc_size, cardoff_t off, u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc) { struct fqelem *el = queue->chunk; while (qlen--) { el->card = off; off += 8; /* size of card entry */ el->statp = (uint32_t *)(*statpp); (*statpp) += sizeof(uint32_t); H_SETSTAT(el->statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, el->statp); WRITE4(sc, el->card + FATMOS_STATP, (*cardstat)); (*cardstat) += sizeof(uint32_t); el->ioblk = descp; descp += desc_size; el->card_ioblk = carddesc; carddesc += desc_size; el = (struct fqelem *)((u_char *)el + qel_size); } queue->tail = queue->head = 0; } /* * Issue the initialize operation to the card, wait for completion and * initialize the on-board and host queue structures with offsets and * addresses. */ static int fatm_init_cmd(struct fatm_softc *sc) { int w, c; u_char *statp; uint32_t card_stat; u_int cnt; struct fqelem *el; cardoff_t off; DBG(sc, INIT, ("command")); WRITE4(sc, FATMO_ISTAT, 0); WRITE4(sc, FATMO_IMASK, 1); WRITE4(sc, FATMO_HLOGGER, 0); WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0); WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC); WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS); WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS); /* * initialize buffer descriptors */ WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH, SMALL_SUPPLY_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE, SMALL_BUFFER_LEN); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE, SMALL_POOL_SIZE); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE, SMALL_SUPPLY_BLKSIZE); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH, LARGE_SUPPLY_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE, LARGE_BUFFER_LEN); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE, LARGE_POOL_SIZE); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE, LARGE_SUPPLY_BLKSIZE); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + 
FATMOB_SUPPLY_BLKSIZE, 0); /* * Start the command */ BARRIER_W(sc); WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING); BARRIER_W(sc); WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE); BARRIER_W(sc); /* * Busy wait for completion */ w = 100; while (w--) { c = READ4(sc, FATMO_INIT + FATMOI_STATUS); BARRIER_R(sc); if (c & FATM_STAT_COMPLETE) break; DELAY(1000); } if (c & FATM_STAT_ERROR) return (EIO); /* * Initialize the queues */ statp = sc->stat_mem.mem; card_stat = sc->stat_mem.paddr; /* * Command queue. This is special in that it's on the card. */ el = sc->cmdqueue.chunk; off = READ4(sc, FATMO_COMMAND_QUEUE); DBG(sc, INIT, ("cmd queue=%x", off)); for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) { el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q; el->card = off; off += 32; /* size of card structure */ el->statp = (uint32_t *)statp; statp += sizeof(uint32_t); H_SETSTAT(el->statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, el->statp); WRITE4(sc, el->card + FATMOC_STATP, card_stat); card_stat += sizeof(uint32_t); } sc->cmdqueue.tail = sc->cmdqueue.head = 0; /* * Now the other queues. These are in memory */ init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN, sizeof(struct txqueue), TPD_SIZE, READ4(sc, FATMO_TRANSMIT_QUEUE), &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr); init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN, sizeof(struct rxqueue), RPD_SIZE, READ4(sc, FATMO_RECEIVE_QUEUE), &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr); init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN, sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE), READ4(sc, FATMO_SMALL_B1_QUEUE), &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr); init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN, sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE), READ4(sc, FATMO_LARGE_B1_QUEUE), &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr); sc->txcnt = 0; return (0); } /* * Read PROM. Called only from attach code. Here we spin because the interrupt * handler is not yet set up. 
*/ static int fatm_getprom(struct fatm_softc *sc) { int i; struct prom *prom; struct cmdqueue *q; DBG(sc, INIT, ("reading prom")); q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = NULL; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA); BARRIER_W(sc); for (i = 0; i < 1000; i++) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & (FATM_STAT_COMPLETE | FATM_STAT_ERROR)) break; DELAY(1000); } if (i == 1000) { if_printf(sc->ifp, "getprom timeout\n"); return (EIO); } H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { if_printf(sc->ifp, "getprom error\n"); return (EIO); } H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN); bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map, BUS_DMASYNC_POSTREAD); #ifdef notdef { u_int i; printf("PROM: "); u_char *ptr = (u_char *)sc->prom_mem.mem; for (i = 0; i < sizeof(struct prom); i++) printf("%02x ", *ptr++); printf("\n"); } #endif prom = (struct prom *)sc->prom_mem.mem; bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6); IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial); IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version); IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE); if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x " "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial, IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version); return (0); } /* * This is the callback function for bus_dmamap_load. We assume, that we * have a 32-bit bus and so have always one segment. */ static void dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *ptr = (bus_addr_t *)arg; if (error != 0) { printf("%s: error=%d\n", __func__, error); return; } KASSERT(nsegs == 1, ("too many DMA segments")); KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx", (u_long)segs[0].ds_addr)); *ptr = segs[0].ds_addr; } /* * Allocate a chunk of DMA-able memory and map it. 
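 *
 * Callers fill in the size and alignment of the struct fatm_mem first;
 * a hedged usage sketch (the field values and the "STAT" tag are
 * invented for illustration):
 *
 *      sc->stat_mem.size = 4096;
 *      sc->stat_mem.align = 4;
 *      if (alloc_dma_memory(sc, "STAT", &sc->stat_mem) != 0)
 *              return (ENOMEM);
 *
 * On success mem->mem is the kernel virtual address and mem->paddr the
 * bus address handed to the card.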
*/ static int alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem) { int error; mem->mem = NULL; if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) { if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm); return (ENOMEM); } error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map); if (error) { if_printf(sc->ifp, "could not allocate %s DMA memory: " "%d\n", nm, error); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "could not load %s DMA memory: " "%d\n", nm, error); bus_dmamem_free(mem->dmat, mem->mem, mem->map); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align)); return (0); } #ifdef TEST_DMA_SYNC static int alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem) { int error; mem->mem = NULL; if (bus_dma_tag_create(NULL, mem->align, 0, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, mem->size, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) { if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm); return (ENOMEM); } mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0); error = bus_dmamap_create(mem->dmat, 0, &mem->map); if (error) { if_printf(sc->ifp, "could not allocate %s DMA map: " "%d\n", nm, error); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "could not load %s DMA memory: " "%d\n", nm, error); bus_dmamap_destroy(mem->dmat, mem->map); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align)); printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align); return (0); } #endif /* TEST_DMA_SYNC */ /* * Destroy all resources of an dma-able memory chunk */ static void destroy_dma_memory(struct fatm_mem *mem) { if (mem->mem != NULL) { bus_dmamap_unload(mem->dmat, mem->map); bus_dmamem_free(mem->dmat, mem->mem, mem->map); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; } } #ifdef TEST_DMA_SYNC static void destroy_dma_memoryX(struct fatm_mem *mem) { if (mem->mem != NULL) { bus_dmamap_unload(mem->dmat, mem->map); bus_dmamap_destroy(mem->dmat, mem->map); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; } } #endif /* TEST_DMA_SYNC */ /* * Try to supply buffers to the card if there are free entries in the queues */ static void fatm_supply_small_buffers(struct fatm_softc *sc) { int nblocks, nbufs; struct supqueue *q; struct rbd *bd; int i, j, error, cnt; struct mbuf *m; struct rbuf *rb; bus_addr_t phys; nbufs = max(4 * sc->open_vccs, 32); nbufs = min(nbufs, SMALL_POOL_SIZE); nbufs -= sc->small_cnt; nblocks = howmany(nbufs, SMALL_SUPPLY_BLKSIZE); for (cnt = 0; cnt < nblocks; cnt++) { q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if 
(H_GETSTAT(q->q.statp) != FATM_STAT_FREE) break; bd = (struct rbd *)q->q.ioblk; for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) { if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) { if_printf(sc->ifp, "out of rbufs\n"); break; } MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } M_ALIGN(m, SMALL_BUFFER_LEN); error = bus_dmamap_load(sc->rbuf_tag, rb->map, m->m_data, SMALL_BUFFER_LEN, dmaload_helper, &phys, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "dmamap_load mbuf failed %d", error); m_freem(m); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_PREREAD); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_used, rb, link); rb->m = m; bd[i].handle = rb - sc->rbufs; H_SETDESC(bd[i].buffer, phys); } if (i < SMALL_SUPPLY_BLKSIZE) { for (j = 0; j < i; j++) { rb = sc->rbufs + bd[j].handle; bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } break; } H_SYNCQ_PREWRITE(&sc->s1q_mem, bd, sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE); H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); sc->small_cnt += SMALL_SUPPLY_BLKSIZE; NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN); } } /* * Try to supply buffers to the card if there are free entries in the queues * We assume that all buffers are within the address space accessible by the * card (32-bit), so we don't need bounce buffers. */ static void fatm_supply_large_buffers(struct fatm_softc *sc) { int nbufs, nblocks, cnt; struct supqueue *q; struct rbd *bd; int i, j, error; struct mbuf *m; struct rbuf *rb; bus_addr_t phys; nbufs = max(4 * sc->open_vccs, 32); nbufs = min(nbufs, LARGE_POOL_SIZE); nbufs -= sc->large_cnt; nblocks = howmany(nbufs, LARGE_SUPPLY_BLKSIZE); for (cnt = 0; cnt < nblocks; cnt++) { q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) break; bd = (struct rbd *)q->q.ioblk; for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) { if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) { if_printf(sc->ifp, "out of rbufs\n"); break; } if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL) { LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } /* No MEXT_ALIGN */ m->m_data += MCLBYTES - LARGE_BUFFER_LEN; error = bus_dmamap_load(sc->rbuf_tag, rb->map, m->m_data, LARGE_BUFFER_LEN, dmaload_helper, &phys, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "dmamap_load mbuf failed %d", error); m_freem(m); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_PREREAD); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_used, rb, link); rb->m = m; bd[i].handle = rb - sc->rbufs; H_SETDESC(bd[i].buffer, phys); } if (i < LARGE_SUPPLY_BLKSIZE) { for (j = 0; j < i; j++) { rb = sc->rbufs + bd[j].handle; bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } break; } H_SYNCQ_PREWRITE(&sc->l1q_mem, bd, sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE); H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); sc->large_cnt += LARGE_SUPPLY_BLKSIZE; NEXT_QUEUE_ENTRY(sc->l1queue.head, LARGE_SUPPLY_QLEN); } } /* * Actually start the card. The lock must be held here. 
* Reset, load the firmware, start it, initialize queues, read the PROM * and supply receive buffers to the card. */ static void fatm_init_locked(struct fatm_softc *sc) { struct rxqueue *q; int i, c, error; uint32_t start; DBG(sc, INIT, ("initialize")); if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) fatm_stop(sc); /* * Hard reset the board */ if (fatm_reset(sc)) return; start = firmware_load(sc); if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) || fatm_getprom(sc)) { fatm_reset(sc); return; } /* * Handle media */ c = READ4(sc, FATMO_MEDIA_TYPE); switch (c) { case FORE_MT_TAXI_100: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100; IFP2IFATM(sc->ifp)->mib.pcr = 227273; break; case FORE_MT_TAXI_140: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140; IFP2IFATM(sc->ifp)->mib.pcr = 318181; break; case FORE_MT_UTP_SONET: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; case FORE_MT_MM_OC3_ST: case FORE_MT_MM_OC3_SC: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; case FORE_MT_SM_OC3_ST: case FORE_MT_SM_OC3_SC: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; default: log(LOG_ERR, "fatm: unknown media type %d\n", c); IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; } sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr; utopia_init_media(&sc->utopia); /* * Initialize the RBDs */ for (i = 0; i < FATM_RX_QLEN; i++) { q = GET_QUEUE(sc->rxqueue, struct rxqueue, i); WRITE4(sc, q->q.card + 0, q->q.card_ioblk); } BARRIER_W(sc); /* * Supply buffers to the card */ fatm_supply_small_buffers(sc); fatm_supply_large_buffers(sc); /* * Now set flags that we are ready */ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* * Start the watchdog timer */ callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc); /* start SUNI */ utopia_start(&sc->utopia); ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp), sc->utopia.carrier == UTP_CARR_OK); /* start all channels */ for (i = 0; i < FORE_MAX_VCC + 1; i++) if (sc->vccs[i] != NULL) { sc->vccs[i]->vflags |= FATM_VCC_REOPEN; error = fatm_load_vc(sc, sc->vccs[i]); if (error != 0) { if_printf(sc->ifp, "reopening %u " "failed: %d\n", i, error); sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN; } } DBG(sc, INIT, ("done")); } /* * This is exported as the initialisation function. */ static void fatm_init(void *p) { struct fatm_softc *sc = p; FATM_LOCK(sc); fatm_init_locked(sc); FATM_UNLOCK(sc); } /************************************************************/ /* * The INTERRUPT handling */ /* * Check the command queue. If a command was completed, call the completion * function for that command. */ static void fatm_intr_drain_cmd(struct fatm_softc *sc) { struct cmdqueue *q; int stat; /* * Drain command queue */ for (;;) { q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if (stat != FATM_STAT_COMPLETE && stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) && stat != FATM_STAT_ERROR) break; (*q->cb)(sc, q); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN); } } /* * Drain the small buffer supply queue.
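* Completed entries are marked FATM_STAT_COMPLETE by the card; we log errors and recycle the queue slot. The buffers themselves stay with the card until they come back through the receive queue.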
*/ static void fatm_intr_drain_small_buffers(struct fatm_softc *sc) { struct supqueue *q; int stat; for (;;) { q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; if (stat & FATM_STAT_ERROR) log(LOG_ERR, "%s: status %x\n", __func__, stat); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN); } } /* * Drain the large buffer supply queue. */ static void fatm_intr_drain_large_buffers(struct fatm_softc *sc) { struct supqueue *q; int stat; for (;;) { q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; if (stat & FATM_STAT_ERROR) log(LOG_ERR, "%s status %x\n", __func__, stat); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN); } } /* * Check the receive queue. Send any received PDU up the protocol stack * (except when there was an error or the VCI appears to be closed. In this * case discard the PDU). */ static void fatm_intr_drain_rx(struct fatm_softc *sc) { struct rxqueue *q; int stat, mlen; u_int i; uint32_t h; struct mbuf *last, *m0; struct rpd *rpd; struct rbuf *rb; u_int vci, vpi, pt; struct atm_pseudohdr aph; struct ifnet *ifp; struct card_vcc *vc; for (;;) { q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; rpd = (struct rpd *)q->q.ioblk; H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE); rpd->nseg = le32toh(rpd->nseg); mlen = 0; - m0 = last = 0; + m0 = last = NULL; for (i = 0; i < rpd->nseg; i++) { rb = sc->rbufs + rpd->segment[i].handle; if (m0 == NULL) { m0 = last = rb->m; } else { last->m_next = rb->m; last = rb->m; } last->m_next = NULL; if (last->m_flags & M_EXT) sc->large_cnt--; else sc->small_cnt--; bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rbuf_tag, rb->map); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); last->m_len = le32toh(rpd->segment[i].length); mlen += last->m_len; } m0->m_pkthdr.len = mlen; m0->m_pkthdr.rcvif = sc->ifp; h = le32toh(rpd->atm_header); vpi = (h >> 20) & 0xff; vci = (h >> 4 ) & 0xffff; pt = (h >> 1 ) & 0x7; /* * Locate the VCC this packet belongs to */ if (!VC_OK(sc, vpi, vci)) vc = NULL; else if ((vc = sc->vccs[vci]) == NULL || !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) { sc->istats.rx_closed++; vc = NULL; } DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci, pt, mlen, vc == NULL ? "dropped" : "")); if (vc == NULL) { m_freem(m0); } else { #ifdef ENABLE_BPF if (!(vc->param.flags & ATMIO_FLAG_NG) && vc->param.aal == ATMIO_AAL_5 && (vc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m0); #endif ATM_PH_FLAGS(&aph) = vc->param.flags; ATM_PH_VPI(&aph) = vpi; ATM_PH_SETVCI(&aph, vci); ifp = sc->ifp; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); vc->ipackets++; vc->ibytes += m0->m_pkthdr.len; atm_input(ifp, &aph, m0, vc->rxhand); } H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN); } } /* * Check the transmit queue. Free the mbuf chains that we were transmitting. 
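* An entry is finished when the card has set FATM_STAT_COMPLETE (possibly together with FATM_STAT_ERROR) in its status word; we then unload the DMA map, free the mbuf chain and hand the slot back to fatm_tx().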
*/ static void fatm_intr_drain_tx(struct fatm_softc *sc) { struct txqueue *q; int stat; /* * Drain tx queue */ for (;;) { q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if (stat != FATM_STAT_COMPLETE && stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) && stat != FATM_STAT_ERROR) break; H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_tag, q->map); m_freem(q->m); q->m = NULL; sc->txcnt--; NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN); } } /* * Interrupt handler */ static void fatm_intr(void *p) { struct fatm_softc *sc = (struct fatm_softc *)p; FATM_LOCK(sc); if (!READ4(sc, FATMO_PSR)) { FATM_UNLOCK(sc); return; } WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ); if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) { FATM_UNLOCK(sc); return; } fatm_intr_drain_cmd(sc); fatm_intr_drain_rx(sc); fatm_intr_drain_tx(sc); fatm_intr_drain_small_buffers(sc); fatm_intr_drain_large_buffers(sc); fatm_supply_small_buffers(sc); fatm_supply_large_buffers(sc); FATM_UNLOCK(sc); if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd)) (*sc->ifp->if_start)(sc->ifp); } /* * Get device statistics. This must be called with the softc locked. * We use a preallocated buffer, so we need to protect this buffer. * We do this by using a condition variable and a flag. If the flag is set * the buffer is in use by one thread (one thread is executing a GETSTAT * card command). In this case all other threads that are trying to get * statistics block on that condition variable. When the thread finishes * using the buffer it resets the flag and signals the condition variable. This * will wakeup the next thread that is waiting for the buffer. If the interface * is stopped the stopping function will broadcast the cv. All threads will * find that the interface has been stopped and return. * * Acquiring of the buffer is done by the fatm_getstat() function. The freeing * must be done by the caller when he has finished using the buffer. 
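* fatm_getstat() sleeps at most one second for the answer from the card (see the msleep() call below); a timeout is reported as EIO and an interrupted sleep as EINTR.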
*/ static void fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; q->error = EIO; } wakeup(&sc->sadi_mem); } static int fatm_getstat(struct fatm_softc *sc) { int error; struct cmdqueue *q; /* * Wait until either the interface is stopped or we can get the * statistics buffer */ for (;;) { if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) return (EIO); if (!(sc->flags & FATM_STAT_INUSE)) break; cv_wait(&sc->cv_stat, &sc->mtx); } sc->flags |= FATM_STAT_INUSE; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_getstat_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF, sc->sadi_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH, "fatm_stat", hz); switch (error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map, BUS_DMASYNC_POSTREAD); error = q->error; break; } /* * Swap statistics */ if (q->error == 0) { u_int i; uint32_t *p = (uint32_t *)sc->sadi_mem.mem; for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t); i++, p++) *p = be32toh(*p); } return (error); } /* * Create a copy of a single mbuf. It can have either internal or * external data, and it may have a packet header. External data is really * copied, so the new buffer is writeable. */ static struct mbuf * copy_mbuf(struct mbuf *m) { struct mbuf *new; MGET(new, M_NOWAIT, MT_DATA); if (new == NULL) return (NULL); if (m->m_flags & M_PKTHDR) { M_MOVE_PKTHDR(new, m); if (m->m_len > MHLEN) MCLGET(new, M_WAITOK); } else { if (m->m_len > MLEN) MCLGET(new, M_WAITOK); } bcopy(m->m_data, new->m_data, m->m_len); new->m_len = m->m_len; new->m_flags &= ~M_RDONLY; return (new); } /* * All segments must have a four byte aligned buffer address and a four * byte aligned length. Step through an mbuf chain and check these conditions. * If the buffer address is not aligned and this is a normal mbuf, move * the data down. Else make a copy of the mbuf with aligned data. * If the buffer length is not aligned steal data from the next mbuf. * We don't need to check whether this has more than one external reference, * because stealing data doesn't change the external cluster. * If the last mbuf is not aligned, fill with zeroes. * * Return packet length (well we should have this in the packet header), * but be careful not to count the zero fill at the end. * * If fixing fails, free the chain and zero the pointer. * * We assume that aligning the virtual address also aligns the mapped bus * address.
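* Example: an mbuf with an odd buffer address is first moved down (or copied, if it is read-only) so that the address is 4-byte aligned; if its length is then, say, 5, the remaining 3 bytes are stolen from the next mbuf, or zero-filled when there is no next mbuf.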
*/ static u_int fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp) { struct mbuf *m = *mp, *prev = NULL, *next, *new; u_int mlen = 0, fill = 0; int first, off; u_char *d, *cp; do { next = m->m_next; if ((uintptr_t)mtod(m, void *) % 4 != 0 || (m->m_len % 4 != 0 && next)) { /* * Needs fixing */ first = (m == *mp); d = mtod(m, u_char *); if ((off = (uintptr_t)(void *)d % 4) != 0) { if (M_WRITABLE(m)) { sc->istats.fix_addr_copy++; bcopy(d, d - off, m->m_len); m->m_data = (caddr_t)(d - off); } else { if ((new = copy_mbuf(m)) == NULL) { sc->istats.fix_addr_noext++; goto fail; } sc->istats.fix_addr_ext++; if (prev) prev->m_next = new; new->m_next = next; m_free(m); m = new; } } if ((off = m->m_len % 4) != 0) { if (!M_WRITABLE(m)) { if ((new = copy_mbuf(m)) == NULL) { sc->istats.fix_len_noext++; goto fail; } sc->istats.fix_len_copy++; if (prev) prev->m_next = new; new->m_next = next; m_free(m); m = new; } else sc->istats.fix_len++; d = mtod(m, u_char *) + m->m_len; off = 4 - off; while (off) { if (next == NULL) { *d++ = 0; fill++; } else if (next->m_len == 0) { sc->istats.fix_empty++; next = m_free(next); continue; } else { cp = mtod(next, u_char *); *d++ = *cp++; next->m_len--; next->m_data = (caddr_t)cp; } off--; m->m_len++; } } if (first) *mp = m; } mlen += m->m_len; prev = m; } while ((m = next) != NULL); return (mlen - fill); fail: m_freem(*mp); *mp = NULL; return (0); } /* * The helper function is used to load the computed physical addresses * into the transmit descriptor. */ static void fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error) { struct tpd *tpd = varg; if (error) return; KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments")); tpd->spec = 0; while (nsegs--) { H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr); H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len); tpd->spec++; segs++; } } /* * Start output. * * Note that we update the internal statistics without the lock here. */ static int fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen) { struct txqueue *q; u_int nblks; int error, aal, nsegs; struct tpd *tpd; /* * Get a queue element. * If there isn't one, try to drain the transmit queue. * We used to sleep here if that doesn't help, but we * should not sleep here, because we are called with locks held. */ q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) { fatm_intr_drain_tx(sc); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) { if (sc->retry_tx) { sc->istats.tx_retry++; IF_PREPEND(&sc->ifp->if_snd, m); return (1); } sc->istats.tx_queue_full++; m_freem(m); return (0); } sc->istats.tx_queue_almost_full++; } tpd = q->q.ioblk; m->m_data += sizeof(struct atm_pseudohdr); m->m_len -= sizeof(struct atm_pseudohdr); #ifdef ENABLE_BPF if (!(vc->param.flags & ATMIO_FLAG_NG) && vc->param.aal == ATMIO_AAL_5 && (vc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m); #endif /* map the mbuf */ error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m, fatm_tpd_load, tpd, BUS_DMA_NOWAIT); if (error) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); if_printf(sc->ifp, "mbuf loaded error=%d\n", error); m_freem(m); return (0); } nsegs = tpd->spec; bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE); /* * OK. Now go and do it. */ aal = (vc->param.aal == ATMIO_AAL_5) ?
5 : 0; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); q->m = m; /* * If the transmit queue is almost full, schedule a * transmit interrupt so that transmit descriptors can * be recycled. */ H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >= (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen)); H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi, vc->param.vci, 0, 0)); if (vc->param.traffic == ATMIO_TRAFFIC_UBR) H_SETDESC(tpd->stream, 0); else { u_int i; for (i = 0; i < RATE_TABLE_SIZE; i++) if (rate_table[i].cell_rate < vc->param.tparam.pcr) break; if (i > 0) i--; H_SETDESC(tpd->stream, rate_table[i].ratio); } H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE); nblks = TDX_SEGS2BLKS(nsegs); DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d", mlen, le32toh(tpd->spec), nsegs, nblks)); WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks); BARRIER_W(sc); sc->txcnt++; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); vc->obytes += m->m_pkthdr.len; vc->opackets++; NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN); return (0); } static void fatm_start(struct ifnet *ifp) { struct atm_pseudohdr aph; struct fatm_softc *sc; struct mbuf *m; u_int mlen, vpi, vci; struct card_vcc *vc; sc = ifp->if_softc; while (1) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* * Loop through the mbuf chain and compute the total length * of the packet. Check that all data pointers are * 4 byte aligned. If they are not, call fatm_fix_chain() to * fix that problem. This comes more or less from the * en driver. */ mlen = fatm_fix_chain(sc, &m); if (m == NULL) continue; if (m->m_len < sizeof(struct atm_pseudohdr) && (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL) continue; aph = *mtod(m, struct atm_pseudohdr *); mlen -= sizeof(struct atm_pseudohdr); if (mlen == 0) { m_freem(m); continue; } if (mlen > FATM_MAXPDU) { sc->istats.tx_pdu2big++; m_freem(m); continue; } vci = ATM_PH_VCI(&aph); vpi = ATM_PH_VPI(&aph); /* * From here on we need the softc */ FATM_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { FATM_UNLOCK(sc); m_freem(m); break; } if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL || !(vc->vflags & FATM_VCC_OPEN)) { FATM_UNLOCK(sc); m_freem(m); continue; } if (fatm_tx(sc, m, vc, mlen)) { FATM_UNLOCK(sc); break; } FATM_UNLOCK(sc); } } /* * VCC management * * This may seem complicated. The reason for this is that we need an * asynchronous open/close for the NATM VCCs because our ioctl handler * is called with the radix node head of the routing table locked. Therefore * we cannot sleep there and wait for the open/close to succeed. For this * reason we just initiate the operation from the ioctl. */ /* * Command the card to open/close a VC. * Return the queue entry for waiting if we are successful.
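* NULL is returned when the command queue is full; the callers map this to EIO.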
*/ static struct cmdqueue * fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd, u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *)) { struct cmdqueue *q; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (NULL); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = func; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci)); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, cmd); BARRIER_W(sc); return (q); } /* * The VC has been opened/closed and somebody has been waiting for this. * Wake them up. */ static void fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; q->error = EIO; } wakeup(q); } /* * Open complete */ static void fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc) { vc->vflags &= ~FATM_VCC_TRY_OPEN; vc->vflags |= FATM_VCC_OPEN; if (vc->vflags & FATM_VCC_REOPEN) { vc->vflags &= ~FATM_VCC_REOPEN; return; } /* inform management if this is not an NG * VCC or it's an NG PVC. */ if (!(vc->param.flags & ATMIO_FLAG_NG) || (vc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1); } /* * The VC that we have tried to open asynchronously has been opened. */ static void fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q) { u_int vci; struct card_vcc *vc; vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC)); vc = sc->vccs[vci]; H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; sc->vccs[vci] = NULL; uma_zfree(sc->vcc_zone, vc); if_printf(sc->ifp, "opening VCI %u failed\n", vci); return; } fatm_open_finish(sc, vc); } /* * Wait on the queue entry until the VCC is opened/closed. */ static int fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q) { int error; /* * Wait for the command to complete */ error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz); if (error != 0) return (error); return (q->error); } /* * Start to open a VCC. This just initiates the operation.
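* For synchronous opens the caller sleeps in fatm_waitvcc() until the card has answered; with ATMIO_FLAG_ASYNC the completion is handled later in fatm_open_complete().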
*/ static int fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op) { int error; struct card_vcc *vc; /* * Check parameters */ if ((op->param.flags & ATMIO_FLAG_NOTX) && (op->param.flags & ATMIO_FLAG_NORX)) return (EINVAL); if (!VC_OK(sc, op->param.vpi, op->param.vci)) return (EINVAL); if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5) return (EINVAL); vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO); if (vc == NULL) return (ENOMEM); error = 0; FATM_LOCK(sc); if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) { error = EIO; goto done; } if (sc->vccs[op->param.vci] != NULL) { error = EBUSY; goto done; } vc->param = op->param; vc->rxhand = op->rxhand; switch (op->param.traffic) { case ATMIO_TRAFFIC_UBR: break; case ATMIO_TRAFFIC_CBR: if (op->param.tparam.pcr == 0 || op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) { error = EINVAL; goto done; } break; default: error = EINVAL; goto done; } vc->ibytes = vc->obytes = 0; vc->ipackets = vc->opackets = 0; vc->vflags = FATM_VCC_TRY_OPEN; sc->vccs[op->param.vci] = vc; sc->open_vccs++; error = fatm_load_vc(sc, vc); if (error != 0) { sc->vccs[op->param.vci] = NULL; sc->open_vccs--; goto done; } /* don't free below */ vc = NULL; done: FATM_UNLOCK(sc); if (vc != NULL) uma_zfree(sc->vcc_zone, vc); return (error); } /* * Try to initialize the given VC */ static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc) { uint32_t cmd; struct cmdqueue *q; int error; /* Command and buffer strategy */ cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16); if (vc->param.aal == ATMIO_AAL_0) cmd |= (0 << 8); else cmd |= (5 << 8); q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1, (vc->param.flags & ATMIO_FLAG_ASYNC) ? fatm_open_complete : fatm_cmd_complete); if (q == NULL) return (EIO); if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) { error = fatm_waitvcc(sc, q); if (error != 0) return (error); fatm_open_finish(sc, vc); } return (0); } /* * Finish close */ static void fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc) { /* inform management if this is not an NG * VCC or it's an NG PVC. */ if (!(vc->param.flags & ATMIO_FLAG_NG) || (vc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0); sc->vccs[vc->param.vci] = NULL; sc->open_vccs--; uma_zfree(sc->vcc_zone, vc); } /* * The VC has been closed. */ static void fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q) { u_int vci; struct card_vcc *vc; vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC)); vc = sc->vccs[vci]; H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; /* keep the VCC in that state */ if_printf(sc->ifp, "closing VCI %u failed\n", vci); return; } fatm_close_finish(sc, vc); } /* * Initiate closing a VCC */ static int fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl) { int error; struct cmdqueue *q; struct card_vcc *vc; if (!VC_OK(sc, cl->vpi, cl->vci)) return (EINVAL); error = 0; FATM_LOCK(sc); if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) { error = EIO; goto done; } vc = sc->vccs[cl->vci]; if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) { error = ENOENT; goto done; } q = fatm_start_vcc(sc, cl->vpi, cl->vci, FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1, (vc->param.flags & ATMIO_FLAG_ASYNC) ?
fatm_close_complete : fatm_cmd_complete); if (q == NULL) { error = EIO; goto done; } vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN); vc->vflags |= FATM_VCC_TRY_CLOSE; if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) { error = fatm_waitvcc(sc, q); if (error != 0) goto done; fatm_close_finish(sc, vc); } done: FATM_UNLOCK(sc); return (error); } /* * IOCTL handler */ static int fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg) { int error; struct fatm_softc *sc = ifp->if_softc; struct ifaddr *ifa = (struct ifaddr *)arg; struct ifreq *ifr = (struct ifreq *)arg; struct atmio_closevcc *cl = (struct atmio_closevcc *)arg; struct atmio_openvcc *op = (struct atmio_openvcc *)arg; struct atmio_vcctable *vtab; error = 0; switch (cmd) { case SIOCATMOPENVCC: /* kernel internal use */ error = fatm_open_vcc(sc, op); break; case SIOCATMCLOSEVCC: /* kernel internal use */ error = fatm_close_vcc(sc, cl); break; case SIOCSIFADDR: FATM_LOCK(sc); ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) fatm_init_locked(sc); switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: case AF_INET6: ifa->ifa_rtrequest = atm_rtrequest; break; #endif default: break; } FATM_UNLOCK(sc); break; case SIOCSIFFLAGS: FATM_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { fatm_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { fatm_stop(sc); } } FATM_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (ifp->if_drv_flags & IFF_DRV_RUNNING) error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); else error = EINVAL; break; case SIOCATMGVCCS: /* return vcc table */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1); error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) + vtab->count * sizeof(vtab->vccs[0])); free(vtab, M_DEVBUF); break; case SIOCATMGETVCCS: /* internal netgraph use */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0); if (vtab == NULL) { error = ENOMEM; break; } *(void **)arg = vtab; break; default: DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg)); error = EINVAL; break; } return (error); } /* * Detach from the interface and free all resources allocated during * initialisation and later. 
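* Resources are torn down roughly in the reverse order of their allocation; receive buffers that are still owned by the card at this point cannot be reclaimed cleanly, hence the warning below.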
*/ static int fatm_detach(device_t dev) { u_int i; struct rbuf *rb; struct fatm_softc *sc; struct txqueue *tx; sc = device_get_softc(dev); if (device_is_alive(dev)) { FATM_LOCK(sc); fatm_stop(sc); utopia_detach(&sc->utopia); FATM_UNLOCK(sc); atm_ifdetach(sc->ifp); /* XXX race */ } callout_drain(&sc->watchdog_timer); if (sc->ih != NULL) bus_teardown_intr(dev, sc->irqres, sc->ih); while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) { if_printf(sc->ifp, "rbuf %p still in use!\n", rb); bus_dmamap_unload(sc->rbuf_tag, rb->map); m_freem(rb->m); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } if (sc->txqueue.chunk != NULL) { for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); bus_dmamap_destroy(sc->tx_tag, tx->map); } } while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) { bus_dmamap_destroy(sc->rbuf_tag, rb->map); LIST_REMOVE(rb, link); } if (sc->rbufs != NULL) free(sc->rbufs, M_DEVBUF); if (sc->vccs != NULL) { for (i = 0; i < FORE_MAX_VCC + 1; i++) if (sc->vccs[i] != NULL) { uma_zfree(sc->vcc_zone, sc->vccs[i]); sc->vccs[i] = NULL; } free(sc->vccs, M_DEVBUF); } if (sc->vcc_zone != NULL) uma_zdestroy(sc->vcc_zone); if (sc->l1queue.chunk != NULL) free(sc->l1queue.chunk, M_DEVBUF); if (sc->s1queue.chunk != NULL) free(sc->s1queue.chunk, M_DEVBUF); if (sc->rxqueue.chunk != NULL) free(sc->rxqueue.chunk, M_DEVBUF); if (sc->txqueue.chunk != NULL) free(sc->txqueue.chunk, M_DEVBUF); if (sc->cmdqueue.chunk != NULL) free(sc->cmdqueue.chunk, M_DEVBUF); destroy_dma_memory(&sc->reg_mem); destroy_dma_memory(&sc->sadi_mem); destroy_dma_memory(&sc->prom_mem); #ifdef TEST_DMA_SYNC destroy_dma_memoryX(&sc->s1q_mem); destroy_dma_memoryX(&sc->l1q_mem); destroy_dma_memoryX(&sc->rxq_mem); destroy_dma_memoryX(&sc->txq_mem); destroy_dma_memoryX(&sc->stat_mem); #endif if (sc->tx_tag != NULL) if (bus_dma_tag_destroy(sc->tx_tag)) printf("tx DMA tag busy!\n"); if (sc->rbuf_tag != NULL) if (bus_dma_tag_destroy(sc->rbuf_tag)) printf("rbuf DMA tag busy!\n"); if (sc->parent_dmat != NULL) if (bus_dma_tag_destroy(sc->parent_dmat)) printf("parent DMA tag busy!\n"); if (sc->irqres != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres); if (sc->memres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->memres); (void)sysctl_ctx_free(&sc->sysctl_ctx); cv_destroy(&sc->cv_stat); cv_destroy(&sc->cv_regs); mtx_destroy(&sc->mtx); if_free(sc->ifp); return (0); } /* * Sysctl handler for internal statistics */ static int fatm_sysctl_istats(SYSCTL_HANDLER_ARGS) { struct fatm_softc *sc = arg1; u_long *ret; int error; ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK); FATM_LOCK(sc); bcopy(&sc->istats, ret, sizeof(sc->istats)); FATM_UNLOCK(sc); error = SYSCTL_OUT(req, ret, sizeof(sc->istats)); free(ret, M_TEMP); return (error); } /* * Sysctl handler for card statistics * This is disabled because it destroys the PHY statistics.
*/ static int fatm_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct fatm_softc *sc = arg1; int error; const struct fatm_stats *s; u_long *ret; u_int i; ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK); FATM_LOCK(sc); if ((error = fatm_getstat(sc)) == 0) { s = sc->sadi_mem.mem; i = 0; ret[i++] = s->phy_4b5b.crc_header_errors; ret[i++] = s->phy_4b5b.framing_errors; ret[i++] = s->phy_oc3.section_bip8_errors; ret[i++] = s->phy_oc3.path_bip8_errors; ret[i++] = s->phy_oc3.line_bip24_errors; ret[i++] = s->phy_oc3.line_febe_errors; ret[i++] = s->phy_oc3.path_febe_errors; ret[i++] = s->phy_oc3.corr_hcs_errors; ret[i++] = s->phy_oc3.ucorr_hcs_errors; ret[i++] = s->atm.cells_transmitted; ret[i++] = s->atm.cells_received; ret[i++] = s->atm.vpi_bad_range; ret[i++] = s->atm.vpi_no_conn; ret[i++] = s->atm.vci_bad_range; ret[i++] = s->atm.vci_no_conn; ret[i++] = s->aal0.cells_transmitted; ret[i++] = s->aal0.cells_received; ret[i++] = s->aal0.cells_dropped; ret[i++] = s->aal4.cells_transmitted; ret[i++] = s->aal4.cells_received; ret[i++] = s->aal4.cells_crc_errors; ret[i++] = s->aal4.cels_protocol_errors; ret[i++] = s->aal4.cells_dropped; ret[i++] = s->aal4.cspdus_transmitted; ret[i++] = s->aal4.cspdus_received; ret[i++] = s->aal4.cspdus_protocol_errors; ret[i++] = s->aal4.cspdus_dropped; ret[i++] = s->aal5.cells_transmitted; ret[i++] = s->aal5.cells_received; ret[i++] = s->aal5.congestion_experienced; ret[i++] = s->aal5.cells_dropped; ret[i++] = s->aal5.cspdus_transmitted; ret[i++] = s->aal5.cspdus_received; ret[i++] = s->aal5.cspdus_crc_errors; ret[i++] = s->aal5.cspdus_protocol_errors; ret[i++] = s->aal5.cspdus_dropped; ret[i++] = s->aux.small_b1_failed; ret[i++] = s->aux.large_b1_failed; ret[i++] = s->aux.small_b2_failed; ret[i++] = s->aux.large_b2_failed; ret[i++] = s->aux.rpd_alloc_failed; ret[i++] = s->aux.receive_carrier; } /* declare the buffer free */ sc->flags &= ~FATM_STAT_INUSE; cv_signal(&sc->cv_stat); FATM_UNLOCK(sc); if (error == 0) error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS); free(ret, M_TEMP); return (error); } #define MAXDMASEGS 32 /* maximum number of receive descriptors */ /* * Attach to the device. * * We assume, that there is a global lock (Giant in this case) that protects * multiple threads from entering this function. This makes sense, doesn't it? */ static int fatm_attach(device_t dev) { struct ifnet *ifp; struct fatm_softc *sc; int unit; uint16_t cfg; int error = 0; struct rbuf *rb; u_int i; struct txqueue *tx; sc = device_get_softc(dev); unit = device_get_unit(dev); ifp = sc->ifp = if_alloc(IFT_ATM); if (ifp == NULL) { error = ENOSPC; goto fail; } IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E; IFP2IFATM(sc->ifp)->mib.serial = 0; IFP2IFATM(sc->ifp)->mib.hw_version = 0; IFP2IFATM(sc->ifp)->mib.sw_version = 0; IFP2IFATM(sc->ifp)->mib.vpi_bits = 0; IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS; IFP2IFATM(sc->ifp)->mib.max_vpcs = 0; IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->phy = &sc->utopia; LIST_INIT(&sc->rbuf_free); LIST_INIT(&sc->rbuf_used); /* * Initialize mutex and condition variables. 
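* cv_stat serializes use of the preallocated statistics buffer (see fatm_getstat() above); cv_regs appears to serve the same purpose for the register dump buffer.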
*/ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); cv_init(&sc->cv_stat, "fatm_stat"); cv_init(&sc->cv_regs, "fatm_regs"); sysctl_ctx_init(&sc->sysctl_ctx); callout_init_mtx(&sc->watchdog_timer, &sc->mtx, 0); /* * Make the sysctl tree */ if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "istats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0, fatm_sysctl_istats, "LU", "internal statistics") == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "stats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0, fatm_sysctl_stats, "LU", "card statistics") == NULL) goto fail; if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0, "retry flag") == NULL) goto fail; #ifdef FATM_DEBUG if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags") == NULL) goto fail; sc->debug = FATM_DEBUG; #endif /* * Network subsystem stuff */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX; ifp->if_ioctl = fatm_ioctl; ifp->if_start = fatm_start; ifp->if_init = fatm_init; ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib; ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib); /* * Enable busmaster */ pci_enable_busmaster(dev); /* * Map memory */ sc->memid = 0x10; sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid, RF_ACTIVE); if (sc->memres == NULL) { if_printf(ifp, "could not map memory\n"); error = ENXIO; goto fail; } sc->memh = rman_get_bushandle(sc->memres); sc->memt = rman_get_bustag(sc->memres); /* * Convert endianness of slave access */ cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1); cfg |= FATM_PCIM_SWAB; pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1); /* * Allocate interrupt (activate at the end) */ sc->irqid = 0; sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_SHAREABLE | RF_ACTIVE); if (sc->irqres == NULL) { if_printf(ifp, "could not allocate irq\n"); error = ENXIO; goto fail; } /* * Allocate the parent DMA tag. This is used simply to hold overall * restrictions for the controller (and PCI bus) and is never used * to do anything. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_dmat)) { if_printf(ifp, "could not allocate parent DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate the receive buffer DMA tag. This tag must map a maximum of * a mbuf cluster. */ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rbuf_tag)) { if_printf(ifp, "could not allocate rbuf DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate the transmission DMA tag. Must add 1, because * rounded up PDU will be 65536 bytes long. */ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0, NULL, NULL, &sc->tx_tag)) { if_printf(ifp, "could not allocate tx DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate DMAable memory. 
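* There is one 32-bit status word per queue entry, shared by all five queues; the queue memories themselves are allocated with 32-byte alignment for the card.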
*/ sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN); sc->stat_mem.align = 4; sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE; sc->txq_mem.align = 32; sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE; sc->rxq_mem.align = 32; sc->s1q_mem.size = SMALL_SUPPLY_QLEN * BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE); sc->s1q_mem.align = 32; sc->l1q_mem.size = LARGE_SUPPLY_QLEN * BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE); sc->l1q_mem.align = 32; #ifdef TEST_DMA_SYNC if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 || (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 || (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 || (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 || (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0) goto fail; #else if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 || (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 || (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 || (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 || (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0) goto fail; #endif sc->prom_mem.size = sizeof(struct prom); sc->prom_mem.align = 32; if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0) goto fail; sc->sadi_mem.size = sizeof(struct fatm_stats); sc->sadi_mem.align = 32; if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0) goto fail; sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS; sc->reg_mem.align = 32; if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0) goto fail; /* * Allocate queues */ sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]), M_DEVBUF, M_ZERO | M_WAITOK); sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); if (sc->vcc_zone == NULL) { error = ENOMEM; goto fail; } /* * Allocate memory for the receive buffer headers. The total number * of headers should probably also include the maximum number of * buffers on the receive queue. */ sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE; sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf), M_DEVBUF, M_ZERO | M_WAITOK); /* * Put all rbuf headers on the free list and create DMA maps. */ for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) { if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) { if_printf(sc->ifp, "creating rx map: %d\n", error); goto fail; } LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } /* * Create dma maps for transmission. In case of an error, free the * allocated DMA maps, because on some architectures maps are NULL * and we cannot distinguish between a failure and a NULL map in * the detach routine. 
*/ for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) { if_printf(sc->ifp, "creating tx map: %d\n", error); while (i > 0) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i - 1); bus_dmamap_destroy(sc->tx_tag, tx->map); i--; } goto fail; } } utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx, &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), &fatm_utopia_methods); sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER; /* * Attach the interface */ atm_ifattach(ifp); ifp->if_snd.ifq_maxlen = 512; #ifdef ENABLE_BPF bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); #endif error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET | INTR_MPSAFE, NULL, fatm_intr, sc, &sc->ih); if (error) { if_printf(ifp, "couldn't setup irq\n"); goto fail; } fail: if (error) fatm_detach(dev); return (error); } #if defined(FATM_DEBUG) && 0 static void dump_s1_queue(struct fatm_softc *sc) { int i; struct supqueue *q; for(i = 0; i < SMALL_SUPPLY_QLEN; i++) { q = GET_QUEUE(sc->s1queue, struct supqueue, i); printf("%2d: card=%x(%x,%x) stat=%x\n", i, q->q.card, READ4(sc, q->q.card), READ4(sc, q->q.card + 4), *q->q.statp); } } #endif /* * Driver infrastructure. */ static device_method_t fatm_methods[] = { DEVMETHOD(device_probe, fatm_probe), DEVMETHOD(device_attach, fatm_attach), DEVMETHOD(device_detach, fatm_detach), { 0, 0 } }; static driver_t fatm_driver = { "fatm", fatm_methods, sizeof(struct fatm_softc), }; DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0); Index: head/sys/dev/fe/if_fe.c =================================================================== --- head/sys/dev/fe/if_fe.c (revision 313981) +++ head/sys/dev/fe/if_fe.c (revision 313982) @@ -1,2256 +1,2256 @@ /*- * All Rights Reserved, Copyright (C) Fujitsu Limited 1995 * * This software may be used, modified, copied, distributed, and sold, in * both source and binary form provided that the above copyright, these * terms and the following disclaimer are retained. The name of the author * and/or the contributor may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND THE CONTRIBUTOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR THE CONTRIBUTOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * * Device driver for Fujitsu MB86960A/MB86965A based Ethernet cards. * Contributed by M. Sekiguchi. * * This version is intended to be a generic template for various * MB86960A/MB86965A based Ethernet cards. It currently supports * Fujitsu FMV-180 series for ISA and Allied-Telesis AT1700/RE2000 * series for ISA, as well as Fujitsu MBH10302 PC Card. * There are some currently- * unused hooks embedded, which are primarily intended to support * other types of Ethernet cards, but the author is not sure whether * they are useful. 
* * This software is a derivative work of if_ed.c version 1.56 by David * Greenman available as a part of FreeBSD 2.0 RELEASE source distribution. * * The following lines are retained from the original if_ed.c: * * Copyright (C) 1993, David Greenman. This software may be used, modified, * copied, distributed, and sold, in both source and binary form provided * that the above copyright and these terms are retained. Under no * circumstances is the author responsible for the proper functioning * of this software, nor does the author assume any responsibility * for damages incurred with its use. */ /* * TODO: * o To support ISA PnP auto configuration for FMV-183/184. * o To reconsider mbuf usage. * o To reconsider transmission buffer usage, including * transmission buffer size (currently 4KB x 2) and pros-and- * cons of multiple frame transmission. * o To test IPX codes. * o To test new-bus frontend. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Transmit just one packet per a "send" command to 86960. * This option is intended for performance test. An EXPERIMENTAL option. */ #ifndef FE_SINGLE_TRANSMISSION #define FE_SINGLE_TRANSMISSION 0 #endif /* * Maximum loops when interrupt. * This option prevents an infinite loop due to hardware failure. * (Some laptops make an infinite loop after PC Card is ejected.) */ #ifndef FE_MAX_LOOP #define FE_MAX_LOOP 0x800 #endif /* * Device configuration flags. */ /* DLCR6 settings. */ #define FE_FLAGS_DLCR6_VALUE 0x007F /* Force DLCR6 override. */ #define FE_FLAGS_OVERRIDE_DLCR6 0x0080 devclass_t fe_devclass; /* * Special filter values. */ static struct fe_filter const fe_filter_nothing = { FE_FILTER_NOTHING }; static struct fe_filter const fe_filter_all = { FE_FILTER_ALL }; /* Standard driver entry points. These can be static. */ static void fe_init (void *); static void fe_init_locked (struct fe_softc *); static driver_intr_t fe_intr; static int fe_ioctl (struct ifnet *, u_long, caddr_t); static void fe_start (struct ifnet *); static void fe_start_locked (struct ifnet *); static void fe_watchdog (void *); static int fe_medchange (struct ifnet *); static void fe_medstat (struct ifnet *, struct ifmediareq *); /* Local functions. Order of declaration is confused. FIXME. */ static int fe_get_packet ( struct fe_softc *, u_short ); static void fe_tint ( struct fe_softc *, u_char ); static void fe_rint ( struct fe_softc *, u_char ); static void fe_xmit ( struct fe_softc * ); static void fe_write_mbufs ( struct fe_softc *, struct mbuf * ); static void fe_setmode ( struct fe_softc * ); static void fe_loadmar ( struct fe_softc * ); #ifdef DIAGNOSTIC static void fe_emptybuffer ( struct fe_softc * ); #endif /* * Fe driver specific constants which relate to 86960/86965. */ /* Interrupt masks */ #define FE_TMASK ( FE_D2_COLL16 | FE_D2_TXDONE ) #define FE_RMASK ( FE_D3_OVRFLO | FE_D3_CRCERR \ | FE_D3_ALGERR | FE_D3_SRTPKT | FE_D3_PKTRDY ) /* Maximum number of iterations for a receive interrupt. */ #define FE_MAX_RECV_COUNT ( ( 65536 - 2048 * 2 ) / 64 ) /* * Maximum size of SRAM is 65536, * minimum size of transmission buffer in fe is 2x2KB, * and minimum amount of received packet including headers * added by the chip is 64 bytes. * Hence FE_MAX_RECV_COUNT is the upper limit for number * of packets in the receive buffer. */ /* * Miscellaneous definitions not directly related to hardware. 
*/ /* The following line must be deleted when "net/if_media.h" supports it. */ #ifndef IFM_10_FL #define IFM_10_FL /* 13 */ IFM_10_5 #endif #if 0 /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */ static int const bit2media [] = { IFM_HDX | IFM_ETHER | IFM_AUTO, IFM_HDX | IFM_ETHER | IFM_MANUAL, IFM_HDX | IFM_ETHER | IFM_10_T, IFM_HDX | IFM_ETHER | IFM_10_2, IFM_HDX | IFM_ETHER | IFM_10_5, IFM_HDX | IFM_ETHER | IFM_10_FL, IFM_FDX | IFM_ETHER | IFM_10_T, /* More can come here... */ 0 }; #else /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */ static int const bit2media [] = { IFM_ETHER | IFM_AUTO, IFM_ETHER | IFM_MANUAL, IFM_ETHER | IFM_10_T, IFM_ETHER | IFM_10_2, IFM_ETHER | IFM_10_5, IFM_ETHER | IFM_10_FL, IFM_ETHER | IFM_10_T, /* More can come here... */ 0 }; #endif /* * Check that specific bits in specific registers have specific values. * A common utility function called from various sub-probe routines. */ int fe_simple_probe (struct fe_softc const * sc, struct fe_simple_probe_struct const * sp) { struct fe_simple_probe_struct const *p; int8_t bits; for (p = sp; p->mask != 0; p++) { bits = fe_inb(sc, p->port); printf("port %d, mask %x, bits %x read %x\n", p->port, p->mask, p->bits, bits); if ((bits & p->mask) != p->bits) return 0; } return 1; } /* Test if a given 6 byte value is a valid Ethernet station (MAC) address. "Vendor" is an expected vendor code (first three bytes), or zero when nothing is expected. */ int fe_valid_Ether_p (u_char const * addr, unsigned vendor) { #ifdef FE_DEBUG printf("fe?: validating %6D against %06x\n", addr, ":", vendor); #endif /* All zero is not allowed as a vendor code. */ if (addr[0] == 0 && addr[1] == 0 && addr[2] == 0) return 0; switch (vendor) { case 0x000000: /* Legal Ethernet address (stored in ROM) must have its Group and Local bits cleared. */ if ((addr[0] & 0x03) != 0) return 0; break; case 0x020000: /* Same as above, but a local address is allowed in this context. */ if (ETHER_IS_MULTICAST(addr)) return 0; break; default: /* Make sure the vendor part matches if one is given. */ if ( addr[0] != ((vendor >> 16) & 0xFF) || addr[1] != ((vendor >> 8) & 0xFF) || addr[2] != ((vendor ) & 0xFF)) return 0; break; } /* Host part must not be all-zeros nor all-ones. */ if (addr[3] == 0xFF && addr[4] == 0xFF && addr[5] == 0xFF) return 0; if (addr[3] == 0x00 && addr[4] == 0x00 && addr[5] == 0x00) return 0; /* Given addr looks like an Ethernet address. */ return 1; } /* Fill our softc struct with default values. */ void fe_softc_defaults (struct fe_softc *sc) { /* Prepare for typical register prototypes. We assume a "typical" board has <32KB> of SRAM connected with byte-wide data lines. */ sc->proto_dlcr4 = FE_D4_LBC_DISABLE | FE_D4_CNTRL; sc->proto_dlcr5 = 0; sc->proto_dlcr6 = FE_D6_BUFSIZ_32KB | FE_D6_TXBSIZ_2x4KB | FE_D6_BBW_BYTE | FE_D6_SBW_WORD | FE_D6_SRAM_100ns; sc->proto_dlcr7 = FE_D7_BYTSWP_LH; sc->proto_bmpr13 = 0; /* Assume the probe process (to be done later) is stable. */ sc->stability = 0; /* A typical board needs no hooks. */ sc->init = NULL; sc->stop = NULL; /* Assume the board has no software-controllable media selection. */ sc->mbitmap = MB_HM; sc->defmedia = MB_HM; sc->msel = NULL; } /* Common error reporting routine used in probe routines for "soft configured IRQ"-type boards. */ void fe_irq_failure (char const *name, int unit, int irq, char const *list) { printf("fe%d: %s board is detected, but %s IRQ was given\n", unit, name, (irq == NO_IRQ ?
"no" : "invalid")); if (list != NULL) { printf("fe%d: specify an IRQ from %s in kernel config\n", unit, list); } } /* * Hardware (vendor) specific hooks. */ /* * Generic media selection scheme for MB86965 based boards. */ void fe_msel_965 (struct fe_softc *sc) { u_char b13; /* Find the appropriate bits for BMPR13 tranceiver control. */ switch (IFM_SUBTYPE(sc->media.ifm_media)) { case IFM_AUTO: b13 = FE_B13_PORT_AUTO | FE_B13_TPTYPE_UTP; break; case IFM_10_T: b13 = FE_B13_PORT_TP | FE_B13_TPTYPE_UTP; break; default: b13 = FE_B13_PORT_AUI; break; } /* Write it into the register. It takes effect immediately. */ fe_outb(sc, FE_BMPR13, sc->proto_bmpr13 | b13); } /* * Fujitsu MB86965 JLI mode support routines. */ /* * Routines to read all bytes from the config EEPROM through MB86965A. * It is a MicroWire (3-wire) serial EEPROM with 6-bit address. * (93C06 or 93C46.) */ static void fe_strobe_eeprom_jli (struct fe_softc *sc, u_short bmpr16) { /* * We must guarantee 1us (or more) interval to access slow * EEPROMs. The following redundant code provides enough * delay with ISA timing. (Even if the bus clock is "tuned.") * Some modification will be needed on faster busses. */ fe_outb(sc, bmpr16, FE_B16_SELECT); fe_outb(sc, bmpr16, FE_B16_SELECT | FE_B16_CLOCK); fe_outb(sc, bmpr16, FE_B16_SELECT | FE_B16_CLOCK); fe_outb(sc, bmpr16, FE_B16_SELECT); } void fe_read_eeprom_jli (struct fe_softc * sc, u_char * data) { u_char n, val, bit; u_char save16, save17; /* Save the current value of the EEPROM interface registers. */ save16 = fe_inb(sc, FE_BMPR16); save17 = fe_inb(sc, FE_BMPR17); /* Read bytes from EEPROM; two bytes per an iteration. */ for (n = 0; n < JLI_EEPROM_SIZE / 2; n++) { /* Reset the EEPROM interface. */ fe_outb(sc, FE_BMPR16, 0x00); fe_outb(sc, FE_BMPR17, 0x00); /* Start EEPROM access. */ fe_outb(sc, FE_BMPR16, FE_B16_SELECT); fe_outb(sc, FE_BMPR17, FE_B17_DATA); fe_strobe_eeprom_jli(sc, FE_BMPR16); /* Pass the iteration count as well as a READ command. */ val = 0x80 | n; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_outb(sc, FE_BMPR17, (val & bit) ? FE_B17_DATA : 0); fe_strobe_eeprom_jli(sc, FE_BMPR16); } fe_outb(sc, FE_BMPR17, 0x00); /* Read a byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_strobe_eeprom_jli(sc, FE_BMPR16); if (fe_inb(sc, FE_BMPR17) & FE_B17_DATA) val |= bit; } *data++ = val; /* Read one more byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_strobe_eeprom_jli(sc, FE_BMPR16); if (fe_inb(sc, FE_BMPR17) & FE_B17_DATA) val |= bit; } *data++ = val; } #if 0 /* Reset the EEPROM interface, again. */ fe_outb(sc, FE_BMPR16, 0x00); fe_outb(sc, FE_BMPR17, 0x00); #else /* Make sure to restore the original value of EEPROM interface registers, since we are not yet sure we have MB86965A on the address. */ fe_outb(sc, FE_BMPR17, save17); fe_outb(sc, FE_BMPR16, save16); #endif #if 1 /* Report what we got. */ if (bootverbose) { int i; data -= JLI_EEPROM_SIZE; for (i = 0; i < JLI_EEPROM_SIZE; i += 16) { if_printf(sc->ifp, "EEPROM(JLI):%3x: %16D\n", i, data + i, " "); } } #endif } void fe_init_jli (struct fe_softc * sc) { /* "Reset" by writing into a magic location. */ DELAY(200); fe_outb(sc, 0x1E, fe_inb(sc, 0x1E)); DELAY(300); } /* * SSi 78Q8377A support routines. */ /* * Routines to read all bytes from the config EEPROM through 78Q8377A. * It is a MicroWire (3-wire) serial EEPROM with 8-bit address. (I.e., * 93C56 or 93C66.) * * As I don't have SSi manuals, (hmm, an old song again!) I'm not exactly * sure the following code is correct... 
It is just stolen from the * C-NET(98)P2 support routine in FreeBSD(98). */ void fe_read_eeprom_ssi (struct fe_softc *sc, u_char *data) { u_char val, bit; int n; u_char save6, save7, save12; /* Save the current value for the DLCR registers we are about to destroy. */ save6 = fe_inb(sc, FE_DLCR6); save7 = fe_inb(sc, FE_DLCR7); /* Put the 78Q8377A into a state that we can access the EEPROM. */ fe_outb(sc, FE_DLCR6, FE_D6_BBW_WORD | FE_D6_SBW_WORD | FE_D6_DLC_DISABLE); fe_outb(sc, FE_DLCR7, FE_D7_BYTSWP_LH | FE_D7_RBS_BMPR | FE_D7_RDYPNS | FE_D7_POWER_UP); /* Save the current value for the BMPR12 register, too. */ save12 = fe_inb(sc, FE_DLCR12); /* Read bytes from EEPROM; two bytes per an iteration. */ for (n = 0; n < SSI_EEPROM_SIZE / 2; n++) { /* Start EEPROM access */ fe_outb(sc, FE_DLCR12, SSI_EEP); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL); /* Send the following four bits to the EEPROM in the specified order: a dummy bit, a start bit, and command bits (10) for READ. */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL ); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK ); /* 0 */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_DAT); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | SSI_DAT); /* 1 */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_DAT); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | SSI_DAT); /* 1 */ fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL ); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK ); /* 0 */ /* Pass the iteration count to the chip. */ for (bit = 0x80; bit != 0x00; bit >>= 1) { val = ( n & bit ) ? SSI_DAT : 0; fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | val); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | val); } /* Read a byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK); if (fe_inb(sc, FE_DLCR12) & SSI_DIN) val |= bit; } *data++ = val; /* Read one more byte. */ val = 0; for (bit = 0x80; bit != 0x00; bit >>= 1) { fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL); fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK); if (fe_inb(sc, FE_DLCR12) & SSI_DIN) val |= bit; } *data++ = val; fe_outb(sc, FE_DLCR12, SSI_EEP); } /* Reset the EEPROM interface. (For now.) */ fe_outb(sc, FE_DLCR12, 0x00); /* Restore the saved register values, for the case that we didn't have 78Q8377A at the given address. */ fe_outb(sc, FE_DLCR12, save12); fe_outb(sc, FE_DLCR7, save7); fe_outb(sc, FE_DLCR6, save6); #if 1 /* Report what we got. */ if (bootverbose) { int i; data -= SSI_EEPROM_SIZE; for (i = 0; i < SSI_EEPROM_SIZE; i += 16) { if_printf(sc->ifp, "EEPROM(SSI):%3x: %16D\n", i, data + i, " "); } } #endif } /* * TDK/LANX boards support routines. */ /* It is assumed that the CLK line is low and SDA is high (float) upon entry. 
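* The LNX_CYCLE macros below pack four successive SDA/CLK line states into one 32-bit word; fe_eeprom_cycle_lnx() plays such a word out one byte at a time with 15us delays, forming the start/stop/data-bit waveforms of the X24C01's two-wire protocol.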
*/ #define LNX_PH(D,K,N) \ ((LNX_SDA_##D | LNX_CLK_##K) << N) #define LNX_CYCLE(D1,D2,D3,D4,K1,K2,K3,K4) \ (LNX_PH(D1,K1,0)|LNX_PH(D2,K2,8)|LNX_PH(D3,K3,16)|LNX_PH(D4,K4,24)) #define LNX_CYCLE_START LNX_CYCLE(HI,LO,LO,HI, HI,HI,LO,LO) #define LNX_CYCLE_STOP LNX_CYCLE(LO,LO,HI,HI, LO,HI,HI,LO) #define LNX_CYCLE_HI LNX_CYCLE(HI,HI,HI,HI, LO,HI,LO,LO) #define LNX_CYCLE_LO LNX_CYCLE(LO,LO,LO,HI, LO,HI,LO,LO) #define LNX_CYCLE_INIT LNX_CYCLE(LO,HI,HI,HI, LO,LO,LO,LO) static void fe_eeprom_cycle_lnx (struct fe_softc *sc, u_short reg20, u_long cycle) { fe_outb(sc, reg20, (cycle ) & 0xFF); DELAY(15); fe_outb(sc, reg20, (cycle >> 8) & 0xFF); DELAY(15); fe_outb(sc, reg20, (cycle >> 16) & 0xFF); DELAY(15); fe_outb(sc, reg20, (cycle >> 24) & 0xFF); DELAY(15); } static u_char fe_eeprom_receive_lnx (struct fe_softc *sc, u_short reg20) { u_char dat; fe_outb(sc, reg20, LNX_CLK_HI | LNX_SDA_FL); DELAY(15); dat = fe_inb(sc, reg20); fe_outb(sc, reg20, LNX_CLK_LO | LNX_SDA_FL); DELAY(15); return (dat & LNX_SDA_IN); } void fe_read_eeprom_lnx (struct fe_softc *sc, u_char *data) { int i; u_char n, bit, val; u_char save20; u_short reg20 = 0x14; save20 = fe_inb(sc, reg20); /* NOTE: DELAY() timing constants are approximately three times longer (slower) than the required minimum. This is to guarantee a reliable operation under some tough conditions... Fortunately, this routine is only called during the boot phase, so the speed is less important than stability. */ #if 1 /* Reset the X24C01's internal state machine and put it into the IDLE state. We usually don't need this, but *if* someone (e.g., probe routine of other driver) write some garbage into the register at 0x14, synchronization will be lost, and the normal EEPROM access protocol won't work. Moreover, as there are no easy way to reset, we need a _manoeuvre_ here. (It even lacks a reset pin, so pushing the RESET button on the PC doesn't help!) */ fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_INIT); for (i = 0; i < 10; i++) fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_START); fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_STOP); DELAY(10000); #endif /* Issue a start condition. */ fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_START); /* Send seven bits of the starting address (zero, in this case) and a command bit for READ. */ val = 0x01; for (bit = 0x80; bit != 0x00; bit >>= 1) { if (val & bit) { fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_HI); } else { fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_LO); } } /* Receive an ACK bit. */ if (fe_eeprom_receive_lnx(sc, reg20)) { /* ACK was not received. EEPROM is not present (i.e., this board was not a TDK/LANX) or not working properly. */ if (bootverbose) { if_printf(sc->ifp, "no ACK received from EEPROM(LNX)\n"); } /* Clear the given buffer to indicate we could not get any info. and return. */ bzero(data, LNX_EEPROM_SIZE); goto RET; } /* Read bytes from EEPROM. */ for (n = 0; n < LNX_EEPROM_SIZE; n++) { /* Read a byte and store it into the buffer. */ val = 0x00; for (bit = 0x80; bit != 0x00; bit >>= 1) { if (fe_eeprom_receive_lnx(sc, reg20)) val |= bit; } *data++ = val; /* Acknowledge if we have to read more. */ if (n < LNX_EEPROM_SIZE - 1) { fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_LO); } } /* Issue a STOP condition, de-activating the clock line. It will be safer to keep the clock line low than to leave it high. */ fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_STOP); RET: fe_outb(sc, reg20, save20); #if 1 /* Report what we got. 
*/ if (bootverbose) { data -= LNX_EEPROM_SIZE; for (i = 0; i < LNX_EEPROM_SIZE; i += 16) { if_printf(sc->ifp, "EEPROM(LNX):%3x: %16D\n", i, data + i, " "); } } #endif } void fe_init_lnx (struct fe_softc * sc) { /* Reset the 86960. Do we need this? FIXME. */ fe_outb(sc, 0x12, 0x06); DELAY(100); fe_outb(sc, 0x12, 0x07); DELAY(100); /* Set up the IRQ control register on the ASIC. */ fe_outb(sc, 0x14, sc->priv_info); } /* * Ungermann-Bass boards support routine. */ void fe_init_ubn (struct fe_softc * sc) { /* Do we need this? FIXME. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); fe_outb(sc, 0x18, 0x00); DELAY(200); /* Set up the IRQ control register on the ASIC. */ fe_outb(sc, 0x14, sc->priv_info); } /* * Install the interface into kernel networking data structures */ int fe_attach (device_t dev) { struct fe_softc *sc = device_get_softc(dev); struct ifnet *ifp; int flags = device_get_flags(dev); int b, error; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc\n"); fe_release_resource(dev); return (ENOSPC); } mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->timer, &sc->lock, 0); /* * Initialize ifnet structure */ ifp->if_softc = sc; if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start = fe_start; ifp->if_ioctl = fe_ioctl; ifp->if_init = fe_init; ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof (sc->mibdata); #if 0 /* I'm not sure... */ sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; #endif /* * Set fixed interface flags. */ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); #if FE_SINGLE_TRANSMISSION /* Override txb config to allocate minimum. */ sc->proto_dlcr6 &= ~FE_D6_TXBSIZ; sc->proto_dlcr6 |= FE_D6_TXBSIZ_2x2KB; #endif /* Modify the hardware config if requested. */ if (flags & FE_FLAGS_OVERRIDE_DLCR6) sc->proto_dlcr6 = flags & FE_FLAGS_DLCR6_VALUE; /* Find TX buffer size, based on the hardware dependent proto. */ switch (sc->proto_dlcr6 & FE_D6_TXBSIZ) { case FE_D6_TXBSIZ_2x2KB: sc->txb_size = 2048; break; case FE_D6_TXBSIZ_2x4KB: sc->txb_size = 4096; break; case FE_D6_TXBSIZ_2x8KB: sc->txb_size = 8192; break; default: /* Oops, we can't work with single buffer configuration. */ if (bootverbose) { if_printf(sc->ifp, "strange TXBSIZ config; fixing\n"); } sc->proto_dlcr6 &= ~FE_D6_TXBSIZ; sc->proto_dlcr6 |= FE_D6_TXBSIZ_2x2KB; sc->txb_size = 2048; break; } /* Initialize the if_media interface. */ ifmedia_init(&sc->media, 0, fe_medchange, fe_medstat); for (b = 0; bit2media[b] != 0; b++) { if (sc->mbitmap & (1 << b)) { ifmedia_add(&sc->media, bit2media[b], 0, NULL); } } for (b = 0; bit2media[b] != 0; b++) { if (sc->defmedia & (1 << b)) { ifmedia_set(&sc->media, bit2media[b]); break; } } #if 0 /* Turned off; this is called later, when the interface UPs. */ fe_medchange(sc); #endif /* Attach and stop the interface. */ FE_LOCK(sc); fe_stop(sc); FE_UNLOCK(sc); ether_ifattach(sc->ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, fe_intr, sc, &sc->irq_handle); if (error) { ether_ifdetach(ifp); mtx_destroy(&sc->lock); if_free(ifp); fe_release_resource(dev); return ENXIO; } /* Print additional info when attached. */ device_printf(dev, "type %s%s\n", sc->typestr, (sc->proto_dlcr4 & FE_D4_DSC) ?
", full duplex" : ""); if (bootverbose) { int buf, txb, bbw, sbw, ram; buf = txb = bbw = sbw = ram = -1; switch ( sc->proto_dlcr6 & FE_D6_BUFSIZ ) { case FE_D6_BUFSIZ_8KB: buf = 8; break; case FE_D6_BUFSIZ_16KB: buf = 16; break; case FE_D6_BUFSIZ_32KB: buf = 32; break; case FE_D6_BUFSIZ_64KB: buf = 64; break; } switch ( sc->proto_dlcr6 & FE_D6_TXBSIZ ) { case FE_D6_TXBSIZ_2x2KB: txb = 2; break; case FE_D6_TXBSIZ_2x4KB: txb = 4; break; case FE_D6_TXBSIZ_2x8KB: txb = 8; break; } switch ( sc->proto_dlcr6 & FE_D6_BBW ) { case FE_D6_BBW_BYTE: bbw = 8; break; case FE_D6_BBW_WORD: bbw = 16; break; } switch ( sc->proto_dlcr6 & FE_D6_SBW ) { case FE_D6_SBW_BYTE: sbw = 8; break; case FE_D6_SBW_WORD: sbw = 16; break; } switch ( sc->proto_dlcr6 & FE_D6_SRAM ) { case FE_D6_SRAM_100ns: ram = 100; break; case FE_D6_SRAM_150ns: ram = 150; break; } device_printf(dev, "SRAM %dKB %dbit %dns, TXB %dKBx2, %dbit I/O\n", buf, bbw, ram, txb, sbw); } if (sc->stability & UNSTABLE_IRQ) device_printf(dev, "warning: IRQ number may be incorrect\n"); if (sc->stability & UNSTABLE_MAC) device_printf(dev, "warning: above MAC address may be incorrect\n"); if (sc->stability & UNSTABLE_TYPE) device_printf(dev, "warning: hardware type was not validated\n"); return 0; } int fe_alloc_port(device_t dev, int size) { struct fe_softc *sc = device_get_softc(dev); struct resource *res; int rid; rid = 0; res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &rid, size, RF_ACTIVE); if (res) { sc->port_used = size; sc->port_res = res; return (0); } return (ENOENT); } int fe_alloc_irq(device_t dev, int flags) { struct fe_softc *sc = device_get_softc(dev); struct resource *res; int rid; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | flags); if (res) { sc->irq_res = res; return (0); } return (ENOENT); } void fe_release_resource(device_t dev) { struct fe_softc *sc = device_get_softc(dev); if (sc->port_res) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port_res); sc->port_res = NULL; } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); sc->irq_res = NULL; } } /* * Reset interface, after some (hardware) trouble is deteced. */ static void fe_reset (struct fe_softc *sc) { /* Record how many packets are lost by this accident. */ if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->txb_sched + sc->txb_count); sc->mibdata.dot3StatsInternalMacTransmitErrors++; /* Put the interface into known initial state. */ fe_stop(sc); if (sc->ifp->if_flags & IFF_UP) fe_init_locked(sc); } /* * Stop everything on the interface. * * All buffered packets, both transmitting and receiving, * if any, will be lost by stopping the interface. */ void fe_stop (struct fe_softc *sc) { FE_ASSERT_LOCKED(sc); /* Disable interrupts. */ fe_outb(sc, FE_DLCR2, 0x00); fe_outb(sc, FE_DLCR3, 0x00); /* Stop interface hardware. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE); DELAY(200); /* Clear all interrupt status. */ fe_outb(sc, FE_DLCR0, 0xFF); fe_outb(sc, FE_DLCR1, 0xFF); /* Put the chip in stand-by mode. */ DELAY(200); fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_POWER_DOWN); DELAY(200); /* Reset transmitter variables and interface flags. */ sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); sc->tx_timeout = 0; callout_stop(&sc->timer); sc->txb_free = sc->txb_size; sc->txb_count = 0; sc->txb_sched = 0; /* MAR loading can be delayed. */ sc->filter_change = 0; /* Call a device-specific hook. */ if (sc->stop) sc->stop(sc); } /* * Device timeout/watchdog routine. 
Entered if the device neglects to * generate an interrupt after a transmit has been started on it. */ static void fe_watchdog (void *arg) { struct fe_softc *sc = arg; FE_ASSERT_LOCKED(sc); if (sc->tx_timeout && --sc->tx_timeout == 0) { struct ifnet *ifp = sc->ifp; /* A "debug" message. */ if_printf(ifp, "transmission timeout (%d+%d)%s\n", sc->txb_sched, sc->txb_count, (ifp->if_flags & IFF_UP) ? "" : " when down"); if (ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS) == 0 && ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS) == 0) if_printf(ifp, "wrong IRQ setting in config?\n"); fe_reset(sc); } callout_reset(&sc->timer, hz, fe_watchdog, sc); } /* * Initialize device. */ static void fe_init (void * xsc) { struct fe_softc *sc = xsc; FE_LOCK(sc); fe_init_locked(sc); FE_UNLOCK(sc); } static void fe_init_locked (struct fe_softc *sc) { /* Start initializing 86960. */ /* Call a hook before we start initializing the chip. */ if (sc->init) sc->init(sc); /* * Make sure to disable the chip, too. * This may also help re-programming the chip after * hot insertion of PCMCIAs. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE); DELAY(200); /* Power up the chip and select the register bank for DLCRs. */ DELAY(200); fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_DLCR | FE_D7_POWER_UP); DELAY(200); /* Feed the station address. */ fe_outblk(sc, FE_DLCR8, IF_LLADDR(sc->ifp), ETHER_ADDR_LEN); /* Clear the multicast address filter to receive nothing. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_MAR | FE_D7_POWER_UP); fe_outblk(sc, FE_MAR8, fe_filter_nothing.data, FE_FILTER_LEN); /* Select the BMPR bank for runtime register access. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); /* Initialize registers. */ fe_outb(sc, FE_DLCR0, 0xFF); /* Clear all bits. */ fe_outb(sc, FE_DLCR1, 0xFF); /* ditto. */ fe_outb(sc, FE_DLCR2, 0x00); fe_outb(sc, FE_DLCR3, 0x00); fe_outb(sc, FE_DLCR4, sc->proto_dlcr4); fe_outb(sc, FE_DLCR5, sc->proto_dlcr5); fe_outb(sc, FE_BMPR10, 0x00); fe_outb(sc, FE_BMPR11, FE_B11_CTRL_SKIP | FE_B11_MODE1); fe_outb(sc, FE_BMPR12, 0x00); fe_outb(sc, FE_BMPR13, sc->proto_bmpr13); fe_outb(sc, FE_BMPR14, 0x00); fe_outb(sc, FE_BMPR15, 0x00); /* Enable interrupts. */ fe_outb(sc, FE_DLCR2, FE_TMASK); fe_outb(sc, FE_DLCR3, FE_RMASK); /* Select requested media, just before enabling DLC. */ if (sc->msel) sc->msel(sc); /* Enable transmitter and receiver. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_ENABLE); DELAY(200); #ifdef DIAGNOSTIC /* * Make sure to empty the receive buffer. * * This may be redundant, but *if* the receive buffer were full * at this point, then the driver would hang. I have experienced * some strange hang-up just after UP. I hope the following * code solves the problem. * * I have changed the order of hardware initialization. * I think the receive buffer cannot have any packets at this * point in this version. The following code *must* be * redundant now. FIXME. * * I've heard a rumor that on some PC Card implementations of * 8696x, the receive buffer can have some data at this point. * The following message helps in discovering the fact. FIXME. */ if (!(fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP)) { if_printf(sc->ifp, "receive buffer has some data after reset\n"); fe_emptybuffer(sc); } /* Do we need this here? Actually, no. I must be paranoid. */ fe_outb(sc, FE_DLCR0, 0xFF); /* Clear all bits. */ fe_outb(sc, FE_DLCR1, 0xFF); /* ditto. */ #endif /* Set 'running' flag, because we are now running.
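 *
 * (Note: the callout_reset() just below arms the driver's private
 * watchdog. A sketch of the whole cycle, using only names defined in
 * this file: fe_xmit() sets
 *
 *	sc->tx_timeout = 1 + sc->txb_count;
 *
 * and fe_watchdog(), re-armed every hz ticks, runs
 *
 *	if (sc->tx_timeout && --sc->tx_timeout == 0)
 *		fe_reset(sc);
 *
 * so a transmission that never interrupts is noticed within a few
 * seconds.)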
*/ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->timer, hz, fe_watchdog, sc); /* * At this point, the interface is running properly, * except that it receives *no* packets. We then call * fe_setmode() to tell the chip which packets are to be * received, based on the if_flags and multicast group * list. It completes the initialization process. */ fe_setmode(sc); #if 0 /* ...and attempt to start output queued packets. */ /* TURNED OFF, because the semi-auto media prober wants to UP the interface keeping it idle. The upper layer will soon start the interface anyway, and there is no significant delay. */ fe_start_locked(sc->ifp); #endif } /* * This routine actually starts the transmission on the interface */ static void fe_xmit (struct fe_softc *sc) { /* * Set a timer just in case we never hear from the board again. * We use a longer timeout for multiple-packet transmission. * I'm not sure this timer value is appropriate. FIXME. */ sc->tx_timeout = 1 + sc->txb_count; /* Update txb variables. */ sc->txb_sched = sc->txb_count; sc->txb_count = 0; sc->txb_free = sc->txb_size; sc->tx_excolls = 0; /* Start transmitter, passing packets in TX buffer. */ fe_outb(sc, FE_BMPR10, sc->txb_sched | FE_B10_START); } /* * Start output on the interface. * We make one assumption here: * 1) that the IFF_DRV_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) */ static void fe_start (struct ifnet *ifp) { struct fe_softc *sc = ifp->if_softc; FE_LOCK(sc); fe_start_locked(ifp); FE_UNLOCK(sc); } static void fe_start_locked (struct ifnet *ifp) { struct fe_softc *sc = ifp->if_softc; struct mbuf *m; #ifdef DIAGNOSTIC /* Just a sanity check. */ if ((sc->txb_count == 0) != (sc->txb_free == sc->txb_size)) { /* * Txb_count and txb_free work together to manage the * transmission buffer. Txb_count keeps track of the * used portion of the buffer, while txb_free tracks the * unused portion. So, as long as the driver runs properly, * txb_count is zero if and only if txb_free is the same * as txb_size (which represents the whole buffer). */ if_printf(ifp, "inconsistent txb variables (%d, %d)\n", sc->txb_count, sc->txb_free); /* * So, what should I do, then? * * We now know txb_count and txb_free contradict each other. * We cannot, however, tell which is wrong. Moreover, * we cannot peek into the 86960 transmission buffer or * reset the transmission buffer. (In fact, we can * reset the entire interface. I don't want to do it.) * * If txb_count is incorrect, leaving it as-is will cause * sending of garbage after the next interrupt. We have to * avoid it. Hence, we reset the txb_count here. If * txb_free was incorrect, resetting txb_count just loses * some packets. We can live with it. */ sc->txb_count = 0; } #endif /* * First, see if there are buffered packets and an idle * transmitter - should never happen at this point. */ if ((sc->txb_count > 0) && (sc->txb_sched == 0)) { if_printf(ifp, "transmitter idle with %d buffered packets\n", sc->txb_count); fe_xmit(sc); } /* * Stop accepting more transmission packets temporarily, when * a filter change request is delayed. Updating the MARs on * 86960 flushes the transmission buffer, so it is delayed * until all buffered transmission packets have been sent * out. */ if (sc->filter_change) { /* * A filter change request is delayed only when the DLC is * working. The DLC soon raises an interrupt after finishing * the work. */ goto indicate_active; } for (;;) { /* * See if there is room to put another packet in the buffer.
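 *
 * Concretely, the test below reserves worst-case room: one maximal
 * frame plus the on-chip length word. With the usual values of these
 * constants (illustrative arithmetic only),
 *
 *	ETHER_MAX_LEN - ETHER_CRC_LEN + FE_DATA_LEN_LEN
 *	    = 1518 - 4 + 2 = 1516 bytes,
 *
 * so a 4KB transmission buffer bank accepts at most two such frames.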
* We *could* do a better job by peeking the send queue to * know the length of the next packet. The current version just * tests against the worst case (i.e., the longest packet). FIXME. * * When adding the packet-peek feature, don't forget to add a * test on txb_count against QUEUEING_MAX. * There is a small chance that the packet count exceeds * the limit. Assume the transmission buffer is 8KB (2x8KB * configuration) and an application sends a bunch of small * (i.e., minimum packet sized) packets rapidly. An 8KB * buffer can hold 130 blocks of 62 bytes each... */ if (sc->txb_free < ETHER_MAX_LEN - ETHER_CRC_LEN + FE_DATA_LEN_LEN) { /* No room. */ goto indicate_active; } #if FE_SINGLE_TRANSMISSION if (sc->txb_count > 0) { /* Just one packet per transmission buffer. */ goto indicate_active; } #endif /* * Get the next mbuf chain for a packet to send. */ IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) { /* No more packets to send. */ goto indicate_inactive; } /* * Copy the mbuf chain into the transmission buffer. * txb_* variables are updated as necessary. */ fe_write_mbufs(sc, m); /* Start transmitter if it's idle. */ if ((sc->txb_count > 0) && (sc->txb_sched == 0)) fe_xmit(sc); /* * Tap off here if there is a bpf listener, * and the device is *not* in promiscuous mode. * (86960 receives self-generated packets if * and only if it is in "receive everything" * mode.) */ if (!(sc->ifp->if_flags & IFF_PROMISC)) BPF_MTAP(sc->ifp, m); m_freem(m); } indicate_inactive: /* * We are using the !OACTIVE flag to indicate to * the outside world that we can accept an * additional packet rather than that the * transmitter is _actually_ active. Indeed, the * transmitter may be active, but if we haven't * filled all the buffers with data then we still * want to accept more. */ sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; indicate_active: /* * The transmitter is active, and there is no room for * more outgoing packets in the transmission buffer. */ sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } /* * Drop (skip) a packet from the receive buffer in 86960 memory. */ static void fe_droppacket (struct fe_softc * sc, int len) { int i; /* * The 86960 manual says that we have to read 8 bytes from the buffer * before skipping the packet and that there must be more than 8 bytes * remaining in the buffer when issuing a skip command. * Remember, we have already read 4 bytes before coming here. */ if (len > 12) { /* Read 4 more bytes, and skip the rest of the packet. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); } else { (void) fe_inw(sc, FE_BMPR8); (void) fe_inw(sc, FE_BMPR8); } fe_outb(sc, FE_BMPR14, FE_B14_SKIP); } else { /* We should not come here unless receiving RUNTs. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { for (i = 0; i < len; i++) (void) fe_inb(sc, FE_BMPR8); } else { for (i = 0; i < len; i += 2) (void) fe_inw(sc, FE_BMPR8); } } } #ifdef DIAGNOSTIC /* * Empty the receive buffer. */ static void fe_emptybuffer (struct fe_softc * sc) { int i; u_char saved_dlcr5; #ifdef FE_DEBUG if_printf(sc->ifp, "emptying receive buffer\n"); #endif /* * Stop receiving packets, temporarily. */ saved_dlcr5 = fe_inb(sc, FE_DLCR5); fe_outb(sc, FE_DLCR5, sc->proto_dlcr5); DELAY(1300); /* * When we come here, the receive buffer management may * have been broken. So, we cannot use the skip operation. * Just discard everything in the buffer.
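 *
 * (The drain loops below are bounded at 65536 iterations because the
 * largest buffer SRAM the 86960 supports is 64KB, per the FE_D6_BUFSIZ
 * decoding in fe_attach() above, so a correctly working chip must
 * raise FE_D5_BUFEMP before the bound is reached.)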
*/ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { for (i = 0; i < 65536; i++) { if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) break; (void) fe_inb(sc, FE_BMPR8); } } else { for (i = 0; i < 65536; i += 2) { if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) break; (void) fe_inw(sc, FE_BMPR8); } } /* * Double check. */ if (!(fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP)) { if_printf(sc->ifp, "could not empty receive buffer\n"); /* Hmm. What should I do if this happens? FIXME. */ } /* * Restart receiving packets. */ fe_outb(sc, FE_DLCR5, saved_dlcr5); } #endif /* * Transmission interrupt handler * The control flow of this function looks silly. FIXME. */ static void fe_tint (struct fe_softc * sc, u_char tstat) { int left; int col; /* * Handle "excessive collision" interrupt. */ if (tstat & FE_D0_COLL16) { /* * Find how many packets (including this collided one) * are left unsent in the transmission buffer. */ left = fe_inb(sc, FE_BMPR10); if_printf(sc->ifp, "excessive collision (%d/%d)\n", left, sc->txb_sched); /* * Clear the collision flag (in 86960) here * to avoid confusing statistics. */ fe_outb(sc, FE_DLCR0, FE_D0_COLLID); /* * Restart the transmitter, skipping the * collided packet. * * We *must* skip the packet to keep the network running * properly. An excessive collision error is an * indication of network overload. If we * tried sending the same packet after an excessive * collision, the network would be filled with * out-of-time packets. Packets belonging * to reliable transports (such as TCP) are resent * by some upper layer. */ fe_outb(sc, FE_BMPR11, FE_B11_CTRL_SKIP | FE_B11_MODE1); /* Update statistics. */ sc->tx_excolls++; } /* * Handle "transmission complete" interrupt. */ if (tstat & FE_D0_TXDONE) { /* * Add in the total number of collisions on the last * transmission. We also clear the "collision occurred" flag * here. * * 86960 has a design flaw in its collision count on multiple * packet transmission. When we send two or more packets * with one start command (that's what we do when the * transmission queue is crowded), 86960 informs us of the * number of collisions that occurred on the last packet of * the transmission only. The number of collisions on previous * packets is lost. I have been told that this fact is clearly * stated in the Fujitsu document. * * I decided not to take it too seriously. The collision * count is not so important, anyway. Any comments? FIXME. */ if (fe_inb(sc, FE_DLCR0) & FE_D0_COLLID) { /* Clear collision flag. */ fe_outb(sc, FE_DLCR0, FE_D0_COLLID); /* Extract collision count from 86960. */ col = fe_inb(sc, FE_DLCR4); col = (col & FE_D4_COL) >> FE_D4_COL_SHIFT; if (col == 0) { /* * The status register indicates collisions, * while the collision count is zero. * This can happen after multiple packet * transmission, indicating that one or more * previous packet(s) had collided. * * Since the accurate number of collisions * has been lost, we just guess it as 1. * Am I too optimistic? FIXME. */ col = 1; } if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, col); if (col == 1) sc->mibdata.dot3StatsSingleCollisionFrames++; else sc->mibdata.dot3StatsMultipleCollisionFrames++; sc->mibdata.dot3StatsCollFrequencies[col-1]++; } /* * Update transmission statistics. * Be sure to reflect the number of excessive collisions.
*/ col = sc->tx_excolls; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, sc->txb_sched - col); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, col); if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, col * 16); sc->mibdata.dot3StatsExcessiveCollisions += col; sc->mibdata.dot3StatsCollFrequencies[15] += col; sc->txb_sched = 0; /* * The transmitter is no longer active. * Reset the output active flag and watchdog timer. */ sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tx_timeout = 0; /* * If more data is ready to transmit in the buffer, start * transmitting it. Otherwise keep the transmitter idle, * even if more data is queued. This gives the receive * process a slight priority. */ if (sc->txb_count > 0) fe_xmit(sc); } } /* * Ethernet interface receiver interrupt. */ static void fe_rint (struct fe_softc * sc, u_char rstat) { u_short len; u_char status; int i; /* * Update statistics if this interrupt is caused by an error. * Note that, when the system was not sufficiently fast, the * receive interrupt might not be acknowledged immediately. If * one or more erroneous frames were received before this routine * was scheduled, they are ignored, and the following error stats * report less than the real values. */ if (rstat & (FE_D1_OVRFLO | FE_D1_CRCERR | FE_D1_ALGERR | FE_D1_SRTPKT)) { if (rstat & FE_D1_OVRFLO) sc->mibdata.dot3StatsInternalMacReceiveErrors++; if (rstat & FE_D1_CRCERR) sc->mibdata.dot3StatsFCSErrors++; if (rstat & FE_D1_ALGERR) sc->mibdata.dot3StatsAlignmentErrors++; #if 0 /* The reference MAC receiver defined in 802.3 silently ignores short frames (RUNTs) without notifying the upper layer. RFC 1650 (dot3 MIB) is based on 802.3, and it has no stats entry for RUNTs... */ if (rstat & FE_D1_SRTPKT) sc->mibdata.dot3StatsFrameTooShorts++; /* :-) */ #endif if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } /* * MB86960 has a flag indicating "receive queue empty." * We just loop, checking the flag, to pull out all received * packets. * * We limit the number of iterations to avoid an infinite loop. * The upper bound is set to an unrealistically high value. */ for (i = 0; i < FE_MAX_RECV_COUNT * 2; i++) { /* Stop the iteration if 86960 indicates no packets. */ if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) return; /* * Extract a receive status byte. * As our 86960 is in 16 bit bus access mode, we have to * use inw() to get the status byte. The significant * value is returned in the lower 8 bits. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { status = fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); } else { status = (u_char) fe_inw(sc, FE_BMPR8); } /* * Extract the packet length. * It is the sum of the header (14 bytes) and the payload. * The CRC has been stripped off by the 86960. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { len = fe_inb(sc, FE_BMPR8); len |= (fe_inb(sc, FE_BMPR8) << 8); } else { len = fe_inw(sc, FE_BMPR8); } /* * As our 86960 is programmed to ignore errored frames, * we must not see any error indication in the * receive buffer. So, any error condition is a * serious error, e.g., the receive buffer pointers * being out of sync. */ if ((status & 0xF0) != 0x20 || len > ETHER_MAX_LEN - ETHER_CRC_LEN || len < ETHER_MIN_LEN - ETHER_CRC_LEN) { if_printf(sc->ifp, "RX buffer out-of-sync\n"); if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); sc->mibdata.dot3StatsInternalMacReceiveErrors++; fe_reset(sc); return; } /* * Go get a packet. */ if (fe_get_packet(sc, len) < 0) { /* * A negative return from fe_get_packet() * indicates that no mbuf is available. We stop * receiving packets, even if there are more * in the buffer.
We hope we can get more * mbuf next time. */ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); sc->mibdata.dot3StatsMissedFrames++; fe_droppacket(sc, len); return; } /* Successfully received a packet. Update stat. */ if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); } /* Maximum number of frames has been received. Something strange is happening here... */ if_printf(sc->ifp, "unusual receive flood\n"); sc->mibdata.dot3StatsInternalMacReceiveErrors++; fe_reset(sc); } /* * Ethernet interface interrupt processor */ static void fe_intr (void *arg) { struct fe_softc *sc = arg; u_char tstat, rstat; int loop_count = FE_MAX_LOOP; FE_LOCK(sc); /* Loop until there are no more new interrupt conditions. */ while (loop_count-- > 0) { /* * Get interrupt conditions, masking unneeded flags. */ tstat = fe_inb(sc, FE_DLCR0) & FE_TMASK; rstat = fe_inb(sc, FE_DLCR1) & FE_RMASK; if (tstat == 0 && rstat == 0) { FE_UNLOCK(sc); return; } /* * Reset the conditions we are acknowledging. */ fe_outb(sc, FE_DLCR0, tstat); fe_outb(sc, FE_DLCR1, rstat); /* * Handle transmitter interrupts. */ if (tstat) fe_tint(sc, tstat); /* * Handle receiver interrupts */ if (rstat) fe_rint(sc, rstat); /* * Update the multicast address filter if it is * needed and possible. We do it now, because * we can make sure the transmission buffer is empty, * and there is a good chance that the receive queue * is empty. It will minimize the possibility of * packet loss. */ if (sc->filter_change && sc->txb_count == 0 && sc->txb_sched == 0) { fe_loadmar(sc); sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* * If it looks like the transmitter can take more data, * attempt to start output on the interface. This is done * after handling the receiver interrupt to give the * receive operation priority. * * BTW, I'm not sure in what case the OACTIVE is on at * this point. Is the following test redundant? * * No. This routine polls for both transmitter and * receiver interrupts. 86960 can raise a receiver * interrupt when the transmission buffer is full. */ if ((sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) fe_start_locked(sc->ifp); } FE_UNLOCK(sc); if_printf(sc->ifp, "too many loops\n"); } /* * Process an ioctl request. This code needs some work - it looks * pretty ugly. */ static int fe_ioctl (struct ifnet * ifp, u_long command, caddr_t data) { struct fe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFFLAGS: /* * Switch interface state between "running" and * "stopped", reflecting the UP flag. */ FE_LOCK(sc); if (sc->ifp->if_flags & IFF_UP) { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) fe_init_locked(sc); } else { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) fe_stop(sc); } /* * Promiscuous and/or multicast flags may have changed, * so reprogram the multicast filter and/or receive mode. */ fe_setmode(sc); FE_UNLOCK(sc); /* Done. */ break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ FE_LOCK(sc); fe_setmode(sc); FE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* Let if_media to handle these commands and to call us back. */ error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Retrieve packet from receive buffer and send to the next level up via * ether_input(). * Returns 0 if success, -1 if error (i.e., mbuf allocation failure). 
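 *
 * The caller is expected to discard the frame itself on failure, so
 * the chip's buffer pointers stay in sync. In sketch form (fe_rint()
 * above is the real caller):
 *
 *	if (fe_get_packet(sc, len) < 0)
 *		fe_droppacket(sc, len);
 *	else
 *		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);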
*/ static int fe_get_packet (struct fe_softc * sc, u_short len) { struct ifnet *ifp = sc->ifp; struct ether_header *eh; struct mbuf *m; FE_ASSERT_LOCKED(sc); /* * NFS wants the data to be aligned to a word (4-byte) * boundary. The Ethernet header has 14 bytes. There is a * 2-byte gap. */ #define NFS_MAGIC_OFFSET 2 /* * This function assumes that an Ethernet packet fits in an * mbuf (with a cluster attached when necessary.) On FreeBSD * 2.0 for x86, which is the primary target of this driver, an * mbuf cluster has 4096 bytes, and we are happy. On ancient * BSDs, such as vanilla 4.3 for 386, the cluster size was 1024, * however. If the following #error message is printed at * compile time, you need to rewrite this function. */ #if ( MCLBYTES < ETHER_MAX_LEN - ETHER_CRC_LEN + NFS_MAGIC_OFFSET ) #error "Too small MCLBYTES to use fe driver." #endif /* * Our strategy has one more problem. There is a policy on * mbuf cluster allocation. It says that we must have at * least MINCLSIZE (208 bytes on FreeBSD 2.0 for x86) to * allocate a cluster. For a packet of a size between * (MHLEN - 2) and (MINCLSIZE - 2), our code violates the rule... * On the other hand, the current code is short, simple, * and fast. It does nothing harmful, just wastes * some memory. Any comments? FIXME. */ /* Allocate an mbuf with packet header info. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return -1; /* Attach a cluster if this packet doesn't fit in a normal mbuf. */ if (len > MHLEN - NFS_MAGIC_OFFSET) { if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return -1; } } /* Initialize packet header info. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; /* Set the length of this packet. */ m->m_len = len; /* The following silliness is to make NFS happy */ m->m_data += NFS_MAGIC_OFFSET; /* Get (actually just point to) the header part. */ eh = mtod(m, struct ether_header *); /* Get a packet. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { fe_insb(sc, FE_BMPR8, (u_int8_t *)eh, len); } else { fe_insw(sc, FE_BMPR8, (u_int16_t *)eh, (len + 1) >> 1); } /* Feed the packet to the upper layer. */ FE_UNLOCK(sc); (*ifp->if_input)(ifp, m); FE_LOCK(sc); return 0; } /* * Write an mbuf chain to the transmission buffer memory using 16 bit PIO. * * If an mbuf chain is too long for an Ethernet frame, it is not sent. * Packets shorter than the Ethernet minimum are legal, and we pad them * before sending them out. An exception is "partial" packets which are * shorter than the mandatory Ethernet header. */ static void fe_write_mbufs (struct fe_softc *sc, struct mbuf *m) { u_short length, len; struct mbuf *mp; u_char *data; u_short savebyte; /* WARNING: Architecture dependent! */ #define NO_PENDING_BYTE 0xFFFF static u_char padding [ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_HDR_LEN]; #ifdef DIAGNOSTIC /* First, count up the total number of bytes to copy */ length = 0; for (mp = m; mp != NULL; mp = mp->m_next) length += mp->m_len; /* Check if this matches the one in the packet header. */ if (length != m->m_pkthdr.len) { if_printf(sc->ifp, "packet length mismatch? (%d/%d)\n", length, m->m_pkthdr.len); } #else /* Just use the length value in the packet header. */ length = m->m_pkthdr.len; #endif #ifdef DIAGNOSTIC /* * We should never send big packets. If such a packet is passed, * it should be a bug in the upper layer. We just ignore it. * Partial (too short) packets are never sent, either.
*/ if (length < ETHER_HDR_LEN || length > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(sc->ifp, "got an out-of-spec packet (%u bytes) to send\n", length); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); sc->mibdata.dot3StatsInternalMacTransmitErrors++; return; } #endif /* * Put the length word for this frame. * Does 86960 accept odd length? -- Yes. * Do we need to pad the length to minimum size by ourselves? * -- Generally yes. But for (or will be) the last * packet in the transmission buffer, we can skip the * padding process. It may gain performance slightly. FIXME. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { len = max(length, ETHER_MIN_LEN - ETHER_CRC_LEN); fe_outb(sc, FE_BMPR8, len & 0x00ff); fe_outb(sc, FE_BMPR8, (len & 0xff00) >> 8); } else { fe_outw(sc, FE_BMPR8, max(length, ETHER_MIN_LEN - ETHER_CRC_LEN)); } /* * Update buffer status now. * Truncate the length up to an even number, since we use outw(). */ if ((sc->proto_dlcr6 & FE_D6_SBW) != FE_D6_SBW_BYTE) { length = (length + 1) & ~1; } sc->txb_free -= FE_DATA_LEN_LEN + max(length, ETHER_MIN_LEN - ETHER_CRC_LEN); sc->txb_count++; /* * Transfer the data from mbuf chain to the transmission buffer. * MB86960 seems to require that data be transferred as words, and * only words. So that we require some extra code to patch * over odd-length mbufs. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { /* 8-bit cards are easy. */ - for (mp = m; mp != 0; mp = mp->m_next) { + for (mp = m; mp != NULL; mp = mp->m_next) { if (mp->m_len) fe_outsb(sc, FE_BMPR8, mtod(mp, caddr_t), mp->m_len); } } else { /* 16-bit cards are a pain. */ savebyte = NO_PENDING_BYTE; - for (mp = m; mp != 0; mp = mp->m_next) { + for (mp = m; mp != NULL; mp = mp->m_next) { /* Ignore empty mbuf. */ len = mp->m_len; if (len == 0) continue; /* Find the actual data to send. */ data = mtod(mp, caddr_t); /* Finish the last byte. */ if (savebyte != NO_PENDING_BYTE) { fe_outw(sc, FE_BMPR8, savebyte | (*data << 8)); data++; len--; savebyte = NO_PENDING_BYTE; } /* output contiguous words */ if (len > 1) { fe_outsw(sc, FE_BMPR8, (u_int16_t *)data, len >> 1); data += len & ~1; len &= 1; } /* Save a remaining byte, if there is one. */ if (len > 0) savebyte = *data; } /* Spit the last byte, if the length is odd. */ if (savebyte != NO_PENDING_BYTE) fe_outw(sc, FE_BMPR8, savebyte); } /* Pad to the Ethernet minimum length, if the packet is too short. */ if (length < ETHER_MIN_LEN - ETHER_CRC_LEN) { if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { fe_outsb(sc, FE_BMPR8, padding, ETHER_MIN_LEN - ETHER_CRC_LEN - length); } else { fe_outsw(sc, FE_BMPR8, (u_int16_t *)padding, (ETHER_MIN_LEN - ETHER_CRC_LEN - length) >> 1); } } } /* * Compute the multicast address filter from the * list of multicast addresses we need to listen to. */ static struct fe_filter fe_mcaf ( struct fe_softc *sc ) { int index; struct fe_filter filter; struct ifmultiaddr *ifma; filter = fe_filter_nothing; if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; index = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; #ifdef FE_DEBUG if_printf(sc->ifp, "hash(%6D) == %d\n", enm->enm_addrlo , ":", index); #endif filter.data[index >> 3] |= 1 << (index & 7); } if_maddr_runlock(sc->ifp); return ( filter ); } /* * Calculate a new "multicast packet filter" and put the 86960 * receiver in appropriate mode. 
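 *
 * The filter fe_mcaf() computes above is the common 64-bit multicast
 * hash: the top six bits of the little-endian CRC-32 of each MAC
 * address select one bit in the 8-byte MAR table,
 *
 *	index = ether_crc32_le(lladdr, ETHER_ADDR_LEN) >> 26;
 *	filter.data[index >> 3] |= 1 << (index & 7);
 *
 * so unrelated groups can share a bit and the hardware filter is only
 * an approximation of the exact membership list.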
*/ static void fe_setmode (struct fe_softc *sc) { /* * If the interface is not running, we postpone the update * process for receive modes and multicast address filter * until the interface is restarted. It reduces some * complicated job on maintaining chip states. (Earlier versions * of this driver had a bug on that point...) * * To complete the trick, fe_init() calls fe_setmode() after * restarting the interface. */ if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; /* * Promiscuous mode is handled separately. */ if (sc->ifp->if_flags & IFF_PROMISC) { /* * Program 86960 to receive all packets on the segment * including those directed to other stations. * Multicast filter stored in MARs are ignored * under this setting, so we don't need to update it. * * Promiscuous mode in FreeBSD 2 is used solely by * BPF, and BPF only listens to valid (no error) packets. * So, we ignore erroneous ones even in this mode. * (Older versions of fe driver mistook the point.) */ fe_outb(sc, FE_DLCR5, sc->proto_dlcr5 | FE_D5_AFM0 | FE_D5_AFM1); sc->filter_change = 0; return; } /* * Turn the chip to the normal (non-promiscuous) mode. */ fe_outb(sc, FE_DLCR5, sc->proto_dlcr5 | FE_D5_AFM1); /* * Find the new multicast filter value. */ if (sc->ifp->if_flags & IFF_ALLMULTI) sc->filter = fe_filter_all; else sc->filter = fe_mcaf(sc); sc->filter_change = 1; /* * We have to update the multicast filter in the 86960, A.S.A.P. * * Note that the DLC (Data Link Control unit, i.e. transmitter * and receiver) must be stopped when feeding the filter, and * DLC trashes all packets in both transmission and receive * buffers when stopped. * * To reduce the packet loss, we delay the filter update * process until buffers are empty. */ if (sc->txb_sched == 0 && sc->txb_count == 0 && !(fe_inb(sc, FE_DLCR1) & FE_D1_PKTRDY)) { /* * Buffers are (apparently) empty. Load * the new filter value into MARs now. */ fe_loadmar(sc); } else { /* * Buffers are not empty. Mark that we have to update * the MARs. The new filter will be loaded by feintr() * later. */ } } /* * Load a new multicast address filter into MARs. * * The caller must have acquired the softc lock before fe_loadmar. * This function starts the DLC upon return. So it can be called only * when the chip is working, i.e., from the driver's point of view, when * a device is RUNNING. (I mistook the point in previous versions.) */ static void fe_loadmar (struct fe_softc * sc) { /* Stop the DLC (transmitter and receiver). */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE); DELAY(200); /* Select register bank 1 for MARs. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_MAR | FE_D7_POWER_UP); /* Copy filter value into the registers. */ fe_outblk(sc, FE_MAR8, sc->filter.data, FE_FILTER_LEN); /* Restore the bank selection for BMPRs (i.e., runtime registers). */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); /* Restart the DLC. */ DELAY(200); fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_ENABLE); DELAY(200); /* We have just updated the filter. */ sc->filter_change = 0; } /* Change the media selection. */ static int fe_medchange (struct ifnet *ifp) { struct fe_softc *sc = (struct fe_softc *)ifp->if_softc; #ifdef DIAGNOSTIC /* If_media should not pass any request for a media which this interface doesn't support. 
*/ int b; for (b = 0; bit2media[b] != 0; b++) { if (bit2media[b] == sc->media.ifm_media) break; } if (((1 << b) & sc->mbitmap) == 0) { if_printf(sc->ifp, "got an unsupported media request (0x%x)\n", sc->media.ifm_media); return EINVAL; } #endif /* We don't actually change media when the interface is down. fe_init() will do the job, instead. Should we also wait until the transmission buffer being empty? Changing the media when we are sending a frame will cause two garbages on wires, one on old media and another on new. FIXME */ FE_LOCK(sc); if (sc->ifp->if_flags & IFF_UP) { if (sc->msel) sc->msel(sc); } FE_UNLOCK(sc); return 0; } /* I don't know how I can support media status callback... FIXME. */ static void fe_medstat (struct ifnet *ifp, struct ifmediareq *ifmr) { struct fe_softc *sc = ifp->if_softc; ifmr->ifm_active = sc->media.ifm_media; } Index: head/sys/dev/firewire/if_fwip.c =================================================================== --- head/sys/dev/firewire/if_fwip.c (revision 313981) +++ head/sys/dev/firewire/if_fwip.c (revision 313982) @@ -1,934 +1,934 @@ /*- * Copyright (c) 2004 * Doug Rabson * Copyright (c) 2002-2003 * Hidetoshi Shimokawa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * * This product includes software developed by Hidetoshi Shimokawa. * * 4. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_inet.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * We really need a mechanism for allocating regions in the FIFO * address space. We pick a address in the OHCI controller's 'middle' * address space. This means that the controller will automatically * send responses for us, which is fine since we don't have any * important information to put in the response anyway. 
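 *
 * For reference (a sketch of how the constant defined below is used):
 * fwip_attach() splits the 48-bit offset into the 16-bit and 32-bit
 * halves carried in the RFC 2734 hardware address,
 *
 *	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
 *	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);
 *
 * and fwip_unicast_input() later reassembles and compares the same
 * value before accepting a block write request.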
*/ #define INET_FIFO 0xfffe00000000LL #define FWIPDEBUG if (fwipdebug) if_printf #define TX_MAX_QUEUE (FWMAXQUEUE - 1) /* network interface */ static void fwip_start (struct ifnet *); static int fwip_ioctl (struct ifnet *, u_long, caddr_t); static void fwip_init (void *); static void fwip_post_busreset (void *); static void fwip_output_callback (struct fw_xfer *); static void fwip_async_output (struct fwip_softc *, struct ifnet *); static void fwip_start_send (void *, int); static void fwip_stream_input (struct fw_xferq *); static void fwip_unicast_input(struct fw_xfer *); static int fwipdebug = 0; static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */ static int tx_speed = 2; static int rx_queue_len = FWMAXQUEUE; static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface"); SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, ""); SYSCTL_DECL(_hw_firewire); static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0, "Firewire ip subsystem"); SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len, 0, "Length of the receive queue"); #ifdef DEVICE_POLLING static poll_handler_t fwip_poll; static int fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct fwip_softc *fwip; struct firewire_comm *fc; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return (0); fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; fc = fwip->fd.fc; fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count); return (0); } #endif /* DEVICE_POLLING */ static void fwip_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent)); } static int fwip_probe(device_t dev) { device_t pa; pa = device_get_parent(dev); if (device_get_unit(dev) != device_get_unit(pa)) { return (ENXIO); } device_set_desc(dev, "IP over FireWire"); return (0); } static int fwip_attach(device_t dev) { struct fwip_softc *fwip; struct ifnet *ifp; int unit, s; struct fw_hwaddr *hwaddr; fwip = ((struct fwip_softc *)device_get_softc(dev)); unit = device_get_unit(dev); ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394); if (ifp == NULL) return (ENOSPC); mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF); /* XXX */ fwip->dma_ch = -1; fwip->fd.fc = device_get_ivars(dev); if (tx_speed < 0) tx_speed = fwip->fd.fc->speed; fwip->fd.dev = dev; fwip->fd.post_explore = NULL; fwip->fd.post_busreset = fwip_post_busreset; fwip->fw_softc.fwip = fwip; TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip); /* * Encode our hardware the way that arp likes it. 
*/ hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr; hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi); hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo); hwaddr->sender_max_rec = fwip->fd.fc->maxrec; hwaddr->sspd = fwip->fd.fc->speed; hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32)); hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO); /* fill the rest and attach interface */ ifp->if_softc = &fwip->fw_softc; if_initname(ifp, device_get_name(dev), unit); ifp->if_init = fwip_init; ifp->if_start = fwip_start; ifp->if_ioctl = fwip_ioctl; ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST); ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif s = splimp(); firewire_ifattach(ifp, hwaddr); splx(s); FWIPDEBUG(ifp, "interface created\n"); return 0; } static void fwip_stop(struct fwip_softc *fwip) { struct firewire_comm *fc; struct fw_xferq *xferq; struct ifnet *ifp = fwip->fw_softc.fwip_ifp; struct fw_xfer *xfer, *next; int i; fc = fwip->fd.fc; if (fwip->dma_ch >= 0) { xferq = fc->ir[fwip->dma_ch]; if (xferq->flag & FWXFERQ_RUNNING) fc->irx_disable(fc, fwip->dma_ch); xferq->flag &= ~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM | FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK); xferq->hand = NULL; for (i = 0; i < xferq->bnchunk; i++) m_freem(xferq->bulkxfer[i].mbuf); free(xferq->bulkxfer, M_FWIP); fw_bindremove(fc, &fwip->fwb); for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } STAILQ_INIT(&fwip->xferlist); xferq->bulkxfer = NULL; fwip->dma_ch = -1; } ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } static int fwip_detach(device_t dev) { struct fwip_softc *fwip; struct ifnet *ifp; int s; fwip = (struct fwip_softc *)device_get_softc(dev); ifp = fwip->fw_softc.fwip_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif s = splimp(); fwip_stop(fwip); firewire_ifdetach(ifp); if_free(ifp); mtx_destroy(&fwip->mtx); splx(s); return 0; } static void fwip_init(void *arg) { struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip; struct firewire_comm *fc; struct ifnet *ifp = fwip->fw_softc.fwip_ifp; struct fw_xferq *xferq; struct fw_xfer *xfer; struct mbuf *m; int i; FWIPDEBUG(ifp, "initializing\n"); fc = fwip->fd.fc; #define START 0 if (fwip->dma_ch < 0) { fwip->dma_ch = fw_open_isodma(fc, /* tx */0); if (fwip->dma_ch < 0) return; xferq = fc->ir[fwip->dma_ch]; xferq->flag |= FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM; xferq->flag &= ~0xff; xferq->flag |= broadcast_channel & 0xff; /* register fwip_input handler */ xferq->sc = (caddr_t) fwip; xferq->hand = fwip_stream_input; xferq->bnchunk = rx_queue_len; xferq->bnpacket = 1; xferq->psize = MCLBYTES; xferq->queued = 0; xferq->buf = NULL; xferq->bulkxfer = (struct fw_bulkxfer *) malloc( sizeof(struct fw_bulkxfer) * xferq->bnchunk, M_FWIP, M_WAITOK); if (xferq->bulkxfer == NULL) { printf("if_fwip: malloc failed\n"); return; } STAILQ_INIT(&xferq->stvalid); STAILQ_INIT(&xferq->stfree); STAILQ_INIT(&xferq->stdma); xferq->stproc = NULL; for (i = 0; i < xferq->bnchunk; i++) { m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); xferq->bulkxfer[i].mbuf = m; m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; STAILQ_INSERT_TAIL(&xferq->stfree, &xferq->bulkxfer[i], link); } fwip->fwb.start = 
INET_FIFO; fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */ /* pre-allocate xfer */ STAILQ_INIT(&fwip->fwb.xferlist); for (i = 0; i < rx_queue_len; i++) { xfer = fw_xfer_alloc(M_FWIP); if (xfer == NULL) break; m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); xfer->recv.payload = mtod(m, uint32_t *); xfer->recv.pay_len = MCLBYTES; xfer->hand = fwip_unicast_input; xfer->fc = fc; xfer->sc = (caddr_t)fwip; xfer->mbuf = m; STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link); } fw_bindadd(fc, &fwip->fwb); STAILQ_INIT(&fwip->xferlist); for (i = 0; i < TX_MAX_QUEUE; i++) { xfer = fw_xfer_alloc(M_FWIP); if (xfer == NULL) break; xfer->send.spd = tx_speed; xfer->fc = fwip->fd.fc; xfer->sc = (caddr_t)fwip; xfer->hand = fwip_output_callback; STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link); } } else xferq = fc->ir[fwip->dma_ch]; fwip->last_dest.hi = 0; fwip->last_dest.lo = 0; /* start dma */ if ((xferq->flag & FWXFERQ_RUNNING) == 0) fc->irx_enable(fc, fwip->dma_ch); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #if 0 /* attempt to start output */ fwip_start(ifp); #endif } static int fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; int s, error; switch (cmd) { case SIOCSIFFLAGS: s = splimp(); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) fwip_init(&fwip->fw_softc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) fwip_stop(fwip); } splx(s); break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFCAP: #ifdef DEVICE_POLLING { struct ifreq *ifr = (struct ifreq *) data; struct firewire_comm *fc = fwip->fd.fc; if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(fwip_poll, ifp); if (error) return (error); /* Disable interrupts */ fc->set_intr(fc, 0); ifp->if_capenable |= IFCAP_POLLING | IFCAP_POLLING_NOCOUNT; return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. 
*/ fc->set_intr(fc, 1); ifp->if_capenable &= ~IFCAP_POLLING; ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT; return (error); } } #endif /* DEVICE_POLLING */ break; default: s = splimp(); error = firewire_ioctl(ifp, cmd, data); splx(s); return (error); } return (0); } static void fwip_post_busreset(void *arg) { struct fwip_softc *fwip = arg; struct crom_src *src; struct crom_chunk *root; src = fwip->fd.fc->crom_src; root = fwip->fd.fc->crom_root; /* RFC2734 IPv4 over IEEE1394 */ bzero(&fwip->unit4, sizeof(struct crom_chunk)); crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR); crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF); crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA"); crom_add_entry(&fwip->unit4, CSRKEY_VER, 1); crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4"); /* RFC3146 IPv6 over IEEE1394 */ bzero(&fwip->unit6, sizeof(struct crom_chunk)); crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR); crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF); crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA"); crom_add_entry(&fwip->unit6, CSRKEY_VER, 2); crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6"); fwip->last_dest.hi = 0; fwip->last_dest.lo = 0; firewire_busreset(fwip->fw_softc.fwip_ifp); } static void fwip_output_callback(struct fw_xfer *xfer) { struct fwip_softc *fwip; struct ifnet *ifp; int s; fwip = (struct fwip_softc *)xfer->sc; ifp = fwip->fw_softc.fwip_ifp; /* XXX error check */ FWIPDEBUG(ifp, "resp = %d\n", xfer->resp); if (xfer->resp != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(xfer->mbuf); fw_xfer_unload(xfer); s = splimp(); FWIP_LOCK(fwip); STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link); FWIP_UNLOCK(fwip); splx(s); /* for queue full */ if (ifp->if_snd.ifq_head != NULL) { fwip_start(ifp); } } static void fwip_start(struct ifnet *ifp) { struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; int s; FWIPDEBUG(ifp, "starting\n"); if (fwip->dma_ch < 0) { struct mbuf *m = NULL; FWIPDEBUG(ifp, "not ready\n"); s = splimp(); do { IF_DEQUEUE(&ifp->if_snd, m); if (m != NULL) m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } while (m != NULL); splx(s); return; } s = splimp(); ifp->if_drv_flags |= IFF_DRV_OACTIVE; if (ifp->if_snd.ifq_len != 0) fwip_async_output(fwip, ifp); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; splx(s); } /* Async. stream output */ static void fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp) { struct firewire_comm *fc = fwip->fd.fc; struct mbuf *m; struct m_tag *mtag; struct fw_hwaddr *destfw; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; uint16_t nodeid; int error; int i = 0; xfer = NULL; xferq = fc->atq; while ((xferq->queued < xferq->maxq - 1) && (ifp->if_snd.ifq_head != NULL)) { FWIP_LOCK(fwip); xfer = STAILQ_FIRST(&fwip->xferlist); if (xfer == NULL) { FWIP_UNLOCK(fwip); #if 0 printf("if_fwip: lack of xfer\n"); #endif break; } STAILQ_REMOVE_HEAD(&fwip->xferlist, link); FWIP_UNLOCK(fwip); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { FWIP_LOCK(fwip); STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link); FWIP_UNLOCK(fwip); break; } /* * Dig out the link-level address which * firewire_output got via arp or neighbour * discovery. If we don't have a link-level address, * just stick the thing on the broadcast channel. 
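 *
 * For orientation, the broadcast path below prepends a two-quadlet
 * GASP header to the payload (the same values appear further down in
 * this function):
 *
 *	p[0] = htonl(nodeid << 16);		(source node ID)
 *	p[1] = htonl((0x5e << 24) | 1);		(specifier 0x00005e, version 1)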
*/ mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0); if (mtag == NULL) - destfw = 0; + destfw = NULL; else destfw = (struct fw_hwaddr *) (mtag + 1); /* * We don't do any bpf stuff here - the generic code * in firewire_output gives the packet to bpf before * it adds the link-level encapsulation. */ /* * Put the mbuf in the xfer early in case we hit an * error case below - fwip_output_callback will free * the mbuf. */ xfer->mbuf = m; /* * We use the arp result (if any) to add a suitable firewire * packet header before handing off to the bus. */ fp = &xfer->send.hdr; nodeid = FWLOCALBUS | fc->nodeid; if ((m->m_flags & M_BCAST) || !destfw) { /* * Broadcast packets are sent as GASP packets with * specifier ID 0x00005e, version 1 on the broadcast * channel. To be conservative, we send at the * slowest possible speed. */ uint32_t *p; M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT); p = mtod(m, uint32_t *); fp->mode.stream.len = m->m_pkthdr.len; fp->mode.stream.chtag = broadcast_channel; fp->mode.stream.tcode = FWTCODE_STREAM; fp->mode.stream.sy = 0; xfer->send.spd = 0; p[0] = htonl(nodeid << 16); p[1] = htonl((0x5e << 24) | 1); } else { /* * Unicast packets are sent as block writes to the * target's unicast fifo address. If we can't * find the node address, we just give up. We * could broadcast it but that might overflow * the packet size limitations due to the * extra GASP header. Note: the hardware * address is stored in network byte order to * make life easier for ARP. */ struct fw_device *fd; struct fw_eui64 eui; eui.hi = ntohl(destfw->sender_unique_ID_hi); eui.lo = ntohl(destfw->sender_unique_ID_lo); if (fwip->last_dest.hi != eui.hi || fwip->last_dest.lo != eui.lo) { fd = fw_noderesolve_eui64(fc, &eui); if (!fd) { /* error */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX set error code */ fwip_output_callback(xfer); continue; } fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst; fwip->last_hdr.mode.wreqb.tlrt = 0; fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB; fwip->last_hdr.mode.wreqb.pri = 0; fwip->last_hdr.mode.wreqb.src = nodeid; fwip->last_hdr.mode.wreqb.dest_hi = ntohs(destfw->sender_unicast_FIFO_hi); fwip->last_hdr.mode.wreqb.dest_lo = ntohl(destfw->sender_unicast_FIFO_lo); fwip->last_hdr.mode.wreqb.extcode = 0; fwip->last_dest = eui; } fp->mode.wreqb = fwip->last_hdr.mode.wreqb; fp->mode.wreqb.len = m->m_pkthdr.len; xfer->send.spd = min(destfw->sspd, fc->speed); } xfer->send.pay_len = m->m_pkthdr.len; error = fw_asyreq(fc, -1, xfer); if (error == EAGAIN) { /* * We ran out of tlabels - requeue the packet * for later transmission. */ xfer->mbuf = 0; FWIP_LOCK(fwip); STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link); FWIP_UNLOCK(fwip); IF_PREPEND(&ifp->if_snd, m); break; } if (error) { /* error */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX set error code */ fwip_output_callback(xfer); continue; } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); i++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (i > 0) xferq->start(fc); } static void fwip_start_send (void *arg, int count) { struct fwip_softc *fwip = arg; fwip->fd.fc->atq->start(fwip->fd.fc); } /* Async. 
stream output */ static void fwip_stream_input(struct fw_xferq *xferq) { struct mbuf *m, *m0; struct m_tag *mtag; struct ifnet *ifp; struct fwip_softc *fwip; struct fw_bulkxfer *sxfer; struct fw_pkt *fp; uint16_t src; uint32_t *p; fwip = (struct fwip_softc *)xferq->sc; ifp = fwip->fw_softc.fwip_ifp; while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) { STAILQ_REMOVE_HEAD(&xferq->stvalid, link); fp = mtod(sxfer->mbuf, struct fw_pkt *); if (fwip->fd.fc->irx_post != NULL) fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld); m = sxfer->mbuf; /* insert new rbuf */ sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m0 != NULL) { m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size; STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link); } else printf("fwip_as_input: m_getcl failed\n"); /* * We must have a GASP header - leave the * encapsulation sanity checks to the generic * code. Remember that we also have the firewire async * stream header even though that isn't accounted for * in mode.stream.len. */ if (sxfer->resp != 0 || fp->mode.stream.len < 2*sizeof(uint32_t)) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } m->m_len = m->m_pkthdr.len = fp->mode.stream.len + sizeof(fp->mode.stream); /* * If we received the packet on the broadcast channel, * mark it as broadcast, otherwise we assume it must * be multicast. */ if (fp->mode.stream.chtag == broadcast_channel) m->m_flags |= M_BCAST; else m->m_flags |= M_MCAST; /* * Make sure we recognise the GASP specifier and * version. */ p = mtod(m, uint32_t *); if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e || (ntohl(p[2]) & 0xffffff) != 1) { FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n", ntohl(p[1]), ntohl(p[2])); m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } /* * Record the sender ID for possible BPF usage. */ src = ntohl(p[1]) >> 16; if (bpf_peers_present(ifp->if_bpf)) { mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID, 2*sizeof(uint32_t), M_NOWAIT); if (mtag) { /* bpf wants it in network byte order */ struct fw_device *fd; uint32_t *p = (uint32_t *) (mtag + 1); fd = fw_noderesolve_nodeid(fwip->fd.fc, src & 0x3f); if (fd) { p[0] = htonl(fd->eui.hi); p[1] = htonl(fd->eui.lo); } else { p[0] = 0; p[1] = 0; } m_tag_prepend(m, mtag); } } /* * Trim off the GASP header */ m_adj(m, 3*sizeof(uint32_t)); m->m_pkthdr.rcvif = ifp; firewire_input(ifp, m, src); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } if (STAILQ_FIRST(&xferq->stfree) != NULL) fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch); } static __inline void fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer) { struct mbuf *m; /* * We have finished with a unicast xfer. Allocate a new * cluster and stick it on the back of the input queue. */ m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); xfer->mbuf = m; xfer->recv.payload = mtod(m, uint32_t *); xfer->recv.pay_len = MCLBYTES; xfer->mbuf = m; STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link); } static void fwip_unicast_input(struct fw_xfer *xfer) { uint64_t address; struct mbuf *m; struct m_tag *mtag; struct ifnet *ifp; struct fwip_softc *fwip; struct fw_pkt *fp; //struct fw_pkt *sfp; int rtcode; fwip = (struct fwip_softc *)xfer->sc; ifp = fwip->fw_softc.fwip_ifp; m = xfer->mbuf; xfer->mbuf = 0; fp = &xfer->recv.hdr; /* * Check the fifo address - we only accept addresses of * exactly INET_FIFO. 
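 *
 * The 48-bit destination offset arrives split across the request
 * header and is reassembled before the comparison, mirroring the
 * statement just below:
 *
 *	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32) |
 *	    fp->mode.wreqb.dest_lo;
 *
 * A wrong transaction code is answered with FWRCODE_ER_TYPE and a
 * wrong offset with FWRCODE_ER_ADDR; only an exact match completes.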
*/ address = ((uint64_t)fp->mode.wreqb.dest_hi << 32) | fp->mode.wreqb.dest_lo; if (fp->mode.wreqb.tcode != FWTCODE_WREQB) { rtcode = FWRCODE_ER_TYPE; } else if (address != INET_FIFO) { rtcode = FWRCODE_ER_ADDR; } else { rtcode = FWRCODE_COMPLETE; } /* * Pick up a new mbuf and stick it on the back of the receive * queue. */ fwip_unicast_input_recycle(fwip, xfer); /* * If we've already rejected the packet, give up now. */ if (rtcode != FWRCODE_COMPLETE) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } if (bpf_peers_present(ifp->if_bpf)) { /* * Record the sender ID for possible BPF usage. */ mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID, 2*sizeof(uint32_t), M_NOWAIT); if (mtag) { /* bpf wants it in network byte order */ struct fw_device *fd; uint32_t *p = (uint32_t *) (mtag + 1); fd = fw_noderesolve_nodeid(fwip->fd.fc, fp->mode.wreqb.src & 0x3f); if (fd) { p[0] = htonl(fd->eui.hi); p[1] = htonl(fd->eui.lo); } else { p[0] = 0; p[1] = 0; } m_tag_prepend(m, mtag); } } /* * Hand off to the generic encapsulation code. We don't use * ifp->if_input so that we can pass the source nodeid as an * argument to facilitate link-level fragment reassembly. */ m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len; m->m_pkthdr.rcvif = ifp; firewire_input(ifp, m, fp->mode.wreqb.src); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } static devclass_t fwip_devclass; static device_method_t fwip_methods[] = { /* device interface */ DEVMETHOD(device_identify, fwip_identify), DEVMETHOD(device_probe, fwip_probe), DEVMETHOD(device_attach, fwip_attach), DEVMETHOD(device_detach, fwip_detach), { 0, 0 } }; static driver_t fwip_driver = { "fwip", fwip_methods, sizeof(struct fwip_softc), }; DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0); MODULE_VERSION(fwip, 1); MODULE_DEPEND(fwip, firewire, 1, 1, 1); Index: head/sys/dev/hptiop/hptiop.c =================================================================== --- head/sys/dev/hptiop/hptiop.c (revision 313981) +++ head/sys/dev/hptiop/hptiop.c (revision 313982) @@ -1,2854 +1,2854 @@ /* * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const char driver_name[] = "hptiop"; static const char driver_version[] = "v1.9"; static devclass_t hptiop_devclass; static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec); static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_rescan_bus(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba); static int hptiop_get_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba); static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba); static void 
hptiop_disable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); static int hptiop_probe(device_t dev); static int hptiop_attach(device_t dev); static int hptiop_detach(device_t dev); static int hptiop_shutdown(device_t dev); static void hptiop_action(struct cam_sim *sim, union ccb *ccb); static void hptiop_poll(struct cam_sim *sim); static void hptiop_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hptiop_pci_intr(void *arg); static void hptiop_release_resource(struct hpt_iop_hba *hba); static void hptiop_reset_adapter(void *argv); static d_open_t hptiop_open; static d_close_t hptiop_close; static d_ioctl_t hptiop_ioctl; static struct cdevsw hptiop_cdevsw = { .d_open = hptiop_open, .d_close = hptiop_close, .d_ioctl = hptiop_ioctl, .d_name = driver_name, .d_version = D_VERSION, }; #define hba_from_dev(dev) \ ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value) #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset)) static int hptiop_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); if (hba==NULL) return ENXIO; if (hba->flag & HPT_IOCTL_FLAG_OPEN) return EBUSY; hba->flag |= HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int flags, ioctl_thread_t proc) { int ret = EFAULT; struct hpt_iop_hba *hba = hba_from_dev(dev); mtx_lock(&Giant); switch (cmd) { case HPT_DO_IOCONTROL: ret = hba->ops->do_ioctl(hba, (struct hpt_iop_ioctl_param *)data); break; case HPT_SCAN_BUS: ret = hptiop_rescan_bus(hba); break; } mtx_unlock(&Giant); return ret; } static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) { u_int64_t p; u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); if (outbound_tail != outbound_head) { bus_space_read_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, outbound_q[outbound_tail]), (u_int32_t *)&p, 2); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); return p; } else 
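/*
 * Aside: hptiop_mv_outbound_read() above is a classic single-consumer
 * ring -- compare tail against head, read the 64-bit entry, advance the
 * tail with wraparound, then publish the new tail back to the device.
 * A minimal sketch of that consume step, with hypothetical names (ring,
 * tail, head, LEN are illustrative, not driver symbols):
 *
 *   uint64_t ring_consume(uint64_t *ring, uint32_t *tail, uint32_t head)
 *   {
 *       uint64_t v;
 *       if (*tail == head)
 *           return (0);          // empty: nothing produced yet
 *       v = ring[*tail];         // entry at the consumer index
 *       if (++*tail == LEN)      // advance, wrapping at the end
 *           *tail = 0;
 *       return (v);              // caller writes *tail back to hardware
 *   }
 */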
return 0; } static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) { u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); u_int32_t head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; bus_space_write_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), (u_int32_t *)&p, 2); BUS_SPACE_WRT4_MV2(inbound_head, head); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); } static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MV2(inbound_msg, msg); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg); BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a); } static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) { u_int32_t req=0; int i; for (i = 0; i < millisec; i++) { req = BUS_SPACE_RD4_ITL(inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; DELAY(1000); } if (req!=IOPMU_QUEUE_EMPTY) { BUS_SPACE_WRT4_ITL(outbound_queue, req); BUS_SPACE_RD4_ITL(outbound_intstatus); return 0; } return -1; } static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, u_int32_t index) { struct hpt_iop_srb *srb; - struct hpt_iop_request_scsi_command *req=0; + struct hpt_iop_request_scsi_command *req=NULL; union ccb *ccb; u_int8_t *cdb; u_int32_t result, temp, dxfer; u_int64_t temp64; if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { srb = hba->srb[index & ~(u_int32_t) (IOPMU_QUEUE_ADDR_HOST_BIT | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; req = (struct hpt_iop_request_scsi_command *)srb; if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) result = IOP_RESULT_SUCCESS; else result = req->header.result; } else { srb = hba->srb[index & ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; result = req->header.result; } dxfer = req->dataxfer_length; goto srb_complete; } /*iop req*/ temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, type)); result = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, result)); switch(temp) { case IOP_REQUEST_TYPE_IOCTL_COMMAND: { temp64 = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); wakeup((void *)((unsigned long)hba->u.itl.mu + index)); break; } case IOP_REQUEST_TYPE_SCSI_COMMAND: bus_space_read_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); srb = (struct hpt_iop_srb *)(unsigned long)temp64; dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, dataxfer_length)); srb_complete: ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = 
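/*
 * In hptiop_request_callback_itl() above, a single 32-bit completion tag
 * does double duty: flag bits mark a host-originated request and, on
 * newer firmware, carry the success result inline, while the remaining
 * bits index the srb table.  Hedged sketch of the decode, with made-up
 * names standing in for the driver's IOPMU_QUEUE_* flags:
 *
 *   #define TAG_HOST_BIT   0x80000000u  // host request, not an iop offset
 *   #define TAG_RESULT_BIT 0x40000000u  // success already reported in tag
 *
 *   srb = srb_table[tag & ~(TAG_HOST_BIT | TAG_RESULT_BIT)];
 *   result = (tag & TAG_RESULT_BIT) ?
 *       IOP_RESULT_SUCCESS : srb_request(srb)->header.result;
 */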
ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } switch (result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (dxfer < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - dxfer; else ccb->csio.sense_resid = 0; if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ bus_space_read_region_1(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, sg_list), (u_int8_t *)&ccb->csio.sense_data, MIN(dxfer, sizeof(ccb->csio.sense_data))); } else { memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(dxfer, sizeof(ccb->csio.sense_data))); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) BUS_SPACE_WRT4_ITL(outbound_queue, index); ccb->csio.resid = ccb->csio.dxfer_len - dxfer; hptiop_free_srb(hba, srb); xpt_done(ccb); break; } } static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) { u_int32_t req, temp; while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header *p; p = (struct hpt_iop_request_header *) ((char *)hba->u.itl.mu + req); temp = bus_space_read_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, flags)); if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { u_int64_t temp64; bus_space_read_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) { hptiop_request_callback_itl(hba, req); } else { temp64 = 1; bus_space_write_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); } } else hptiop_request_callback_itl(hba, req); } } } static int hptiop_intr_itl(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_ITL(outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); KdPrint(("hptiop: received outbound msg %x\n", msg)); BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return ret; } static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, u_int64_t _tag) { u_int32_t context = (u_int32_t)_tag; if (context & MVIOP_CMD_TYPE_SCSI) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; srb = hba->srb[context >> 
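/*
 * The IOP_RESULT_* to CAM status switch seen here is repeated verbatim
 * in the ITL, MV and MVFrey callbacks.  One possible table-driven
 * refactoring (a hedged sketch only, assuming the IOP_RESULT_* codes are
 * small distinct integers; not the driver's actual code):
 *
 *   static const cam_status result_to_cam[] = {
 *       [IOP_RESULT_SUCCESS]         = CAM_REQ_CMP,
 *       [IOP_RESULT_BAD_TARGET]      = CAM_DEV_NOT_THERE,
 *       [IOP_RESULT_BUSY]            = CAM_BUSY,
 *       [IOP_RESULT_INVALID_REQUEST] = CAM_REQ_INVALID,
 *       [IOP_RESULT_FAIL]            = CAM_SCSI_STATUS_ERROR,
 *       [IOP_RESULT_RESET]           = CAM_BUSY,
 *   };
 *
 * IOP_RESULT_CHECK_CONDITION would still need its sense-data special case.
 */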
MVIOP_REQUEST_NUMBER_START_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); } else if (context & MVIOP_CMD_TYPE_IOCTL) { struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup(req); } else if (context & (MVIOP_CMD_TYPE_SET_CONFIG | MVIOP_CMD_TYPE_GET_CONFIG)) hba->config_done = 1; else { device_printf(hba->pcidev, "wrong callback type\n"); } } static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba, u_int32_t _tag) { u_int32_t req_type = _tag & 0xf; struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->config_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: srb = hba->srb[(_tag >> 4) & 0xff]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; callout_stop(&srb->timeout); if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
*/ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); break; case IOP_REQUEST_TYPE_IOCTL_COMMAND: if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr); break; default: device_printf(hba->pcidev, "wrong callback type\n"); break; } } static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) { u_int64_t req; while ((req = hptiop_mv_outbound_read(hba))) { if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { hptiop_request_callback_mv(hba, req); } } } } static int hptiop_intr_mv(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_MV0(outbound_doorbell); if (status) BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); KdPrint(("hptiop: received outbound msg %x\n", msg)); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_mv(hba); ret = 1; } return ret; } static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba) { u_int32_t status, _tag, cptr; int ret = 0; if (hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); } status = BUS_SPACE_RD4_MVFREY2(f0_doorbell); if (status) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status); if (status & CPU_TO_F0_DRBL_MSG_A_BIT) { u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a); hptiop_os_message_callback(hba, msg); } ret = 1; } status = BUS_SPACE_RD4_MVFREY2(isr_cause); if (status) { BUS_SPACE_WRT4_MVFREY2(isr_cause, status); do { cptr = *hba->u.mvfrey.outlist_cptr & 0xff; while (hba->u.mvfrey.outlist_rptr != cptr) { hba->u.mvfrey.outlist_rptr++; if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) { hba->u.mvfrey.outlist_rptr = 0; } _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val; hptiop_request_callback_mvfrey(hba, _tag); ret = 2; } } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); } if 
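/*
 * The mvfrey drain loop just above chases a shadow consumer pointer that
 * the controller DMAs into host memory (*outlist_cptr), then re-reads it
 * after draining to close the race with completions posted mid-loop.
 * Shape of the pattern, names hypothetical:
 *
 *   do {
 *       snap = *shadow & 0xff;          // index the device last produced
 *       while (rptr != snap) {
 *           rptr = (rptr + 1) % count;  // consume the next completion
 *           handle(outlist[rptr].val);
 *       }
 *   } while (snap != (*shadow & 0xff)); // anything arrive meanwhile?
 */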
(hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); } return ret; } static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, u_int32_t req32, u_int32_t millisec) { u_int32_t i; u_int64_t temp64; BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); for (i = 0; i < millisec; i++) { hptiop_intr_itl(hba); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i; u_int64_t phy_addr; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy | (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; ((struct hpt_iop_request_get_config *)req)->header.flags |= IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT; hptiop_mv_inbound_write(phy_addr, hba); BUS_SPACE_RD4_MV0(outbound_intmask); for (i = 0; i < millisec; i++) { hptiop_intr_mv(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i, index; u_int64_t phy_addr; struct hpt_iop_request_header *reqhdr = (struct hpt_iop_request_header *)req; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy; reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); reqhdr->context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); for (i = 0; i < millisec; i++) { hptiop_intr_mvfrey(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec) { u_int32_t i; hba->msg_done = 0; hba->ops->post_msg(hba, msg); for (i=0; i<millisec; i++) { hba->ops->iop_intr(hba); if (hba->msg_done) break; DELAY(1000); } return hba->msg_done?
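/*
 * All four synchronous helpers above share one idiom: post the request,
 * then poll the interrupt handler by hand once per millisecond until a
 * done flag flips or the budget runs out.  Reduced to its skeleton
 * (names illustrative):
 *
 *   post(hw, msg);
 *   for (i = 0; i < timeout_ms; i++) {
 *       poll_intr(hw);       // run completions; may set 'done'
 *       if (done)
 *           return (0);
 *       DELAY(1000);         // busy-wait 1 ms, usable without sleeping
 *   }
 *   return (-1);             // timed out
 */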
0 : -1; } static int hptiop_get_config_itl(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { u_int32_t req32; config->header.size = sizeof(struct hpt_iop_request_get_config); config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_header) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } bus_space_read_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_get_config) >> 2); BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_get_config_mv(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_get_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } *config = *req; return 0; } static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; if (info->header.size != sizeof(struct hpt_iop_request_get_config) || info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) { KdPrint(("hptiop: header size %x/%x type %x/%x", info->header.size, (int)sizeof(struct hpt_iop_request_get_config), info->header.type, IOP_REQUEST_TYPE_GET_CONFIG)); return -1; } config->interface_version = info->interface_version; config->firmware_version = info->firmware_version; config->max_requests = info->max_requests; config->request_size = info->request_size; config->max_sg_count = info->max_sg_count; config->data_transfer_length = info->data_transfer_length; config->alignment_mask = info->alignment_mask; config->max_devices = info->max_devices; config->sdram_size = info->sdram_size; KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x", config->max_requests, config->request_size, config->data_transfer_length, config->max_devices, config->sdram_size)); return 0; } static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { u_int32_t req32; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; config->header.size = sizeof(struct hpt_iop_request_set_config); config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_set_config) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct 
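/*
 * hptiop_get_config_itl() above is a round trip through device memory:
 * only the request header is copied into the inbound slot (header size
 * >> 2 because the region ops move 32-bit words), and once the sync
 * request completes within its 20-second budget the full structure is
 * read back from the same offset.  Shape, names illustrative:
 *
 *   write_region_4(slot, &req, sizeof(req.header) / 4);  // header only
 *   if (send_sync(slot, 20000))
 *       return (-1);
 *   read_region_4(slot, &cfg, sizeof(cfg) / 4);          // harvest reply
 */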
hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams) { u_int64_t temp64; struct hpt_iop_request_ioctl_command req; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; req.header.result = IOP_RESULT_PENDING; req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req.inbuf_size = pParams->nInBufferSize; req.outbuf_size = pParams->nOutBufferSize; req.bytes_returned = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); hptiop_lock_adapter(hba); BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); while (temp64) { if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); } hptiop_unlock_adapter(hba); return 0; } static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { if (copyin((u_int8_t *)user + i, &byte, 1)) return -1; bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); } return 0; } static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); if (copyout(&byte, (u_int8_t *)user + i, 1)) return -1; } return 0; } static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams) { u_int32_t req32; u_int32_t result; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return EFAULT; if
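/*
 * The byte-at-a-time bridges above are deliberate, not an oversight:
 * copyin()/copyout() can fault and must work on ordinary kernel memory,
 * while the ITL request slot lives behind a BAR and may only be touched
 * through bus_space_read_1()/bus_space_write_1(), so every byte bounces
 * through a stack variable on its way between user space and the device.
 */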
(pParams->nInBufferSize) if (hptiop_bus_space_copyin(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf), (void *)pParams->lpInBuffer, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) goto invalid; result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.result)); if (result == IOP_RESULT_SUCCESS) { if (pParams->nOutBufferSize) if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf) + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) { if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), (void *)pParams->lpBytesReturned, sizeof(unsigned long))) goto invalid; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } else{ invalid: BUS_SPACE_WRT4_ITL(outbound_queue, req32); return EFAULT; } } static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t req_phy; int size = 0; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; size = imin(3, size); req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; hptiop_mv_inbound_write(req_phy, hba); BUS_SPACE_RD4_MV0(outbound_intmask); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mv(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t phy_addr; u_int32_t index; phy_addr = hba->ctlcfgcmd_phy; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { 
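/*
 * hptiop_post_ioctl_command_mv() above packs metadata into a single
 * 64-bit inbound entry: the request's bus address comes from an aligned
 * allocation, so its low bits are free to carry the host-ownership flag
 * plus a coarse size class (header.size >> 8, clamped with imin(3, ...)),
 * which the controller presumably masks off before treating the entry as
 * an address.  Sketch, reusing the names from the code:
 *
 *   size = imin(3, req->header.size >> 8);
 *   entry = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
 *   hptiop_mv_inbound_write(entry, hba);
 */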
device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); req->header.context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_rescan_bus(struct hpt_iop_hba * hba) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); return(0); } static bus_dmamap_callback_t hptiop_map_srb; static bus_dmamap_callback_t hptiop_post_scsi_command; static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg; static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop base adrress.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.itl.mu = (struct hpt_iopmu_itl *) rman_get_virtual(hba->bar0_res); if (!hba->u.itl.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc mem res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, 
SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mv.regs = (struct hpt_iopmv_regs *) rman_get_virtual(hba->bar0_res); if (!hba->u.mv.regs) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); if (!hba->u.mv.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mvfrey.config = (struct hpt_iop_request_get_config *) rman_get_virtual(hba->bar0_res); if (!hba->u.mvfrey.config) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res); if (!hba->u.mvfrey.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); } static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) { if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 0x800 - 0x8, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, 
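/*
 * The control-block allocation here follows the standard three-step
 * busdma recipe: create a tag describing the constraints, allocate
 * coherent memory against it, then load the map so the callback learns
 * the bus address.  Skeleton with the matching reverse-order unwinding:
 *
 *   if (bus_dma_tag_create(parent, ..., &tag))
 *       return (-1);
 *   if (bus_dmamem_alloc(tag, &vaddr,
 *       BUS_DMA_WAITOK | BUS_DMA_COHERENT, &map)) {
 *       bus_dma_tag_destroy(tag);          // undo step 1
 *       return (-1);
 *   }
 *   if (bus_dmamap_load(tag, map, vaddr, size, callback, arg, 0)) {
 *       bus_dmamem_free(tag, vaddr, map);  // undo step 2
 *       bus_dma_tag_destroy(tag);
 *       return (-1);
 *   }
 */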
NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, MVIOP_IOCTLCFG_SIZE, hptiop_mv_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba) { u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl); list_count >>= 16; if (list_count == 0) { return -1; } hba->u.mvfrey.list_count = list_count; hba->u.mvfrey.internal_mem_size = 0x800 + list_count * sizeof(struct mvfrey_inlist_entry) + list_count * sizeof(struct mvfrey_outlist_entry) + sizeof(int); if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, hba->u.mvfrey.internal_mem_size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, hba->u.mvfrey.internal_mem_size, hptiop_mvfrey_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) { return 0; } static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba) { u_int32_t i = 100; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) return -1; /* wait 100ms for MCU ready */ while(i--) { DELAY(1000); } BUS_SPACE_WRT4_MVFREY2(inbound_base, hba->u.mvfrey.inlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(inbound_base_high, (hba->u.mvfrey.inlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_base, hba->u.mvfrey.outlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_base_high, (hba->u.mvfrey.outlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base, hba->u.mvfrey.outlist_cptr_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high, (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16); hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1; return 0; } /* 
* CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hptiop_probe), DEVMETHOD(device_attach, hptiop_attach), DEVMETHOD(device_detach, hptiop_detach), DEVMETHOD(device_shutdown, hptiop_shutdown), { 0, 0 } }; static struct hptiop_adapter_ops hptiop_itl_ops = { .family = INTEL_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_itl, .internal_memalloc = 0, .internal_memfree = hptiop_internal_memfree_itl, .alloc_pci_res = hptiop_alloc_pci_res_itl, .release_pci_res = hptiop_release_pci_res_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = hptiop_get_config_itl, .set_config = hptiop_set_config_itl, .iop_intr = hptiop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, .do_ioctl = hptiop_do_ioctl_itl, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mv_ops = { .family = MV_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mv, .internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .alloc_pci_res = hptiop_alloc_pci_res_mv, .release_pci_res = hptiop_release_pci_res_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = hptiop_get_config_mv, .set_config = hptiop_set_config_mv, .iop_intr = hptiop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, .do_ioctl = hptiop_do_ioctl_mv, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mvfrey_ops = { .family = MVFREY_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mvfrey, .internal_memalloc = hptiop_internal_memalloc_mvfrey, .internal_memfree = hptiop_internal_memfree_mvfrey, .alloc_pci_res = hptiop_alloc_pci_res_mvfrey, .release_pci_res = hptiop_release_pci_res_mvfrey, .enable_intr = hptiop_enable_intr_mvfrey, .disable_intr = hptiop_disable_intr_mvfrey, .get_config = hptiop_get_config_mvfrey, .set_config = hptiop_set_config_mvfrey, .iop_intr = hptiop_intr_mvfrey, .post_msg = hptiop_post_msg_mvfrey, .post_req = hptiop_post_req_mvfrey, .do_ioctl = hptiop_do_ioctl_mvfrey, .reset_comm = hptiop_reset_comm_mvfrey, }; static driver_t hptiop_pci_driver = { driver_name, driver_methods, sizeof(struct hpt_iop_hba) }; DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); MODULE_DEPEND(hptiop, cam, 1, 1, 1); static int hptiop_probe(device_t dev) { struct hpt_iop_hba *hba; u_int32_t id; static char buf[256]; int sas = 0; struct hptiop_adapter_ops *ops; if (pci_get_vendor(dev) != 0x1103) return (ENXIO); id = pci_get_device(dev); switch (id) { case 0x4520: case 0x4521: case 0x4522: sas = 1; case 0x3620: case 0x3622: case 0x3640: ops = &hptiop_mvfrey_ops; break; case 0x4210: case 0x4211: case 0x4310: case 0x4311: case 0x4320: case 0x4321: case 0x4322: sas = 1; case 0x3220: case 0x3320: case 0x3410: case 0x3520: case 0x3510: case 0x3511: case 0x3521: case 0x3522: case 0x3530: case 0x3540: case 0x3560: ops = &hptiop_itl_ops; break; case 0x3020: case 0x3120: case 0x3122: ops = &hptiop_mv_ops; break; default: return (ENXIO); } device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)); sprintf(buf, "RocketRAID %x %s Controller\n", id, sas ? 
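/*
 * Two things worth noting around hptiop_probe() above: the device-ID
 * switch uses deliberate fallthrough (the SAS cases set sas = 1 and then
 * fall into the ops assignment shared with their SATA siblings), and all
 * per-family behavior is funneled through the hptiop_adapter_ops tables,
 * so the rest of the driver dispatches uniformly:
 *
 *   hba->ops->post_msg(hba, msg);  // ITL, MV or MVFrey implementation
 *   hba->ops->iop_intr(hba);
 */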
"SAS" : "SATA"); device_set_desc_copy(dev, buf); hba = (struct hpt_iop_hba *)device_get_softc(dev); bzero(hba, sizeof(struct hpt_iop_hba)); hba->ops = ops; KdPrint(("hba->ops=%p\n", hba->ops)); return 0; } static int hptiop_attach(device_t dev) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; int rid = 0; struct cam_devq *devq; struct ccb_setasync ccb; u_int32_t unit = device_get_unit(dev); device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", unit, driver_version); KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), hba->ops)); pci_enable_busmaster(dev); hba->pcidev = dev; hba->pciunit = unit; if (hba->ops->alloc_pci_res(hba)) return ENXIO; if (hba->ops->iop_wait_ready(hba, 2000)) { device_printf(dev, "adapter is not ready\n"); goto release_pci_res; } mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF); if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->parent_dmat /* tag */)) { device_printf(dev, "alloc parent_dmat failed\n"); goto release_pci_res; } if (hba->ops->family == MV_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } } if (hba->ops->get_config(hba, &iop_config)) { device_printf(dev, "get iop config failed.\n"); goto get_config_failed; } hba->firmware_version = iop_config.firmware_version; hba->interface_version = iop_config.interface_version; hba->max_requests = iop_config.max_requests; hba->max_devices = iop_config.max_devices; hba->max_request_size = iop_config.request_size; hba->max_sg_count = iop_config.max_sg_count; if (hba->ops->family == MVFREY_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } if (hba->ops->reset_comm(hba)) { device_printf(dev, "reset comm failed\n"); goto get_config_failed; } } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ hba->max_sg_count, /* nsegments */ 0x20000, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &hba->lock, /* lockfuncarg */ &hba->io_dmat /* tag */)) { device_printf(dev, "alloc io_dmat failed\n"); goto get_config_failed; } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->srb_dmat /* tag */)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_io_dmat; } if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->srb_dmamap) != 0) { device_printf(dev, "srb bus_dmamem_alloc failed!\n"); goto destroy_srb_dmat; } if 
(bus_dmamap_load(hba->srb_dmat, hba->srb_dmamap, hba->uncached_ptr, (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, hptiop_map_srb, hba, 0)) { device_printf(dev, "bus_dmamap_load failed!\n"); goto srb_dmamem_free; } if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { device_printf(dev, "cam_simq_alloc failed\n"); goto srb_dmamap_unload; } hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, hba, unit, &hba->lock, hba->max_requests - 1, 1, devq); if (!hba->sim) { device_printf(dev, "cam_sim_alloc failed\n"); cam_simq_free(devq); goto srb_dmamap_unload; } hptiop_lock_adapter(hba); if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "xpt_bus_register failed\n"); goto free_cam_sim; } if (xpt_create_path(&hba->path, /*periph */ NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "xpt_create_path failed\n"); goto deregister_xpt_bus; } hptiop_unlock_adapter(hba); bzero(&set_config, sizeof(set_config)); set_config.iop_id = unit; set_config.vbus_id = cam_sim_path(hba->sim); set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; if (hba->ops->set_config(hba, &set_config)) { device_printf(dev, "set iop config failed.\n"); goto free_hba_path; } xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); rid = 0; if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "allocate irq failed!\n"); goto free_hba_path; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hptiop_pci_intr, hba, &hba->irq_handle)) { device_printf(dev, "allocate intr function failed!\n"); goto free_irq_resource; } if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { device_printf(dev, "fail to start background task\n"); goto teartown_irq_resource; } hba->ops->enable_intr(hba); hba->initialized = 1; hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit, UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); return 0; teartown_irq_resource: bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); free_irq_resource: bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); hptiop_lock_adapter(hba); free_hba_path: xpt_free_path(hba->path); deregister_xpt_bus: xpt_bus_deregister(cam_sim_path(hba->sim)); free_cam_sim: cam_sim_free(hba->sim, /*free devq*/ TRUE); hptiop_unlock_adapter(hba); srb_dmamap_unload: if (hba->uncached_ptr) bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); srb_dmamem_free: if (hba->uncached_ptr) bus_dmamem_free(hba->srb_dmat, hba->uncached_ptr, hba->srb_dmamap); destroy_srb_dmat: if (hba->srb_dmat) bus_dma_tag_destroy(hba->srb_dmat); destroy_io_dmat: if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); get_config_failed: hba->ops->internal_memfree(hba); destroy_parent_tag: if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); release_pci_res: if (hba->ops->release_pci_res) hba->ops->release_pci_res(hba); return ENXIO; } static int hptiop_detach(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int i; int error = EBUSY; hptiop_lock_adapter(hba); for (i = 0; i < hba->max_devices; i++) if (hptiop_os_query_remove_device(hba, i)) { device_printf(dev, "%d file system is busy. 
id=%d", hba->pciunit, i); goto out; } if ((error = hptiop_shutdown(dev)) != 0) goto out; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) goto out; hptiop_unlock_adapter(hba); hptiop_release_resource(hba); return (0); out: hptiop_unlock_adapter(hba); return error; } static int hptiop_shutdown(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int error = 0; if (hba->flag & HPT_IOCTL_FLAG_OPEN) { device_printf(dev, "%d device is busy", hba->pciunit); return EBUSY; } hba->ops->disable_intr(hba); if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) error = EBUSY; return error; } static void hptiop_pci_intr(void *arg) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; hptiop_lock_adapter(hba); hba->ops->iop_intr(hba); hptiop_unlock_adapter(hba); } static void hptiop_poll(struct cam_sim *sim) { struct hpt_iop_hba *hba; hba = cam_sim_softc(sim); hba->ops->iop_intr(hba); } static void hptiop_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { } static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_ITL(outbound_intmask, ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); } static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG; BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); } static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_reset_adapter(void *argv) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000)) return; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000); } static void *hptiop_get_srb(struct hpt_iop_hba * hba) { struct hpt_iop_srb * srb; if (hba->srb_list) { srb = hba->srb_list; hba->srb_list = srb->next; return srb; } return NULL; } static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) { srb->next = hba->srb_list; hba->srb_list = srb; } static void hptiop_action(struct cam_sim *sim, union ccb *ccb) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); struct hpt_iop_srb * srb; int error; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (ccb->ccb_h.target_lun != 0 || 
ccb->ccb_h.target_id >= hba->max_devices || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } if ((srb = hptiop_get_srb(hba)) == NULL) { device_printf(hba->pcidev, "srb allocation failed"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); return; } srb->ccb = ccb; error = bus_dmamap_load_ccb(hba->io_dmat, srb->dma_map, ccb, hptiop_post_scsi_command, srb, 0); if (error && error != EINPROGRESS) { device_printf(hba->pcidev, "%d bus_dmamap_load error %d", hba->pciunit, error); xpt_freeze_simq(hba->sim, 1); ccb->ccb_h.status = CAM_REQ_CMP_ERR; hptiop_free_srb(hba, srb); xpt_done(ccb); return; } return; case XPT_RESET_BUS: device_printf(hba->pcidev, "reset adapter"); hba->msg_done = 0; hptiop_reset_adapter(hba); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = hba->max_devices; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = hba->max_devices; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx; union ccb *ccb = srb->ccb; u_int8_t *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("ccb=%p %x-%x-%x\n", ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { u_int32_t iop_req32; struct hpt_iop_request_scsi_command req; iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (iop_req32 == IOPMU_QUEUE_EMPTY) { device_printf(hba->pcidev, "invalid req offset\n"); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req.sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req.cdb, ccb->csio.cdb_len); req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req.header.flags = 0; req.header.result = IOP_RESULT_PENDING; req.header.context = (u_int64_t)(unsigned long)srb; req.dataxfer_length = ccb->csio.dxfer_len; req.channel = 0; req.target = ccb->ccb_h.target_id; req.lun = ccb->ccb_h.target_lun; bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32, (u_int8_t *)&req, req.header.size); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32); } else { struct 
hpt_iop_request_scsi_command *req; req = (struct hpt_iop_request_scsi_command *)srb; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req->header.context = (u_int64_t)srb->index | IOPMU_QUEUE_ADDR_HOST_BIT; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); } if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { u_int32_t size_bits; if (req->header.size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (req->header.size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr | size_bits); } else BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr |IOPMU_QUEUE_ADDR_HOST_BIT); } } static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, size; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.context = (u_int64_t)srb->index << MVIOP_REQUEST_NUMBER_START_BIT | MVIOP_CMD_TYPE_SCSI; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; hptiop_mv_inbound_write(req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | imin(3, size), hba); } static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, index; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; 
idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((req_phy >> 16) & 0xffff0000); req->header.context = ((req_phy & 0xffffffff) << 32 ) | srb->index << 4 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = req_phy; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) { callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba); } } static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg; union ccb *ccb = srb->ccb; struct hpt_iop_hba *hba = srb->hba; if (error || nsegs > hba->max_sg_count) { KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n", ccb->ccb_h.func_code, ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, nsegs)); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } hba->ops->post_req(hba, srb, segs, nsegs); } static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); } static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; char *p; u_int64_t phy; u_int32_t list_count = hba->u.mvfrey.list_count; phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); hba->ctlcfgcmd_phy = phy; hba->ctlcfg_ptr = p; p += 0x800; phy += 0x800; hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; hba->u.mvfrey.inlist_phy = phy; p += list_count * sizeof(struct mvfrey_inlist_entry); phy += list_count * sizeof(struct mvfrey_inlist_entry); hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; hba->u.mvfrey.outlist_phy = phy; p += list_count * sizeof(struct mvfrey_outlist_entry); phy += list_count * sizeof(struct mvfrey_outlist_entry); hba->u.mvfrey.outlist_cptr = (u_int32_t *)p; hba->u.mvfrey.outlist_cptr_phy = phy; } static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs, int nsegs, int 
error) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F; struct hpt_iop_srb *srb, *tmp_srb; int i; if (error || nsegs == 0) { device_printf(hba->pcidev, "hptiop_map_srb error"); return; } /* map srb */ srb = (struct hpt_iop_srb *) (((unsigned long)hba->uncached_ptr + 0x1F) & ~(unsigned long)0x1F); for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { tmp_srb = (struct hpt_iop_srb *) ((char *)srb + i * HPT_SRB_MAX_SIZE); if (((unsigned long)tmp_srb & 0x1F) == 0) { if (bus_dmamap_create(hba->io_dmat, 0, &tmp_srb->dma_map)) { device_printf(hba->pcidev, "dmamap create failed"); return; } bzero(tmp_srb, sizeof(struct hpt_iop_srb)); tmp_srb->hba = hba; tmp_srb->index = i; if (hba->ctlcfg_ptr == 0) {/*itl iop*/ tmp_srb->phy_addr = (u_int64_t)(u_int32_t) (phy_addr >> 5); if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G) tmp_srb->srb_flag = HPT_SRB_FLAG_HIGH_MEM_ACESS; } else { tmp_srb->phy_addr = phy_addr; } callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0); hptiop_free_srb(hba, tmp_srb); hba->srb[i] = tmp_srb; phy_addr += HPT_SRB_MAX_SIZE; } else { device_printf(hba->pcidev, "invalid alignment"); return; } } } static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg) { hba->msg_done = 1; } static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba, int target_id) { struct cam_periph *periph = NULL; struct cam_path *path; int status, retval = 0; status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0); if (status == CAM_REQ_CMP) { if ((periph = cam_periph_find(path, "da")) != NULL) { if (periph->refcount >= 1) { device_printf(hba->pcidev, "%d ," "target_id=0x%x," "refcount=%d", hba->pciunit, target_id, periph->refcount); retval = -1; } } xpt_free_path(path); } return retval; } static void hptiop_release_resource(struct hpt_iop_hba *hba) { int i; if (hba->ioctl_dev) destroy_dev(hba->ioctl_dev); if (hba->path) { struct ccb_setasync ccb; xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = 0; ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); xpt_free_path(hba->path); } if (hba->irq_handle) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); if (hba->sim) { hptiop_lock_adapter(hba); xpt_bus_deregister(cam_sim_path(hba->sim)); cam_sim_free(hba->sim, TRUE); hptiop_unlock_adapter(hba); } if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { struct hpt_iop_srb *srb = hba->srb[i]; if (srb->dma_map) bus_dmamap_destroy(hba->io_dmat, srb->dma_map); callout_drain(&srb->timeout); } if (hba->srb_dmat) { bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap); bus_dma_tag_destroy(hba->srb_dmat); } if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); if (hba->irq_res) bus_release_resource(hba->pcidev, SYS_RES_IRQ, 0, hba->irq_res); if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); mtx_destroy(&hba->lock); } Index: head/sys/dev/hptmv/entry.c =================================================================== --- head/sys/dev/hptmv/entry.c (revision 313981) +++ 
head/sys/dev/hptmv/entry.c (revision 313982) @@ -1,2988 +1,2988 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include #ifdef DEBUG #ifdef DEBUG_LEVEL int hpt_dbg_level = DEBUG_LEVEL; #else int hpt_dbg_level = 0; #endif #endif #define MV_ERROR printf /* * CAM SIM entry points */ static int hpt_probe (device_t dev); static void launch_worker_thread(void); static int hpt_attach(device_t dev); static int hpt_detach(device_t dev); static int hpt_shutdown(device_t dev); static void hpt_poll(struct cam_sim *sim); static void hpt_intr(void *arg); static void hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hpt_action(struct cam_sim *sim, union ccb *ccb); static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { __str(PROC_DIR_NAME), driver_methods, sizeof(IAL_ADAPTER_T) }; static devclass_t hpt_devclass; #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) __DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); MODULE_DEPEND(PROC_DIR_NAME, cam, 1, 1, 1); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev); static void HPTLIBAPI OsSendCommand (_VBUS_ARG union ccb * ccb); static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd); static void ccb_done(union ccb *ccb); static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_intr_locked(IAL_ADAPTER_T *pAdapter); static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void handleEdmaError(_VBUS_ARG PCommand pCmd); static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, 
MV_U8 channelNum); static int fResetActiveCommands(PVBus _vbus_p); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter); static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_handle_event_disconnect(void *data); static void hptmv_handle_event_connect(void *data); static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel); static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel); static int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical); static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct); static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static struct sx hptmv_list_lock; SX_SYSINIT(hptmv_list_lock, &hptmv_list_lock, "hptmv list"); -IAL_ADAPTER_T *gIal_Adapter = 0; -IAL_ADAPTER_T *pCurAdapter = 0; +IAL_ADAPTER_T *gIal_Adapter = NULL; +IAL_ADAPTER_T *pCurAdapter = NULL; static MV_SATA_CHANNEL gMvSataChannels[MAX_VBUS][MV_SATA_CHANNELS_NUM]; typedef struct st_HPT_DPC { IAL_ADAPTER_T *pAdapter; void (*dpc)(IAL_ADAPTER_T *, void *, UCHAR); void *arg; UCHAR flags; } ST_HPT_DPC; #define MAX_DPC 16 UCHAR DPC_Request_Nums = 0; static ST_HPT_DPC DpcQueue[MAX_DPC]; static int DpcQueue_First=0; static int DpcQueue_Last = 0; static struct mtx DpcQueue_Lock; MTX_SYSINIT(hpmtv_dpc_lock, &DpcQueue_Lock, "hptmv dpc", MTX_DEF); char DRIVER_VERSION[] = "v1.16"; /******************************************************************************* * Name: hptmv_free_channel * * Description: free allocated queues for the given channel * * Parameters: pMvSataAdapter - pointer to the RR18xx controller this * channel connected to. * channelNum - channel number. 
* ******************************************************************************/ static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { HPT_ASSERT(channelNum < MV_SATA_CHANNELS_NUM); pAdapter->mvSataAdapter.sataChannel[channelNum] = NULL; } static void failDevice(PVDevice pVDev) { PVBus _vbus_p = pVDev->pVBus; IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; pVDev->u.disk.df_on_line = 0; pVDev->vf_online = 0; if (pVDev->pfnDeviceFailed) CallWhenIdle(_VBUS_P (DPC_PROC)pVDev->pfnDeviceFailed, pVDev); fNotifyGUI(ET_DEVICE_REMOVED, pVDev); #ifndef FOR_DEMO if (pAdapter->ver_601==2 && !pAdapter->beeping) { pAdapter->beeping = 1; BeepOn(pAdapter->mvSataAdapter.adapterIoBaseAddress); set_fail_led(&pAdapter->mvSataAdapter, pVDev->u.disk.mv->channelNumber, 1); } #endif } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel); static void handleEdmaError(_VBUS_ARG PCommand pCmd) { PDevice pDevice = &pCmd->pVDevice->u.disk; MV_SATA_ADAPTER * pSataAdapter = pDevice->mv->mvSataAdapter; if (!pDevice->df_on_line) { KdPrint(("Device is offline")); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } if (pCmd->RetryCount++>5) { hpt_printk(("too many retries on channel(%d)\n", pDevice->mv->channelNumber)); failed: failDevice(pCmd->pVDevice); pCmd->Result = RETURN_IDE_ERROR; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* reset the channel and retry the command */ if (MvSataResetChannel(pSataAdapter, pDevice->mv->channelNumber)) goto failed; fNotifyGUI(ET_DEVICE_ERROR, Map2pVDevice(pDevice)); hpt_printk(("Retry on channel(%d)\n", pDevice->mv->channelNumber)); fDeviceSendCommand(_VBUS_P pCmd); } /**************************************************************** * Name: hptmv_init_channel * * Description: allocate request and response queues for the EDMA of the * given channel and sets other fields. * * Parameters: * pAdapter - pointer to the emulated adapter data structure * channelNum - channel number. 
* Return: 0 on success, otherwise on failure ****************************************************************/ static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_CHANNEL *pMvSataChannel; dma_addr_t req_dma_addr; dma_addr_t rsp_dma_addr; if (channelNum >= MV_SATA_CHANNELS_NUM) { MV_ERROR("RR18xx[%d]: Bad channelNum=%d", pAdapter->mvSataAdapter.adapterId, channelNum); return -1; } pMvSataChannel = &gMvSataChannels[pAdapter->mvSataAdapter.adapterId][channelNum]; pAdapter->mvSataAdapter.sataChannel[channelNum] = pMvSataChannel; pMvSataChannel->channelNumber = channelNum; pMvSataChannel->lba48Address = MV_FALSE; pMvSataChannel->maxReadTransfer = MV_FALSE; pMvSataChannel->requestQueue = (struct mvDmaRequestQueueEntry *) (pAdapter->requestsArrayBaseAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE)); req_dma_addr = pAdapter->requestsArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE); KdPrint(("requestQueue addr is 0x%llX", (HPT_U64)(ULONG_PTR)req_dma_addr)); /* check the 1K alignment of the request queue*/ if (req_dma_addr & 0x3ff) { MV_ERROR("RR18xx[%d]: request queue allocated isn't 1 K aligned," " dma_addr=%llx channel=%d\n", pAdapter->mvSataAdapter.adapterId, (HPT_U64)(ULONG_PTR)req_dma_addr, channelNum); return -1; } pMvSataChannel->requestQueuePciLowAddress = req_dma_addr; pMvSataChannel->requestQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: request queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->requestQueue)); pMvSataChannel->responseQueue = (struct mvDmaResponseQueueEntry *) (pAdapter->responsesArrayBaseAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE)); rsp_dma_addr = pAdapter->responsesArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE); /* check the 256 alignment of the response queue*/ if (rsp_dma_addr & 0xff) { MV_ERROR("RR18xx[%d,%d]: response queue allocated isn't 256 byte " "aligned, dma_addr=%llx\n", pAdapter->mvSataAdapter.adapterId, channelNum, (HPT_U64)(ULONG_PTR)rsp_dma_addr); return -1; } pMvSataChannel->responseQueuePciLowAddress = rsp_dma_addr; pMvSataChannel->responseQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: response queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->responseQueue)); pAdapter->mvChannel[channelNum].online = MV_TRUE; return 0; } /****************************************************************************** * Name: hptmv_parse_identify_results * * Description: this function parses the identify command results, checks * that the connected devices can be accessed by the RR18xx EDMA, * and updates the channel structure accordingly. * * Parameters: pMvSataChannel, pointer to the channel data structure. * * Returns: =0 ->success, < 0 ->failure. * ******************************************************************************/ static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel) { MV_U16 *iden = pMvSataChannel->identifyDevice; /*LBA addressing*/ if (! (iden[IDEN_CAPACITY_1_OFFSET] & 0x200)) { KdPrint(("IAL Error in IDENTIFY info: LBA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "LBA supported")); } /*DMA support*/ if (! 
(iden[IDEN_CAPACITY_1_OFFSET] & 0x100)) { KdPrint(("IAL Error in IDENTIFY info: DMA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "DMA supported")); } /* PIO */ if ((iden[IDEN_VALID] & 2) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find PIO mode\n")); return -1; } KdPrint(("%25s - 0x%02x\n", "PIO modes supported", iden[IDEN_PIO_MODE_SPPORTED] & 0xff)); /*UDMA*/ if ((iden[IDEN_VALID] & 4) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find UDMA mode\n")); return -1; } /* 48 bit address */ if ((iden[IDEN_SUPPORTED_COMMANDS2] & 0x400)) { KdPrint(("%25s - %s\n", "LBA48 addressing", "supported")); pMvSataChannel->lba48Address = MV_TRUE; } else { KdPrint(("%25s - %s\n", "LBA48 addressing", "Not supported")); pMvSataChannel->lba48Address = MV_FALSE; } return 0; } static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel) { PVDevice pVDev = &pAdapter->VDevices[channel]; MV_SATA_CHANNEL *pMvSataChannel = pAdapter->mvSataAdapter.sataChannel[channel]; MV_U16_PTR IdentifyData = pMvSataChannel->identifyDevice; pMvSataChannel->outstandingCommands = 0; pVDev->u.disk.mv = pMvSataChannel; pVDev->u.disk.df_on_line = 1; pVDev->u.disk.pVBus = &pAdapter->VBus; pVDev->pVBus = &pAdapter->VBus; #ifdef SUPPORT_48BIT_LBA if (pMvSataChannel->lba48Address == MV_TRUE) pVDev->u.disk.dDeRealCapacity = ((IdentifyData[101]<<16) | IdentifyData[100]) - 1; else #endif if(IdentifyData[53] & 1) { pVDev->u.disk.dDeRealCapacity = (((IdentifyData[58]<<16 | IdentifyData[57]) < (IdentifyData[61]<<16 | IdentifyData[60])) ? (IdentifyData[61]<<16 | IdentifyData[60]) : (IdentifyData[58]<<16 | IdentifyData[57])) - 1; } else pVDev->u.disk.dDeRealCapacity = (IdentifyData[61]<<16 | IdentifyData[60]) - 1; pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxPioModeSupported - MV_ATA_TRANSFER_PIO_0; if (pAdapter->mvChannel[channel].maxUltraDmaModeSupported!=0xFF) { pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxUltraDmaModeSupported - MV_ATA_TRANSFER_UDMA_0 + 8; } } static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plugged) { PVDevice pVDev; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelIndex]; if (!pMvSataChannel) return; if (plugged) { pVDev = &(pAdapter->VDevices[channelIndex]); init_vdev_params(pAdapter, channelIndex); pVDev->VDeviceType = pVDev->u.disk.df_atapi? VD_ATAPI : pVDev->u.disk.df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->VDeviceCapacity = pVDev->u.disk.dDeRealCapacity-SAVE_FOR_RAID_INFO; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; pVDev->vf_online = 1; #ifdef SUPPORT_ARRAY if(pVDev->pParent) { int iMember; for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev) pVDev->pParent->u.array.pMember[iMember] = NULL; pVDev->pParent = NULL; } #endif fNotifyGUI(ET_DEVICE_PLUGGED,pVDev); fCheckBootable(pVDev); RegisterVDevice(pVDev); #ifndef FOR_DEMO if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } #endif } else { pVDev = &(pAdapter->VDevices[channelIndex]); failDevice(pVDev); } } static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelNum]; MV_CHANNEL *pChannelInfo = &(pAdapter->mvChannel[channelNum]); MV_U32 udmaMode,pioMode; KdPrint(("RR18xx [%d]: start channel (%d)", pMvSataAdapter->adapterId, channelNum)); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { /* If failed, try again - this is when trying to hardreset a channel */ /* when drive is just spinning up */ StallExec(5000000); /* wait 5 sec before trying again */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Hard reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* identify device*/ if (mvStorageDevATAIdentifyDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform ATA Identify command\n" , pMvSataAdapter->adapterId, channelNum); return -1; } if (hptmv_parse_identify_results(pMvSataChannel)) { MV_ERROR("RR18xx [%d,%d]: Error in parsing ATA Identify message\n" , pMvSataAdapter->adapterId, channelNum); return -1; } /* mvStorageDevATASetFeatures */ /* Disable 8 bit PIO in case CFA enabled */ if (pMvSataChannel->identifyDevice[86] & 4) { KdPrint(("RR18xx [%d]: Disable 8 bit PIO (CFA enabled) \n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_8_BIT_PIO, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures" " failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Write cache */ #ifdef ENABLE_WRITE_CACHE if (pMvSataChannel->identifyDevice[82] & 0x20) { if (!(pMvSataChannel->identifyDevice[85] & 0x20)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel %d, write cache enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, write cache not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else /* disable write cache */ { if (pMvSataChannel->identifyDevice[85] & 0x20) { KdPrint(("RR18xx [%d]: channel =%d, disable write cache\n", pMvSataAdapter->adapterId, channelNum)); 
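/*
 * Background for the gating above (a sketch, not part of the original
 * driver): in the ATA IDENTIFY DEVICE data, word 82 bit 5 advertises
 * write-cache support while word 85 bit 5 reports whether the feature
 * is currently enabled, so SET FEATURES (disable write cache) below is
 * only issued when the drive reports the cache as actually on.
 * Equivalent bit tests, assuming the standard IDENTIFY word layout:
 *
 *	int wc_supported = (pMvSataChannel->identifyDevice[82] >> 5) & 1;
 *	int wc_enabled   = (pMvSataChannel->identifyDevice[85] >> 5) & 1;
 */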
if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, write cache disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif /* Set transfer mode */ KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_SLOW\n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 1) { pioMode = MV_ATA_TRANSFER_PIO_4; } else if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 2) { pioMode = MV_ATA_TRANSFER_PIO_3; } else { MV_ERROR("IAL Error in IDENTIFY info: PIO modes 3 and 4 not supported\n"); pioMode = MV_ATA_TRANSFER_PIO_SLOW; } KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_4\n", pMvSataAdapter->adapterId)); pAdapter->mvChannel[channelNum].maxPioModeSupported = pioMode; if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pioMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } udmaMode = MV_ATA_TRANSFER_UDMA_0; if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x40) { udmaMode = MV_ATA_TRANSFER_UDMA_6; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x20) { udmaMode = MV_ATA_TRANSFER_UDMA_5; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x10) { udmaMode = MV_ATA_TRANSFER_UDMA_4; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 8) { udmaMode = MV_ATA_TRANSFER_UDMA_3; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 4) { udmaMode = MV_ATA_TRANSFER_UDMA_2; } KdPrint(("RR18xx [%d] Set transfer mode XFER_UDMA_%d\n", pMvSataAdapter->adapterId, udmaMode & 0xf)); pChannelInfo->maxUltraDmaModeSupported = udmaMode; /*if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, udmaMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; }*/ if (pChannelInfo->maxUltraDmaModeSupported == 0xFF) return TRUE; else do { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pChannelInfo->maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) { if (pChannelInfo->maxUltraDmaModeSupported > MV_ATA_TRANSFER_UDMA_0) { if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_REG_WRITE_BYTE(pMvSataAdapter->adapterIoBaseAddress, pMvSataChannel->eDmaRegsOffset + 0x11c, /* command reg */ MV_ATA_COMMAND_IDLE_IMMEDIATE); mvMicroSecondsDelay(10000); mvSataChannelHardReset(pMvSataAdapter, channelNum); if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; } if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; pChannelInfo->maxUltraDmaModeSupported--; continue; } else return FALSE; } break; }while (1); /* Read look ahead */ #ifdef ENABLE_READ_AHEAD if (pMvSataChannel->identifyDevice[82] & 0x40) { if (!(pMvSataChannel->identifyDevice[85] & 0x40)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] 
channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, read look ahead enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, Read Look Ahead not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else { if (pMvSataChannel->identifyDevice[86] & 0x20) { KdPrint(("RR18xx [%d]:channel %d, disable read look ahead\n", pMvSataAdapter->adapterId, channelNum)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]:channel %d: ATA Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]:channel %d, read look ahead disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif { KdPrint(("RR18xx [%d]: channel %d config EDMA, Non Queued Mode\n", pMvSataAdapter->adapterId, channelNum)); if (mvSataConfigEdmaMode(pMvSataAdapter, channelNum, MV_EDMA_MODE_NOT_QUEUED, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d Error: mvSataConfigEdmaMode failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d] Failed to enable DMA, channel=%d\n", pMvSataAdapter->adapterId, channelNum); return -1; } MV_ERROR("RR18xx [%d,%d]: channel started successfully\n", pMvSataAdapter->adapterId, channelNum); #ifndef FOR_DEMO set_fail_led(pMvSataAdapter, channelNum, 0); #endif return 0; } static void hptmv_handle_event(void * data, int flag) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)data; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_U8 channelIndex; mtx_assert(&pAdapter->lock, MA_OWNED); /* mvOsSemTake(&pMvSataAdapter->semaphore); */ for (channelIndex = 0; channelIndex < MV_SATA_CHANNELS_NUM; channelIndex++) { switch(pAdapter->sataEvents[channelIndex]) { case SATA_EVENT_CHANNEL_CONNECTED: /* Handle only connects */ if (flag == 1) break; KdPrint(("RR18xx [%d,%d]: new device connected\n", pMvSataAdapter->adapterId, channelIndex)); hptmv_init_channel(pAdapter, channelIndex); if (mvSataConfigureChannel( pMvSataAdapter, channelIndex) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to configure\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { /*mvSataChannelHardReset(pMvSataAdapter, channel);*/ if (start_channel( pAdapter, channelIndex)) { MV_ERROR("RR18xx [%d,%d]Failed to start channel\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { device_change(pAdapter, channelIndex, TRUE); } } pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_CHANNEL_DISCONNECTED: /* Handle only disconnects */ if (flag == 0) break; KdPrint(("RR18xx [%d,%d]: device disconnected\n", pMvSataAdapter->adapterId, channelIndex)); /* Flush pending commands */ if(pMvSataAdapter->sataChannel[channelIndex]) { _VBUS_INST(&pAdapter->VBus) mvSataFlushDmaQueue (pMvSataAdapter, channelIndex, MV_FLUSH_TYPE_CALLBACK); CheckPendingCall(_VBUS_P0); mvSataRemoveChannel(pMvSataAdapter,channelIndex); hptmv_free_channel(pAdapter, channelIndex); pMvSataAdapter->sataChannel[channelIndex] = NULL; KdPrint(("RR18xx [%d,%d]: channel removed\n", pMvSataAdapter->adapterId, channelIndex)); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); } else { KdPrint(("RR18xx [%d,%d]: channel already removed!!\n", pMvSataAdapter->adapterId, channelIndex)); } 
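/*
 * Both arms fall through to the statement below, which marks the event
 * as consumed. In the disconnect arm above, the ordering is what keeps
 * completion safe: mvSataFlushDmaQueue() with MV_FLUSH_TYPE_CALLBACK
 * fails every outstanding command back through its completion routine,
 * CheckPendingCall() drains those callbacks, and only then is the
 * channel removed and freed, so no completion can run against a
 * torn-down channel.
 */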
pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_NO_CHANGE: break; default: break; } } /* mvOsSemRelease(&pMvSataAdapter->semaphore); */ } #define EVENT_CONNECT 1 #define EVENT_DISCONNECT 0 static void hptmv_handle_event_connect(void *data) { hptmv_handle_event (data, 0); } static void hptmv_handle_event_disconnect(void *data) { hptmv_handle_event (data, 1); } static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2) { IAL_ADAPTER_T *pAdapter = pMvSataAdapter->IALData; switch (eventType) { case MV_EVENT_TYPE_SATA_CABLE: { MV_U8 channel = param2; if (param1 == EVENT_CONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_CONNECTED; KdPrint(("RR18xx [%d,%d]: device connected event received\n", pMvSataAdapter->adapterId, channel)); /* Delete previous timers (if multiple drives are connected at the same time) */ callout_reset(&pAdapter->event_timer_connect, 10 * hz, hptmv_handle_event_connect, pAdapter); } else if (param1 == EVENT_DISCONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_DISCONNECTED; KdPrint(("RR18xx [%d,%d]: device disconnected event received \n", pMvSataAdapter->adapterId, channel)); device_change(pAdapter, channel, FALSE); /* Delete previous timers (if multiple drives are disconnected at the same time) */ /*callout_reset(&pAdapter->event_timer_disconnect, 10 * hz, hptmv_handle_event_disconnect, pAdapter); */ /*It is not necessary to wait, handle it directly*/ hptmv_handle_event_disconnect(pAdapter); } else { MV_ERROR("RR18xx: illegal value for param1(%d) at " "connect/disconnect event, host=%d\n", param1, pMvSataAdapter->adapterId ); } } break; case MV_EVENT_TYPE_ADAPTER_ERROR: KdPrint(("RR18xx: DEVICE error event received, pci cause " "reg=%x, don't know how to handle this\n", param1)); return MV_TRUE; default: MV_ERROR("RR18xx[%d]: unknown event type (%d)\n", pMvSataAdapter->adapterId, eventType); return MV_FALSE; } return MV_TRUE; } static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter) { pAdapter->requestsArrayBaseAddr = (MV_U8 *)contigmalloc(REQUESTS_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->requestsArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA request" " queues\n", pAdapter->mvSataAdapter.adapterId); return -1; } pAdapter->requestsArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->requestsArrayBaseAddr); pAdapter->requestsArrayBaseAlignedAddr = pAdapter->requestsArrayBaseAddr; pAdapter->requestsArrayBaseAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->requestsArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1)); pAdapter->requestsArrayBaseDmaAlignedAddr = pAdapter->requestsArrayBaseDmaAddr; pAdapter->requestsArrayBaseDmaAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1); if ((pAdapter->requestsArrayBaseDmaAlignedAddr - pAdapter->requestsArrayBaseDmaAddr) != (pAdapter->requestsArrayBaseAlignedAddr - pAdapter->requestsArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Request Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); return -1; } /* response queues */ pAdapter->responsesArrayBaseAddr = (MV_U8 *)contigmalloc(RESPONSES_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->responsesArrayBaseAddr == NULL) { 
MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA response" " queues\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } pAdapter->responsesArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->responsesArrayBaseAddr); pAdapter->responsesArrayBaseAlignedAddr = pAdapter->responsesArrayBaseAddr; pAdapter->responsesArrayBaseAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->responsesArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1)); pAdapter->responsesArrayBaseDmaAlignedAddr = pAdapter->responsesArrayBaseDmaAddr; pAdapter->responsesArrayBaseDmaAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1); if ((pAdapter->responsesArrayBaseDmaAlignedAddr - pAdapter->responsesArrayBaseDmaAddr) != (pAdapter->responsesArrayBaseAlignedAddr - pAdapter->responsesArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Response Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } return 0; } static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter) { contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); } static PVOID AllocatePRDTable(IAL_ADAPTER_T *pAdapter) { PVOID ret; if (pAdapter->pFreePRDLink) { KdPrint(("pAdapter->pFreePRDLink:%p\n",pAdapter->pFreePRDLink)); ret = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = *(void**)ret; return ret; } return NULL; } static void FreePRDTable(IAL_ADAPTER_T *pAdapter, PVOID PRDTable) { *(void**)PRDTable = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = PRDTable; } extern PVDevice fGetFirstChild(PVDevice pLogical); extern void fResetBootMark(PVDevice pLogical); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter) { PVDevice pPhysical, pLogical; PVBus pVBus; int i,j; for(i=0;iVDevices[i]); pLogical = pPhysical; while (pLogical->pParent) pLogical = pLogical->pParent; if (pLogical->vf_online==0) { pPhysical->vf_bootmark = pLogical->vf_bootmark = 0; continue; } if (pLogical->VDeviceType==VD_SPARE || pPhysical!=fGetFirstChild(pLogical)) continue; pVBus = &pAdapter->VBus; if(pVBus) { j=0; while(jpVDevice[j]) j++; if(jpVDevice[j] = pLogical; pLogical->pVBus = pVBus; if (j>0 && pLogical->vf_bootmark) { if (pVBus->pVDevice[0]->vf_bootmark) { fResetBootMark(pLogical); } else { do { pVBus->pVDevice[j] = pVBus->pVDevice[j-1]; } while (--j); pVBus->pVDevice[0] = pLogical; } } } } } } PVDevice GetSpareDisk(_VBUS_ARG PVDevice pArray) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pArray->pVBus->OsExt; LBA_T capacity = LongDiv(pArray->VDeviceCapacity, pArray->u.array.bArnMember-1); LBA_T thiscap, maxcap = MAX_LBA_T; PVDevice pVDevice, pFind = NULL; int i; for(i=0;iVDevices[i]; if(!pVDevice) continue; thiscap = pArray->vf_format_v2? 
pVDevice->u.disk.dDeRealCapacity : pVDevice->VDeviceCapacity; /* find the smallest usable spare disk */ if (pVDevice->VDeviceType==VD_SPARE && pVDevice->u.disk.df_on_line && thiscap < maxcap && thiscap >= capacity) { maxcap = pVDevice->VDeviceCapacity; pFind = pVDevice; } } return pFind; } /****************************************************************** * IO ATA Command *******************************************************************/ int HPTLIBAPI fDeReadWrite(PDevice pDev, ULONG Lba, UCHAR Cmd, void *tmpBuffer) { return mvReadWrite(pDev->mv, Lba, Cmd, tmpBuffer); } void HPTLIBAPI fDeSelectMode(PDevice pDev, UCHAR NewMode) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; UCHAR mvMode; /* 508x don't use MW-DMA? */ if (NewMode>4 && NewMode<8) NewMode = 4; pDev->bDeModeSetting = NewMode; if (NewMode<=4) mvMode = MV_ATA_TRANSFER_PIO_0 + NewMode; else mvMode = MV_ATA_TRANSFER_UDMA_0 + (NewMode-8); /*To fix 88i8030 bug*/ if (mvMode > MV_ATA_TRANSFER_UDMA_0 && mvMode < MV_ATA_TRANSFER_UDMA_4) mvMode = MV_ATA_TRANSFER_UDMA_0; mvSataDisableChannelDma(pSataAdapter, channelIndex); /* Flush pending commands */ mvSataFlushDmaQueue (pSataAdapter, channelIndex, MV_FLUSH_TYPE_NONE); if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_TRANSFER, mvMode, 0, 0, 0) == MV_FALSE) { KdPrint(("channel %d: Set Features failed\n", channelIndex)); } /* Enable EDMA */ if (mvSataEnableChannelDma(pSataAdapter, channelIndex) == MV_FALSE) KdPrint(("Failed to enable DMA, channel=%d", channelIndex)); } int HPTLIBAPI fDeSetTCQ(PDevice pDev, int enable, int depth) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if (enable) { if (pSataChannel->queuedDMA == MV_EDMA_MODE_NOT_QUEUED && (pSataChannel->identifyDevice[IDEN_SUPPORTED_COMMANDS2] & (0x2))) { UCHAR depth = ((pSataChannel->identifyDevice[IDEN_QUEUE_DEPTH]) & 0x1f) + 1; channelInfo->queueDepth = (depth==32)? 
31 : depth; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_QUEUED, depth); ret = 1; } } else { if (pSataChannel->queuedDMA != MV_EDMA_MODE_NOT_QUEUED) { channelInfo->queueDepth = 2; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_NOT_QUEUED, 0); ret = 1; } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetNCQ(PDevice pDev, int enable, int depth) { return 0; } int HPTLIBAPI fDeSetWriteCache(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x20))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetReadAhead(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x40))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } #ifdef SUPPORT_ARRAY #define IdeRegisterVDevice fCheckArray #else void IdeRegisterVDevice(PDevice pDev) { PVDevice pVDev = Map2pVDevice(pDev); pVDev->VDeviceType = pDev->df_atapi? VD_ATAPI : pDev->df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->vf_online = 1; pVDev->VDeviceCapacity = pDev->dDeRealCapacity; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; } #endif static __inline PBUS_DMAMAP dmamap_get(struct IALAdapter * pAdapter) { PBUS_DMAMAP p = pAdapter->pbus_dmamap_list; if (p) pAdapter->pbus_dmamap_list = p-> next; return p; } static __inline void dmamap_put(PBUS_DMAMAP p) { p->next = p->pAdapter->pbus_dmamap_list; p->pAdapter->pbus_dmamap_list = p; } static int num_adapters = 0; static int init_adapter(IAL_ADAPTER_T *pAdapter) { PVBus _vbus_p = &pAdapter->VBus; MV_SATA_ADAPTER *pMvSataAdapter; int i, channel, rid; PVDevice pVDev; mtx_init(&pAdapter->lock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&pAdapter->event_timer_connect, &pAdapter->lock, 0); callout_init_mtx(&pAdapter->event_timer_disconnect, &pAdapter->lock, 0); sx_xlock(&hptmv_list_lock); pAdapter->next = 0; - if(gIal_Adapter == 0){ + if(gIal_Adapter == NULL){ gIal_Adapter = pAdapter; pCurAdapter = gIal_Adapter; } else { pCurAdapter->next = pAdapter; pCurAdapter = pAdapter; } sx_xunlock(&hptmv_list_lock); pAdapter->outstandingCommands = 0; pMvSataAdapter = &(pAdapter->mvSataAdapter); _vbus_p->OsExt = (void *)pAdapter; pMvSataAdapter->IALData = pAdapter; if (bus_dma_tag_create(bus_get_dma_tag(pAdapter->hpt_dev),/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (MAX_SG_DESCRIPTORS-1), /* maxsize */ MAX_SG_DESCRIPTORS, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &pAdapter->lock, /* lockfuncarg */ &pAdapter->io_dma_parent /* tag */)) { return ENXIO; } if (hptmv_allocate_edma_queues(pAdapter)) { MV_ERROR("RR18xx: Failed to allocate memory for EDMA queues\n"); return ENOMEM; } /* also map EPROM address */ rid = 0x10; if (!(pAdapter->mem_res = bus_alloc_resource_any(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) || !(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res))) { MV_ERROR("RR18xx: Failed to remap memory space\n"); hptmv_free_edma_queues(pAdapter); return ENXIO; } else { KdPrint(("RR18xx: io base address 0x%p\n", pMvSataAdapter->adapterIoBaseAddress)); } pMvSataAdapter->adapterId = num_adapters++; /* get the revision ID */ pMvSataAdapter->pciConfigRevisionId = pci_read_config(pAdapter->hpt_dev, PCIR_REVID, 1); pMvSataAdapter->pciConfigDeviceId = pci_get_device(pAdapter->hpt_dev); /* init RR18xx */ pMvSataAdapter->intCoalThre[0]= 1; pMvSataAdapter->intCoalThre[1]= 1; pMvSataAdapter->intTimeThre[0] = 1; pMvSataAdapter->intTimeThre[1] = 1; pMvSataAdapter->pciCommand = 0x0107E371; pMvSataAdapter->pciSerrMask = 0xd77fe6ul; pMvSataAdapter->pciInterruptMask = 0xd77fe6ul; pMvSataAdapter->mvSataEventNotify = hptmv_event_notify; if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { MV_ERROR("RR18xx[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); unregister: bus_release_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, rid, pAdapter->mem_res); hptmv_free_edma_queues(pAdapter); return ENXIO; } pAdapter->ver_601 = pMvSataAdapter->pcbVersion; #ifndef FOR_DEMO set_fail_leds(pMvSataAdapter, 0); #endif /* setup command blocks */ KdPrint(("Allocate command blocks\n")); _vbus_(pFreeCommands) = 0; pAdapter->pCommandBlocks = malloc(sizeof(struct _Command) * MAX_COMMAND_BLOCKS_FOR_EACH_VBUS, M_DEVBUF, M_NOWAIT); 
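/*
 * All command blocks for this virtual bus come from the single malloc
 * above; the loop below threads each block onto the VBus free list
 * (via FreeCommand), so per-request allocation later reduces to a
 * constant-time list pop, mirroring the dmamap_get()/dmamap_put()
 * pool built a few lines further down.
 */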
KdPrint(("pCommandBlocks:%p\n",pAdapter->pCommandBlocks)); if (!pAdapter->pCommandBlocks) { MV_ERROR("insufficient memory\n"); goto unregister; } for (i=0; ipCommandBlocks[i])); } /*Set up the bus_dmamap*/ pAdapter->pbus_dmamap = (PBUS_DMAMAP)malloc (sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM, M_DEVBUF, M_NOWAIT); if(!pAdapter->pbus_dmamap) { MV_ERROR("insufficient memory\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); goto unregister; } memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM); pAdapter->pbus_dmamap_list = 0; for (i=0; i < MAX_QUEUE_COMM; i++) { PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]); pmap->pAdapter = pAdapter; dmamap_put(pmap); if(bus_dmamap_create(pAdapter->io_dma_parent, 0, &pmap->dma_map)) { MV_ERROR("Can not allocate dma map\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); free(pAdapter->pbus_dmamap, M_DEVBUF); goto unregister; } callout_init_mtx(&pmap->timeout, &pAdapter->lock, 0); } /* setup PRD Tables */ KdPrint(("Allocate PRD Tables\n")); pAdapter->pFreePRDLink = 0; pAdapter->prdTableAddr = (PUCHAR)contigmalloc( (PRD_ENTRIES_SIZE*PRD_TABLES_FOR_VBUS + 32), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); KdPrint(("prdTableAddr:%p\n",pAdapter->prdTableAddr)); if (!pAdapter->prdTableAddr) { MV_ERROR("insufficient PRD Tables\n"); goto unregister; } pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL); { PUCHAR PRDTable = pAdapter->prdTableAlignedAddr; for (i=0; ipFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */ FreePRDTable(pAdapter, PRDTable); PRDTable += PRD_ENTRIES_SIZE; } } /* enable the adapter interrupts */ /* configure and start the connected channels*/ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pAdapter->mvChannel[channel].online = MV_FALSE; if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_TRUE) { KdPrint(("RR18xx[%d]: channel %d is connected\n", pMvSataAdapter->adapterId, channel)); if (hptmv_init_channel(pAdapter, channel) == 0) { if (mvSataConfigureChannel(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx[%d]: Failed to configure channel" " %d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } else { if (start_channel(pAdapter, channel)) { MV_ERROR("RR18xx[%d]: Failed to start channel," " channel=%d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } pAdapter->mvChannel[channel].online = MV_TRUE; /* mvSataChannelSetEdmaLoopBackMode(pMvSataAdapter, channel, MV_TRUE);*/ } } } KdPrint(("pAdapter->mvChannel[channel].online:%x, channel:%d\n", pAdapter->mvChannel[channel].online, channel)); } #ifdef SUPPORT_ARRAY for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { pVDev = ArrayTables(i); mArFreeArrayTable(pVDev); } #endif KdPrint(("Initialize Devices\n")); for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel) { init_vdev_params(pAdapter, channel); IdeRegisterVDevice(&pAdapter->VDevices[channel].u.disk); } } #ifdef SUPPORT_ARRAY CheckArrayCritical(_VBUS_P0); #endif _vbus_p->nInstances = 1; fRegisterVdevice(pAdapter); for (channel=0;channelpVDevice[channel]; if (pVDev && pVDev->vf_online) fCheckBootable(pVDev); } #if defined(SUPPORT_ARRAY) && defined(_RAID5N_) init_raid5_memory(_VBUS_P0); _vbus_(r5).enable_write_back = 1; printf("RR18xx: RAID5 write-back %s\n", _vbus_(r5).enable_write_back? 
"enabled" : "disabled"); #endif mvSataUnmaskAdapterInterrupt(pMvSataAdapter); return 0; } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pMvSataAdapter->IALData; mvSataDisableChannelDma(pMvSataAdapter, channel); /* Flush pending commands */ mvSataFlushDmaQueue (pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channel)== MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Hard reser the SATA channel\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Connect Device\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; }else { MV_ERROR("channel %d: perform recalibrate command", channel); if (!mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, MV_NON_UDMA_PROTOCOL_NON_DATA, MV_FALSE, NULL, /* pBuffer*/ 0, /* count */ 0, /*features*/ /* sectorCount */ 0, 0, /* lbaLow */ 0, /* lbaMid */ /* lbaHigh */ 0, 0, /* device */ /* command */ 0x10)) MV_ERROR("channel %d: recalibrate failed", channel); /* Set transfer mode */ if((mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxPioModeSupported, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) ) { MV_ERROR("channel %d: Set Features failed", channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("Failed to enable DMA, channel=%d", channel); hptmv_free_channel(pAdapter, channel); return -1; } } return 0; } static int fResetActiveCommands(PVBus _vbus_p) { MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_U8 channel; for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands) MvSataResetChannel(pMvSataAdapter,channel); } return 0; } void fCompleteAllCommandsSynchronously(PVBus _vbus_p) { UINT cont; ULONG ticks = 0; MV_U8 channel; MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; do { check_cmds: cont = 0; CheckPendingCall(_VBUS_P0); #ifdef _RAID5N_ dataxfer_poll(); xor_poll(); #endif for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel && pMvSataChannel->outstandingCommands) { while (pMvSataChannel->outstandingCommands) { if (!mvSataInterruptServiceRoutine(pMvSataAdapter)) { StallExec(1000); if (ticks++ > 3000) { MvSataResetChannel(pMvSataAdapter,channel); goto check_cmds; } } else ticks = 0; } cont = 1; } } } while (cont); } void fResetVBus(_VBUS_ARG0) { KdPrint(("fMvResetBus(%p)", _vbus_p)); /* some commands may already finished. 
void fResetVBus(_VBUS_ARG0)
{
	KdPrint(("fMvResetBus(%p)", _vbus_p));

	/* some commands may have already finished. */
	CheckPendingCall(_VBUS_P0);

	fResetActiveCommands(_vbus_p);
	/*
	 * the other pending commands may still finish successfully.
	 */
	fCompleteAllCommandsSynchronously(_vbus_p);

	/* Now there should be no pending commands. No more action needed. */
	CheckIdleCall(_VBUS_P0);

	KdPrint(("fMvResetBus() done"));
}

/* No rescan function */
void fRescanAllDevice(_VBUS_ARG0)
{
}

static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter,
    MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId,
    MV_U16 responseFlags, MV_U32 timeStamp,
    MV_STORAGE_DEVICE_REGISTERS *registerStruct)
{
	PCommand pCmd = (PCommand)commandId;
	_VBUS_INST(pCmd->pVDevice->pVBus)

	if (pCmd->uScratch.sata_param.prdAddr)
		FreePRDTable(pMvSataAdapter->IALData, pCmd->uScratch.sata_param.prdAddr);

	switch (comp_type) {
	case MV_COMPLETION_TYPE_NORMAL:
		pCmd->Result = RETURN_SUCCESS;
		break;
	case MV_COMPLETION_TYPE_ABORT:
		pCmd->Result = RETURN_BUS_RESET;
		break;
	case MV_COMPLETION_TYPE_ERROR:
		MV_ERROR("IAL: COMPLETION ERROR, adapter %d, channel %d, flags=%x\n",
		    pMvSataAdapter->adapterId, channelNum, responseFlags);

		if (responseFlags & 4) {
			MV_ERROR("ATA regs: error %x, sector count %x, LBA low %x, LBA mid %x,"
			    " LBA high %x, device %x, status %x\n",
			    registerStruct->errorRegister,
			    registerStruct->sectorCountRegister,
			    registerStruct->lbaLowRegister,
			    registerStruct->lbaMidRegister,
			    registerStruct->lbaHighRegister,
			    registerStruct->deviceRegister,
			    registerStruct->statusRegister);
		}
		/*
		 * We can't call handleEdmaError directly here, because
		 * CommandCompletionCB is called from the Marvell ISR; if we
		 * retried the command here, the internal data structures
		 * could be corrupted.
		 */
		pCmd->uScratch.sata_param.responseFlags = responseFlags;
		pCmd->uScratch.sata_param.bIdeStatus = registerStruct->statusRegister;
		pCmd->uScratch.sata_param.errorRegister = registerStruct->errorRegister;
		pCmd->pVDevice->u.disk.QueueLength--;
		CallAfterReturn(_VBUS_P (DPC_PROC)handleEdmaError, pCmd);
		return TRUE;
	default:
		MV_ERROR("Unknown completion type (%d)\n", comp_type);
		return MV_FALSE;
	}

	if (pCmd->uCmd.Ide.Command == IDE_COMMAND_VERIFY &&
	    pCmd->uScratch.sata_param.cmd_priv > 1) {
		pCmd->uScratch.sata_param.cmd_priv--;
		return TRUE;
	}

	pCmd->pVDevice->u.disk.QueueLength--;
	CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd);
	return TRUE;
}

void fDeviceSendCommand(_VBUS_ARG PCommand pCmd)
{
	MV_SATA_EDMA_PRD_ENTRY *pPRDTable = 0;
	MV_SATA_ADAPTER *pMvSataAdapter;
	MV_SATA_CHANNEL *pMvSataChannel;
	PVDevice pVDevice = pCmd->pVDevice;
	PDevice pDevice = &pVDevice->u.disk;
	LBA_T Lba = pCmd->uCmd.Ide.Lba;
	USHORT nSector = pCmd->uCmd.Ide.nSectors;
	MV_QUEUE_COMMAND_RESULT result;
	MV_QUEUE_COMMAND_INFO commandInfo;
	MV_UDMA_COMMAND_PARAMS *pUdmaParams = &commandInfo.commandParams.udmaCommand;
	MV_NONE_UDMA_COMMAND_PARAMS *pNoUdmaParams = &commandInfo.commandParams.NoneUdmaCommand;
	MV_BOOLEAN is48bit;
	MV_U8 channel;
	int i = 0;

	DECLARE_BUFFER(FPSCAT_GATH, tmpSg);

	if (!pDevice->df_on_line) {
		MV_ERROR("Device is offline");
		pCmd->Result = RETURN_BAD_DEVICE;
		CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd);
		return;
	}

	pDevice->HeadPosition = pCmd->uCmd.Ide.Lba + pCmd->uCmd.Ide.nSectors;
	pMvSataChannel = pDevice->mv;
	pMvSataAdapter = pMvSataChannel->mvSataAdapter;
	channel = pMvSataChannel->channelNumber;

	/* old RAID0 has a hidden lba. Remember to clear dDeHiddenLba when deleting the array!
*/ Lba += pDevice->dDeHiddenLba; /* check LBA */ if (Lba+nSector-1 > pDevice->dDeRealCapacity) { pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* * always use 48bit LBA if drive supports it. * Some Seagate drives report error if you use a 28-bit command * to access sector 0xfffffff. */ is48bit = pMvSataChannel->lba48Address; switch (pCmd->uCmd.Ide.Command) { case IDE_COMMAND_READ: case IDE_COMMAND_WRITE: if (pDevice->bDeModeSetting<8) goto pio; commandInfo.type = MV_QUEUED_COMMAND_TYPE_UDMA; pUdmaParams->isEXT = is48bit; pUdmaParams->numOfSectors = nSector; pUdmaParams->lowLBAAddress = Lba; pUdmaParams->highLBAAddress = 0; pUdmaParams->prdHighAddr = 0; pUdmaParams->callBack = CommandCompletionCB; pUdmaParams->commandId = (MV_VOID_PTR )pCmd; if(pCmd->uCmd.Ide.Command == IDE_COMMAND_READ) pUdmaParams->readWrite = MV_UDMA_TYPE_READ; else pUdmaParams->readWrite = MV_UDMA_TYPE_WRITE; if (pCmd->pSgTable && pCmd->cf_physical_sg) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 0)) { pio: mvSataDisableChannelDma(pMvSataAdapter, channel); mvSataFlushDmaQueue(pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); if (pCmd->pSgTable && pCmd->cf_physical_sg==0) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 1)) { pCmd->Result = RETURN_NEED_LOGICAL_SG; goto finish_cmd; } } do { ULONG size = tmpSg->wSgSize? tmpSg->wSgSize : 0x10000; ULONG_PTR addr = tmpSg->dSgAddress; if (size & 0x1ff) { pCmd->Result = RETURN_INVALID_REQUEST; goto finish_cmd; } if (mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, (pCmd->cf_data_out)?MV_NON_UDMA_PROTOCOL_PIO_DATA_OUT:MV_NON_UDMA_PROTOCOL_PIO_DATA_IN, is48bit, (MV_U16_PTR)addr, size >> 1, /* count */ 0, /* features N/A */ (MV_U16)(size>>9), /*sector count*/ (MV_U16)( (is48bit? (MV_U16)((Lba >> 16) & 0xFF00) : 0 ) | (UCHAR)(Lba & 0xFF) ), /*lbalow*/ (MV_U16)((Lba >> 8) & 0xFF), /* lbaMid */ (MV_U16)((Lba >> 16) & 0xFF),/* lbaHigh */ (MV_U8)(0x40 | (is48bit ? 0 : (UCHAR)(Lba >> 24) & 0xFF )),/* device */ (MV_U8)(is48bit ? (pCmd->cf_data_in?IDE_COMMAND_READ_EXT:IDE_COMMAND_WRITE_EXT):pCmd->uCmd.Ide.Command) )==MV_FALSE) { pCmd->Result = RETURN_IDE_ERROR; goto finish_cmd; } Lba += size>>9; if(Lba & 0xF0000000) is48bit = MV_TRUE; } while ((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pCmd->Result = RETURN_SUCCESS; finish_cmd: mvSataEnableChannelDma(pMvSataAdapter,channel); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } } pPRDTable = (MV_SATA_EDMA_PRD_ENTRY *) AllocatePRDTable(pMvSataAdapter->IALData); KdPrint(("pPRDTable:%p\n",pPRDTable)); if (!pPRDTable) { pCmd->Result = RETURN_DEVICE_BUSY; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); HPT_ASSERT(0); return; } do{ pPRDTable[i].highBaseAddr = (sizeof(tmpSg->dSgAddress)>4 ? 
(MV_U32)(tmpSg->dSgAddress>>32) : 0); pPRDTable[i].flags = (MV_U16)tmpSg->wSgFlag; pPRDTable[i].byteCount = (MV_U16)tmpSg->wSgSize; pPRDTable[i].lowBaseAddr = (MV_U32)tmpSg->dSgAddress; pPRDTable[i].reserved = 0; i++; }while((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pUdmaParams->prdLowAddr = (ULONG)fOsPhysicalAddress(pPRDTable); if ((pUdmaParams->numOfSectors == 256) && (pMvSataChannel->lba48Address == MV_FALSE)) { pUdmaParams->numOfSectors = 0; } pCmd->uScratch.sata_param.prdAddr = (PVOID)pPRDTable; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK) { queue_failed: switch (result) { case MV_QUEUE_COMMAND_RESULT_BAD_LBA_ADDRESS: MV_ERROR("IAL Error: Edma Queue command failed. Bad LBA " "LBA[31:0](0x%08x)\n", pUdmaParams->lowLBAAddress); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_QUEUED_MODE_DISABLED: MV_ERROR("IAL Error: Edma Queue command failed. EDMA" " disabled adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); mvSataEnableChannelDma(pMvSataAdapter,channel); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_FULL: MV_ERROR("IAL Error: Edma Queue command failed. Queue is" " Full adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); pCmd->Result = RETURN_DEVICE_BUSY; break; case MV_QUEUE_COMMAND_RESULT_BAD_PARAMS: MV_ERROR("IAL Error: Edma Queue command failed. (Bad " "Params), pMvSataAdapter: %p, pSataChannel: %p.\n", pMvSataAdapter, pMvSataAdapter->sataChannel[channel]); pCmd->Result = RETURN_IDE_ERROR; break; default: MV_ERROR("IAL Error: Bad result value (%d) from queue" " command\n", result); pCmd->Result = RETURN_IDE_ERROR; } if(pPRDTable) FreePRDTable(pMvSataAdapter->IALData,pPRDTable); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); } pDevice->QueueLength++; return; case IDE_COMMAND_VERIFY: commandInfo.type = MV_QUEUED_COMMAND_TYPE_NONE_UDMA; pNoUdmaParams->bufPtr = NULL; pNoUdmaParams->callBack = CommandCompletionCB; pNoUdmaParams->commandId = (MV_VOID_PTR)pCmd; pNoUdmaParams->count = 0; pNoUdmaParams->features = 0; pNoUdmaParams->protocolType = MV_NON_UDMA_PROTOCOL_NON_DATA; pCmd->uScratch.sata_param.cmd_priv = 1; if (pMvSataChannel->lba48Address == MV_TRUE){ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS_EXT; pNoUdmaParams->isEXT = MV_TRUE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(((Lba & 0xff000000) >> 16)| (Lba & 0xff)); pNoUdmaParams->sectorCount = nSector; pNoUdmaParams->device = 0x40; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } return; } else{ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS; pNoUdmaParams->isEXT = MV_FALSE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(Lba & 0xff); pNoUdmaParams->sectorCount = 0xff & nSector; pNoUdmaParams->device = (MV_U8)(0x40 | ((Lba & 0xf000000) >> 24)); pNoUdmaParams->callBack = CommandCompletionCB; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); /*FIXME: how about the commands already queued? 
but Marvell also forgets to consider this */
			if (result != MV_QUEUE_COMMAND_RESULT_OK) {
				goto queue_failed;
			}
		}
		break;

	default:
		pCmd->Result = RETURN_INVALID_REQUEST;
		CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd);
		break;
	}
}

/**********************************************************
 *
 *	Probe the host adapter.
 *
 **********************************************************/
static int hpt_probe(device_t dev)
{
	if ((pci_get_vendor(dev) == MV_SATA_VENDOR_ID) &&
	    (pci_get_device(dev) == MV_SATA_DEVICE_ID_5081
#ifdef FOR_DEMO
	    || pci_get_device(dev) == MV_SATA_DEVICE_ID_5080
#endif
	    )) {
		KdPrintI((CONTROLLER_NAME " found\n"));
		device_set_desc(dev, CONTROLLER_NAME);
		return (BUS_PROBE_DEFAULT);
	} else
		return (ENXIO);
}

/***********************************************************
 *
 *	Auto configuration: attach and init a host adapter.
 *
 ***********************************************************/
static int hpt_attach(device_t dev)
{
	IAL_ADAPTER_T *pAdapter = device_get_softc(dev);
	int rid;
	union ccb *ccb;
	struct cam_devq *devq;
	struct cam_sim *hpt_vsim;

	device_printf(dev, "%s Version %s\n", DRIVER_NAME, DRIVER_VERSION);

	pAdapter->hpt_dev = dev;

	rid = init_adapter(pAdapter);
	if (rid)
		return rid;

	rid = 0;
	if ((pAdapter->hpt_irq = bus_alloc_resource_any(pAdapter->hpt_dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		hpt_printk(("can't allocate interrupt\n"));
		return (ENXIO);
	}

	if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq,
	    INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_intr, pAdapter,
	    &pAdapter->hpt_intr)) {
		hpt_printk(("can't set up interrupt\n"));
		free(pAdapter, M_DEVBUF);
		return (ENXIO);
	}

	if ((ccb = (union ccb *)malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK)) != (union ccb *)NULL) {
		bzero(ccb, sizeof(*ccb));
		ccb->ccb_h.pinfo.priority = 1;
		ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
	} else {
		return ENOMEM;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	if ((devq = cam_simq_alloc(8/*MAX_QUEUE_COMM*/)) == NULL) {
		KdPrint(("ENXIO\n"));
		return ENOMEM;
	}

	/*
	 * Construct our SIM entry
	 */
	hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME),
	    pAdapter, device_get_unit(pAdapter->hpt_dev), &pAdapter->lock,
	    1, 8, devq);
	if (hpt_vsim == NULL) {
		cam_simq_free(devq);
		return ENOMEM;
	}

	mtx_lock(&pAdapter->lock);
	if (xpt_bus_register(hpt_vsim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(hpt_vsim, /*free devq*/ TRUE);
		mtx_unlock(&pAdapter->lock);
		hpt_vsim = NULL;
		return ENXIO;
	}

	if (xpt_create_path(&pAdapter->path, /*periph */ NULL,
	    cam_sim_path(hpt_vsim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(hpt_vsim));
		cam_sim_free(hpt_vsim, /*free_devq*/ TRUE);
		mtx_unlock(&pAdapter->lock);
		hpt_vsim = NULL;
		return ENXIO;
	}
	mtx_unlock(&pAdapter->lock);

	xpt_setup_ccb(&(ccb->ccb_h), pAdapter->path, /*priority*/ 5);
	ccb->ccb_h.func_code = XPT_SASYNC_CB;
	ccb->csa.event_enable = AC_LOST_DEVICE;
	ccb->csa.callback = hpt_async;
	ccb->csa.callback_arg = hpt_vsim;
	xpt_action((union ccb *)ccb);
	free(ccb, M_DEVBUF);

	if (device_get_unit(dev) == 0) {
		/* Start the work thread.  XXX */
		launch_worker_thread();
	}

	return 0;
}

static int hpt_detach(device_t dev)
{
	return (EBUSY);
}
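/*
 * Illustrative, standalone sketch (not part of this driver): hpt_probe()
 * above matches the PCI vendor/device pair against the supported Marvell
 * parts and returns ENXIO on a miss. The same table-lookup probe pattern,
 * with hypothetical IDs and names; the guard macro is never defined here.
 */
#ifdef HPTMV_EXAMPLE_PROBE
#include <stdint.h>
#include <stdio.h>

struct example_pci_id { uint16_t vendor, device; const char *desc; };

static const struct example_pci_id example_ids[] = {
	{ 0x11ab, 0x5081, "example 8-port SATA controller" },
	{ 0x11ab, 0x5080, "example demo controller" },
};

/* Return the description on a match, NULL otherwise (analogous to ENXIO). */
static const char *example_probe(uint16_t vendor, uint16_t device)
{
	size_t i;

	for (i = 0; i < sizeof(example_ids) / sizeof(example_ids[0]); i++)
		if (example_ids[i].vendor == vendor && example_ids[i].device == device)
			return example_ids[i].desc;
	return NULL;
}

int main(void)
{
	const char *d = example_probe(0x11ab, 0x5081);

	printf("%s\n", d ? d : "not found");
	return 0;
}
#endif /* HPTMV_EXAMPLE_PROBE */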
/***************************************************************
 *  The poll function is used to simulate the interrupt when
 *  the interrupt subsystem is not functioning.
 ***************************************************************/
static void hpt_poll(struct cam_sim *sim)
{
	IAL_ADAPTER_T *pAdapter;

	pAdapter = cam_sim_softc(sim);
	hpt_intr_locked((void *)cam_sim_softc(sim));
}

/****************************************************************
 *	Name:	hpt_intr
 *	Description:	Interrupt handler.
 ****************************************************************/
static void hpt_intr(void *arg)
{
	IAL_ADAPTER_T *pAdapter;

	pAdapter = arg;
	mtx_lock(&pAdapter->lock);
	hpt_intr_locked(pAdapter);
	mtx_unlock(&pAdapter->lock);
}

static void hpt_intr_locked(IAL_ADAPTER_T *pAdapter)
{
	mtx_assert(&pAdapter->lock, MA_OWNED);
	/* KdPrintI(("----- Entering Isr() -----\n")); */
	if (mvSataInterruptServiceRoutine(&pAdapter->mvSataAdapter) == MV_TRUE) {
		_VBUS_INST(&pAdapter->VBus)
		CheckPendingCall(_VBUS_P0);
	}
	/* KdPrintI(("----- Leaving Isr() -----\n")); */
}

/**********************************************************
 *			Asynchronous Events
 **********************************************************/
#if (!defined(UNREFERENCED_PARAMETER))
#define UNREFERENCED_PARAMETER(x) (void)(x)
#endif

static void hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	/* debug XXXX */
	panic("Here");
	UNREFERENCED_PARAMETER(callback_arg);
	UNREFERENCED_PARAMETER(code);
	UNREFERENCED_PARAMETER(path);
	UNREFERENCED_PARAMETER(arg);
}

static void FlushAdapter(IAL_ADAPTER_T *pAdapter)
{
	int i;

	hpt_printk(("flush all devices\n"));

	/* flush all devices */
	for (i = 0; i < MAX_VDEVICE_PER_VBUS; i++) {
		PVDevice pVDev = pAdapter->VBus.pVDevice[i];
		if (pVDev)
			fFlushVDev(pVDev);
	}
}

static int hpt_shutdown(device_t dev)
{
	IAL_ADAPTER_T *pAdapter;

	pAdapter = device_get_softc(dev);

	EVENTHANDLER_DEREGISTER(shutdown_final, pAdapter->eh);

	mtx_lock(&pAdapter->lock);
	FlushAdapter(pAdapter);
	mtx_unlock(&pAdapter->lock);
	/*
	 * Give the flush some time to happen; otherwise
	 * "shutdown -p now" may corrupt the file system.
	 */
	DELAY(1000 * 1000 * 5);
	return 0;
}

void Check_Idle_Call(IAL_ADAPTER_T *pAdapter)
{
	_VBUS_INST(&pAdapter->VBus)

	if (mWaitingForIdle(_VBUS_P0)) {
		CheckIdleCall(_VBUS_P0);
#ifdef SUPPORT_ARRAY
		{
			int i;
			PVDevice pArray;

			for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
				if ((pArray = ArrayTables(i))->u.array.dArStamp == 0)
					continue;
				else if (pArray->u.array.rf_auto_rebuild) {
					KdPrint(("auto rebuild.\n"));
					pArray->u.array.rf_auto_rebuild = 0;
					hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, DUPLICATE);
				}
			}
		}
#endif
	}

	/* launch the awaiting commands blocked by mWaitingForIdle */
	while (pAdapter->pending_Q != NULL) {
		_VBUS_INST(&pAdapter->VBus)
		union ccb *ccb = (union ccb *)pAdapter->pending_Q->ccb_h.ccb_ccb_ptr;
		hpt_free_ccb(&pAdapter->pending_Q, ccb);
		CallAfterReturn(_VBUS_P (DPC_PROC)OsSendCommand, ccb);
	}
}

static void ccb_done(union ccb *ccb)
{
	PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter;
	IAL_ADAPTER_T *pAdapter = pmap->pAdapter;

	KdPrintI(("ccb_done: ccb %p status %x\n", ccb, ccb->ccb_h.status));

	dmamap_put(pmap);
	xpt_done(ccb);

	pAdapter->outstandingCommands--;

	if (pAdapter->outstandingCommands == 0) {
		if (DPC_Request_Nums == 0)
			Check_Idle_Call(pAdapter);
		wakeup(pAdapter);
	}
}

/****************************************************************
 *	Name:	hpt_action
 *	Description:	Process a queued command from the CAM layer.
 *	Parameters:		sim - Pointer to SIM object
 *					ccb - Pointer to SCSI command structure.
****************************************************************/ void hpt_action(struct cam_sim *sim, union ccb *ccb) { IAL_ADAPTER_T * pAdapter = (IAL_ADAPTER_T *) cam_sim_softc(sim); PBUS_DMAMAP pmap; _VBUS_INST(&pAdapter->VBus) mtx_assert(&pAdapter->lock, MA_OWNED); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("hpt_action\n")); KdPrint(("hpt_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ { /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.target_id >= MAX_VDEVICE_PER_VBUS || pAdapter->VBus.pVDevice[ccb->ccb_h.target_id]==0) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); pmap = dmamap_get(pAdapter); HPT_ASSERT(pmap); ccb->ccb_adapter = pmap; memset((void *)pmap->psg, 0, sizeof(pmap->psg)); if (mWaitingForIdle(_VBUS_P0)) hpt_queue_ccb(&pAdapter->pending_Q, ccb); else OsSendCommand(_VBUS_P ccb); /* KdPrint(("leave scsiio\n")); */ break; } case XPT_RESET_BUS: KdPrint(("reset bus\n")); fResetVBus(_VBUS_P0); xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; /* Not necessary to reset bus */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = MAX_VDEVICE_PER_VBUS; cpi->max_lun = 0; cpi->initiator_id = MAX_VDEVICE_PER_VBUS; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: KdPrint(("invalid cmd\n")); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } /* KdPrint(("leave hpt_action..............\n")); */ } /* shall be called at lock_driver() */ static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb) { if(*ccb_Q == NULL) ccb->ccb_h.ccb_ccb_ptr = ccb; else { ccb->ccb_h.ccb_ccb_ptr = (*ccb_Q)->ccb_h.ccb_ccb_ptr; (*ccb_Q)->ccb_h.ccb_ccb_ptr = (char *)ccb; } *ccb_Q = ccb; } /* shall be called at lock_driver() */ static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb) { union ccb *TempCCB; TempCCB = *ccb_Q; if(ccb->ccb_h.ccb_ccb_ptr == ccb) /*it means SCpnt is the last one in CURRCMDs*/ *ccb_Q = NULL; else { while(TempCCB->ccb_h.ccb_ccb_ptr != (char *)ccb) TempCCB = (union ccb *)TempCCB->ccb_h.ccb_ccb_ptr; TempCCB->ccb_h.ccb_ccb_ptr = ccb->ccb_h.ccb_ccb_ptr; if(*ccb_Q == ccb) *ccb_Q = TempCCB; } } #ifdef SUPPORT_ARRAY /*************************************************************************** * Function: hpt_worker_thread * Description: Do background rebuilding. 
 *					Execute in kernel thread context.
 *	Returns:		None
 ***************************************************************************/
static void hpt_worker_thread(void)
{
	for (;;) {
		mtx_lock(&DpcQueue_Lock);
		while (DpcQueue_First != DpcQueue_Last) {
			ST_HPT_DPC p;

			p = DpcQueue[DpcQueue_First];
			DpcQueue_First++;
			DpcQueue_First %= MAX_DPC;
			DPC_Request_Nums++;
			mtx_unlock(&DpcQueue_Lock);
			p.dpc(p.pAdapter, p.arg, p.flags);

			mtx_lock(&p.pAdapter->lock);
			mtx_lock(&DpcQueue_Lock);
			DPC_Request_Nums--;
			/* since we may have prevented Check_Idle_Call, do it here */
			if (DPC_Request_Nums == 0) {
				if (p.pAdapter->outstandingCommands == 0) {
					_VBUS_INST(&p.pAdapter->VBus);
					Check_Idle_Call(p.pAdapter);
					CheckPendingCall(_VBUS_P0);
				}
			}
			mtx_unlock(&p.pAdapter->lock);
			mtx_unlock(&DpcQueue_Lock);

			/* Schedule out */
			if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) {
				/* abort rebuilding process. */
				IAL_ADAPTER_T *pAdapter;
				PVDevice pArray;
				PVBus _vbus_p;
				int i;

				sx_slock(&hptmv_list_lock);
				pAdapter = gIal_Adapter;

-				while(pAdapter != 0){
+				while(pAdapter != NULL){

					mtx_lock(&pAdapter->lock);
					_vbus_p = &pAdapter->VBus;

					for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
						if ((pArray = ArrayTables(i))->u.array.dArStamp == 0)
							continue;
						else if (pArray->u.array.rf_rebuilding ||
						    pArray->u.array.rf_verifying ||
						    pArray->u.array.rf_initializing) {
							pArray->u.array.rf_abort_rebuild = 1;
						}
					}
					mtx_unlock(&pAdapter->lock);
					pAdapter = pAdapter->next;
				}
				sx_sunlock(&hptmv_list_lock);
			}
			mtx_lock(&DpcQueue_Lock);
		}
		mtx_unlock(&DpcQueue_Lock);

		/* Remove this debug option */
		/*
#ifdef DEBUG
		if (SIGISMEMBER(curproc->p_siglist, SIGSTOP))
			pause("hptrdy", 2*hz);
#endif
		*/
		kproc_suspend_check(curproc);
		pause("-", 2*hz);	/* wait for something to do */
	}
}

static struct proc *hptdaemonproc;
static struct kproc_desc hpt_kp = {
	"hpt_wt",
	hpt_worker_thread,
	&hptdaemonproc
};
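/*
 * Illustrative, standalone sketch (not part of this driver): DpcQueue above
 * is a fixed-size ring buffer indexed by DpcQueue_First/DpcQueue_Last, and
 * hpt_queue_dpc() (further below) refuses to enqueue when advancing the tail
 * would meet the head. A minimal version of that bounded ring, with
 * hypothetical names; the guard macro is never defined here.
 */
#ifdef HPTMV_EXAMPLE_RING
#include <stdio.h>

#define EX_MAX_DPC 4
static int ex_ring[EX_MAX_DPC];
static int ex_first, ex_last;	/* head and tail indices */

static int ex_enqueue(int v)
{
	int next = (ex_last + 1) % EX_MAX_DPC;

	if (next == ex_first)
		return -1;	/* full: one slot is deliberately kept empty */
	ex_ring[ex_last] = v;
	ex_last = next;
	return 0;
}

static int ex_dequeue(int *v)
{
	if (ex_first == ex_last)
		return -1;	/* empty */
	*v = ex_ring[ex_first];
	ex_first = (ex_first + 1) % EX_MAX_DPC;
	return 0;
}

int main(void)
{
	int v;

	ex_enqueue(1);
	ex_enqueue(2);
	while (ex_dequeue(&v) == 0)
		printf("%d\n", v);	/* prints 1 then 2 */
	return 0;
}
#endif /* HPTMV_EXAMPLE_RING */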
/*
 * Start this thread in hpt_attach, to prevent the kernel from running it
 * when no controller is present.
 */
static void launch_worker_thread(void)
{
	IAL_ADAPTER_T *pAdapTemp;

	kproc_start(&hpt_kp);

	sx_slock(&hptmv_list_lock);
	for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) {
		_VBUS_INST(&pAdapTemp->VBus)
		int i;
		PVDevice pVDev;

		for (i = 0; i < MAX_ARRAY_PER_VBUS; i++)
			if ((pVDev = ArrayTables(i))->u.array.dArStamp == 0)
				continue;
			else {
				if (pVDev->u.array.rf_need_rebuild && !pVDev->u.array.rf_rebuilding)
					hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapTemp, pVDev,
					    (UCHAR)((pVDev->u.array.CriticalMembers ||
					    pVDev->VDeviceType == VD_RAID_1) ? DUPLICATE : REBUILD_PARITY));
			}
	}
	sx_sunlock(&hptmv_list_lock);

	/*
	 * hpt_worker_thread needs to be suspended after shutdown sync,
	 * when the fs sync has finished.
	 */
	EVENTHANDLER_REGISTER(shutdown_post_sync, kproc_shutdown,
	    hptdaemonproc, SHUTDOWN_PRI_LAST);
}
/*
 *SYSINIT(hptwt, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, launch_worker_thread, NULL);
 */
#endif

/********************************************************************************/

int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical)
{
	union ccb *ccb = (union ccb *)pCmd->pOrgCommand;

	if (logical) {
		pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr;
		pSg->wSgSize = ccb->csio.dxfer_len;
		pSg->wSgFlag = SG_FLAG_EOT;
		return TRUE;
	}
	/* since we have provided a physical sg, nobody will ask us to build a physical sg */
	HPT_ASSERT(0);
	return FALSE;
}

/*******************************************************************************/
ULONG HPTLIBAPI GetStamp(void)
{
	/*
	 * the system variable, ticks, can't be used since it hasn't yet been
	 * active when our driver starts (ticks==0 is an invalid stamp value)
	 */
	ULONG stamp;

	do {
		stamp = random();
	} while (stamp == 0);
	return stamp;
}

static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev)
{
	int i;
	IDENTIFY_DATA2 *pIdentify = (IDENTIFY_DATA2 *)pVDev->u.disk.mv->identifyDevice;

	inquiryData->DeviceType = T_DIRECT;	/* DIRECT_ACCESS_DEVICE */
	inquiryData->AdditionalLength = (UCHAR)(sizeof(INQUIRYDATA) - 5);
#ifndef SERIAL_CMDS
	inquiryData->CommandQueue = 1;
#endif

	switch (pVDev->VDeviceType) {
	case VD_SINGLE_DISK:
	case VD_ATAPI:
	case VD_REMOVABLE:
		/* Set the removable bit, if applicable. */
		if ((pVDev->u.disk.df_removable_drive) || (pIdentify->GeneralConfiguration & 0x80))
			inquiryData->RemovableMedia = 1;

		/* Fill in vendor identification fields. */
		for (i = 0; i < 20; i += 2) {
			inquiryData->VendorId[i] = ((PUCHAR)pIdentify->ModelNumber)[i + 1];
			inquiryData->VendorId[i + 1] = ((PUCHAR)pIdentify->ModelNumber)[i];
		}

		/* Initialize unused portion of product id.
*/ for (i = 0; i < 4; i++) inquiryData->ProductId[12+i] = ' '; /* firmware revision */ for (i = 0; i < 4; i += 2) { inquiryData->ProductRevisionLevel[i] = ((PUCHAR)pIdentify->FirmwareRevision)[i+1]; inquiryData->ProductRevisionLevel[i+1] = ((PUCHAR)pIdentify->FirmwareRevision)[i]; } break; default: memcpy(&inquiryData->VendorId, "RR18xx ", 8); #ifdef SUPPORT_ARRAY switch(pVDev->VDeviceType){ case VD_RAID_0: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 1/0 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 0 Array ", 16); break; case VD_RAID_1: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 0/1 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 1 Array ", 16); break; case VD_RAID_5: memcpy(&inquiryData->ProductId, "RAID 5 Array ", 16); break; case VD_JBOD: memcpy(&inquiryData->ProductId, "JBOD Array ", 16); break; } #endif memcpy(&inquiryData->ProductRevisionLevel, "3.00", 4); break; } } static void hpt_timeout(void *arg) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)((union ccb *)arg)->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; _VBUS_INST(&pAdapter->VBus) mtx_assert(&pAdapter->lock, MA_OWNED); fResetVBus(_VBUS_P0); } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCommand pCmd = (PCommand)arg; union ccb *ccb = pCmd->pOrgCommand; struct ccb_hdr *ccb_h = &ccb->ccb_h; PBUS_DMAMAP pmap = (PBUS_DMAMAP) ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; FPSCAT_GATH psg = pCmd->pSgTable; int idx; _VBUS_INST(pVDev->pVBus) HPT_ASSERT(pCmd->cf_physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; psg->wSgSize = segs[idx].ds_len; psg->wSgFlag = (idx == nsegs-1)? 
SG_FLAG_EOT: 0; /* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ } /* psg[-1].wSgFlag = SG_FLAG_EOT; */ if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&pmap->timeout, 20 * hz, hpt_timeout, ccb); pVDev->pfnSendCommand(_VBUS_P pCmd); CheckPendingCall(_VBUS_P0); } static void HPTLIBAPI OsSendCommand(_VBUS_ARG union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; struct ccb_hdr *ccb_h = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; KdPrintI(("OsSendCommand: ccb %p cdb %x-%x-%x\n", ccb, *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[0], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[4], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[8] )); pAdapter->outstandingCommands++; if (pVDev == NULL || pVDev->vf_online == 0) { ccb->ccb_h.status = CAM_REQ_INVALID; ccb_done(ccb); goto Command_Complished; } switch(ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: /* FALLTHROUGH */ ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: ZeroMemory(ccb->csio.data_ptr, ccb->csio.dxfer_len); SetInquiryData((PINQUIRYDATA)ccb->csio.data_ptr, pVDev); ccb_h->status = CAM_REQ_CMP; break; case READ_CAPACITY: { UCHAR *rbuf=csio->data_ptr; unsigned int cap; if (pVDev->VDeviceCapacity > 0xfffffffful) { cap = 0xfffffffful; } else { cap = pVDev->VDeviceCapacity - 1; } rbuf[0] = (UCHAR)(cap>>24); rbuf[1] = (UCHAR)(cap>>16); rbuf[2] = (UCHAR)(cap>>8); rbuf[3] = (UCHAR)cap; /* Claim 512 byte blocks (big-endian). 
*/ rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb_h->status = CAM_REQ_CMP; break; } case 0x9e: /*SERVICE_ACTION_IN*/ { UCHAR *rbuf = csio->data_ptr; LBA_T cap = pVDev->VDeviceCapacity - 1; rbuf[0] = (UCHAR)(cap>>56); rbuf[1] = (UCHAR)(cap>>48); rbuf[2] = (UCHAR)(cap>>40); rbuf[3] = (UCHAR)(cap>>32); rbuf[4] = (UCHAR)(cap>>24); rbuf[5] = (UCHAR)(cap>>16); rbuf[6] = (UCHAR)(cap>>8); rbuf[7] = (UCHAR)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb_h->status = CAM_REQ_CMP; break; } case READ_6: case WRITE_6: case READ_10: case WRITE_10: case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ case 0x13: case 0x2f: { UCHAR Cdb[16]; UCHAR CdbLength; _VBUS_INST(pVDev->pVBus) PCommand pCmd = AllocateCommand(_VBUS_P0); int error; HPT_ASSERT(pCmd); CdbLength = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, Cdb, CdbLength); } else { KdPrintE(("ERROR!!!\n")); ccb->ccb_h.status = CAM_REQ_INVALID; break; } } else { bcopy(csio->cdb_io.cdb_bytes, Cdb, CdbLength); } pCmd->pOrgCommand = ccb; pCmd->pVDevice = pVDev; pCmd->pfnCompletion = fOsCommandDone; pCmd->pfnBuildSgl = fOsBuildSgl; pCmd->pSgTable = pmap->psg; switch (Cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((ULONG)Cdb[1] << 16) | ((ULONG)Cdb[2] << 8) | (ULONG)Cdb[3]; pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[4]; break; case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Lba = (HPT_U64)Cdb[2] << 56 | (HPT_U64)Cdb[3] << 48 | (HPT_U64)Cdb[4] << 40 | (HPT_U64)Cdb[5] << 32 | (HPT_U64)Cdb[6] << 24 | (HPT_U64)Cdb[7] << 16 | (HPT_U64)Cdb[8] << 8 | (HPT_U64)Cdb[9]; pCmd->uCmd.Ide.nSectors = (USHORT)Cdb[12] << 8 | (USHORT)Cdb[13]; break; default: pCmd->uCmd.Ide.Lba = (ULONG)Cdb[5] | ((ULONG)Cdb[4] << 8) | ((ULONG)Cdb[3] << 16) | ((ULONG)Cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[8] | ((USHORT)Cdb[7]<<8); break; } switch (Cdb[0]) { case READ_6: case READ_10: case 0x88: /* READ_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_READ; pCmd->cf_data_in = 1; break; case WRITE_6: case WRITE_10: case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_WRITE; pCmd->cf_data_out = 1; break; case 0x13: case 0x2f: pCmd->uCmd.Ide.Command = IDE_COMMAND_VERIFY; break; } /*///////////////////////// */ pCmd->cf_physical_sg = 1; error = bus_dmamap_load_ccb(pAdapter->io_dma_parent, pmap->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d\n", error)); if (error && error!=EINPROGRESS) { hpt_printk(("bus_dmamap_load error %d\n", error)); FreeCommand(_VBUS_P pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; dmamap_put(pmap); pAdapter->outstandingCommands--; if (pAdapter->outstandingCommands == 0) wakeup(pAdapter); xpt_done(ccb); } goto Command_Complished; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ccb_done(ccb); Command_Complished: CheckPendingCall(_VBUS_P0); return; } static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd) { union ccb *ccb = pCmd->pOrgCommand; PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; KdPrint(("fOsCommandDone(pcmd=%p, result=%d)\n", pCmd, pCmd->Result)); callout_stop(&pmap->timeout); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: 
ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(pAdapter->io_dma_parent, pmap->dma_map); FreeCommand(_VBUS_P pCmd); ccb_done(ccb); } int hpt_queue_dpc(HPT_DPC dpc, IAL_ADAPTER_T * pAdapter, void *arg, UCHAR flags) { int p; mtx_lock(&DpcQueue_Lock); p = (DpcQueue_Last + 1) % MAX_DPC; if (p==DpcQueue_First) { KdPrint(("DPC Queue full!\n")); mtx_unlock(&DpcQueue_Lock); return -1; } DpcQueue[DpcQueue_Last].dpc = dpc; DpcQueue[DpcQueue_Last].pAdapter = pAdapter; DpcQueue[DpcQueue_Last].arg = arg; DpcQueue[DpcQueue_Last].flags = flags; DpcQueue_Last = p; mtx_unlock(&DpcQueue_Lock); return 0; } #ifdef _RAID5N_ /* * Allocate memory above 16M, otherwise we may eat all low memory for ISA devices. * How about the memory for 5081 request/response array and PRD table? */ void *os_alloc_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void *os_alloc_dma_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void os_free_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void os_free_dma_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void DoXor1(ULONG *p0, ULONG *p1, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ = *p1++ ^ *p2++; } void DoXor2(ULONG *p0, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ ^= *p2++; } #endif Index: head/sys/dev/hptmv/gui_lib.c =================================================================== --- head/sys/dev/hptmv/gui_lib.c (revision 313981) +++ head/sys/dev/hptmv/gui_lib.c (revision 313982) @@ -1,1447 +1,1447 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * gui_lib.c * Copyright (c) 2002-2004 HighPoint Technologies, Inc. All rights reserved. * * Platform independent ioctl interface implementation. 
 * The platform dependent part may reuse these functions and/or use its own
 * implementation for each ioctl function.
 *
 * This implementation doesn't use any synchronization; the caller must
 * assure the proper context when calling these functions.
 */

#include
#include
#include
#include

#ifndef __KERNEL__
#define __KERNEL__
#endif

#include
#include
#include
#include

static int hpt_get_driver_capabilities(PDRIVER_CAPABILITIES cap);
static int hpt_get_controller_count(void);
static int hpt_get_controller_info(int id, PCONTROLLER_INFO pInfo);
static int hpt_get_channel_info(int id, int bus, PCHANNEL_INFO pInfo);
static int hpt_get_logical_devices(DEVICEID *pIds, int nMaxCount);
static int hpt_get_device_info(DEVICEID id, PLOGICAL_DEVICE_INFO pInfo);
static int hpt_get_device_info_v2(DEVICEID id, PLOGICAL_DEVICE_INFO_V2 pInfo);
static DEVICEID hpt_create_array(_VBUS_ARG PCREATE_ARRAY_PARAMS pParam);
static DEVICEID hpt_create_array_v2(_VBUS_ARG PCREATE_ARRAY_PARAMS_V2 pParam);
static int hpt_add_spare_disk(_VBUS_ARG DEVICEID idDisk);
static int hpt_remove_spare_disk(_VBUS_ARG DEVICEID idDisk);
static int hpt_set_array_info(_VBUS_ARG DEVICEID idArray, PALTERABLE_ARRAY_INFO pInfo);
static int hpt_set_device_info(_VBUS_ARG DEVICEID idDisk, PALTERABLE_DEVICE_INFO pInfo);
static int hpt_set_device_info_v2(_VBUS_ARG DEVICEID idDisk, PALTERABLE_DEVICE_INFO_V2 pInfo);

int check_VDevice_valid(PVDevice p)
{
	int i;
	PVDevice pVDevice;
	PVBus _vbus_p;
	IAL_ADAPTER_T *pAdapter = gIal_Adapter;

-	while(pAdapter != 0)
+	while(pAdapter != NULL)
	{
		for (i = 0; i < MV_SATA_CHANNELS_NUM; i++)
			if (&(pAdapter->VDevices[i]) == p)
				return 0;
		pAdapter = pAdapter->next;
	}

#ifdef SUPPORT_ARRAY
	pAdapter = gIal_Adapter;
-	while(pAdapter != 0)
+	while(pAdapter != NULL)
	{
		_vbus_p = &pAdapter->VBus;
		for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
			pVDevice = ArrayTables(i);
			if ((pVDevice->u.array.dArStamp != 0) && (pVDevice == p))
				return 0;
		}
		pAdapter = pAdapter->next;
	}
#endif

	return -1;
}

#ifdef SUPPORT_ARRAY
static UCHAR get_vdev_type(PVDevice pVDevice)
{
	switch (pVDevice->VDeviceType) {
	case VD_RAID_0: return AT_RAID0;
	case VD_RAID_1: return AT_RAID1;
	case VD_JBOD:   return AT_JBOD;
	case VD_RAID_5: return AT_RAID5;
	default:        return AT_UNKNOWN;
	}
}
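/*
 * Illustrative, standalone sketch (not part of this driver):
 * get_array_flag() below folds the array's state booleans into a single
 * DWORD bitmask with "f |= FLAG", which the GUI then tests bit by bit.
 * The flag values and names here are hypothetical; the guard macro is
 * never defined here.
 */
#ifdef HPTMV_EXAMPLE_FLAGS
#include <stdio.h>

#define EX_FLAG_DISABLED   (1u << 0)
#define EX_FLAG_REBUILDING (1u << 1)
#define EX_FLAG_BROKEN     (1u << 2)

int main(void)
{
	unsigned f = 0;
	int rebuilding = 1, broken = 1;

	if (rebuilding)
		f |= EX_FLAG_REBUILDING;
	if (broken)
		f |= EX_FLAG_BROKEN;

	/* consumers test individual bits without disturbing the others */
	printf("rebuilding=%d broken=%d disabled=%d\n",
	    !!(f & EX_FLAG_REBUILDING), !!(f & EX_FLAG_BROKEN),
	    !!(f & EX_FLAG_DISABLED));
	return 0;
}
#endif /* HPTMV_EXAMPLE_FLAGS */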
static DWORD get_array_flag(PVDevice pVDevice)
{
	int i;
	DWORD f = 0;

	/* The array is disabled */
	if (!pVDevice->vf_online) {
		f |= ARRAY_FLAG_DISABLED;
		/* Ignore other info */
		return f;
	}

	/* array needs synchronizing */
	if (pVDevice->u.array.rf_need_rebuild && !pVDevice->u.array.rf_duplicate_and_create)
		f |= ARRAY_FLAG_NEEDBUILDING;

	/* array is in rebuilding process */
	if (pVDevice->u.array.rf_rebuilding)
		f |= ARRAY_FLAG_REBUILDING;

	/* array is being verified */
	if (pVDevice->u.array.rf_verifying)
		f |= ARRAY_FLAG_VERIFYING;

	/* array is being initialized */
	if (pVDevice->u.array.rf_initializing)
		f |= ARRAY_FLAG_INITIALIZING;

	/* broken but may still be working */
	if (pVDevice->u.array.rf_broken)
		f |= ARRAY_FLAG_BROKEN;

	/* array has an active partition */
	if (pVDevice->vf_bootable)
		f |= ARRAY_FLAG_BOOTDISK;

	/* a newly created array */
	if (pVDevice->u.array.rf_newly_created)
		f |= ARRAY_FLAG_NEWLY_CREATED;

	/* array has boot mark set */
	if (pVDevice->vf_bootmark)
		f |= ARRAY_FLAG_BOOTMARK;

	/* auto-rebuild should start */
	if (pVDevice->u.array.rf_auto_rebuild)
		f |= ARRAY_FLAG_NEED_AUTOREBUILD;

	for (i = 0; i < pVDevice->u.array.bArnMember; i++) {
		PVDevice pMember = pVDevice->u.array.pMember[i];

		if (!pMember || !pMember->vf_online || (pMember->VDeviceType == VD_SINGLE_DISK))
			continue;

		/* array needs synchronizing */
		if (pMember->u.array.rf_need_rebuild && !pMember->u.array.rf_duplicate_and_create)
			f |= ARRAY_FLAG_NEEDBUILDING;

		/* array is in rebuilding process */
		if (pMember->u.array.rf_rebuilding)
			f |= ARRAY_FLAG_REBUILDING;

		/* array is being verified */
		if (pMember->u.array.rf_verifying)
			f |= ARRAY_FLAG_VERIFYING;

		/* array is being initialized */
		if (pMember->u.array.rf_initializing)
			f |= ARRAY_FLAG_INITIALIZING;

		/* broken but may still be working */
		if (pMember->u.array.rf_broken)
			f |= ARRAY_FLAG_BROKEN;

		/* a newly created array */
		if (pMember->u.array.rf_newly_created)
			f |= ARRAY_FLAG_NEWLY_CREATED;

		/* auto-rebuild should start */
		if (pMember->u.array.rf_auto_rebuild)
			f |= ARRAY_FLAG_NEED_AUTOREBUILD;
	}

	return f;
}

static DWORD calc_rebuild_progress(PVDevice pVDevice)
{
	int i;
	DWORD result = ((ULONG)(pVDevice->u.array.RebuildSectors >> 11) * 1000 /
	    (ULONG)(pVDevice->VDeviceCapacity >> 11) *
	    (pVDevice->u.array.bArnMember - 1)) * 10;

	for (i = 0; i < pVDevice->u.array.bArnMember; i++) {
		PVDevice pMember = pVDevice->u.array.pMember[i];

		if (!pMember || !pMember->vf_online || (pMember->VDeviceType == VD_SINGLE_DISK))
			continue;

		/* for the RAID1/0 case */
		if (pMember->u.array.rf_rebuilding || pMember->u.array.rf_verifying ||
		    pMember->u.array.rf_initializing) {
			DWORD percent = ((ULONG)(pMember->u.array.RebuildSectors >> 11) * 1000 /
			    (ULONG)(pMember->VDeviceCapacity >> 11) *
			    (pMember->u.array.bArnMember - 1)) * 10;
			if (result == 0 || result > percent)
				result = percent;
		}
	}
	if (result > 10000)
		result = 10000;
	return result;
}

static void get_array_info(PVDevice pVDevice, PHPT_ARRAY_INFO pArrayInfo)
{
	int i;

	memcpy(pArrayInfo->Name, pVDevice->u.array.ArrayName, MAX_ARRAY_NAME);
	pArrayInfo->ArrayType = get_vdev_type(pVDevice);
	pArrayInfo->BlockSizeShift = pVDevice->u.array.bArBlockSizeShift;
	pArrayInfo->RebuiltSectors = pVDevice->u.array.RebuildSectors;
	pArrayInfo->Flags = get_array_flag(pVDevice);
	pArrayInfo->RebuildingProgress = calc_rebuild_progress(pVDevice);

	pArrayInfo->nDisk = 0;
	for (i = 0; i < pVDevice->u.array.bArnMember; i++)
		if (pVDevice->u.array.pMember[i] != NULL)
			pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]);

	for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS; i++)
		pArrayInfo->Members[i] = INVALID_DEVICEID;
}
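/*
 * Illustrative, standalone sketch (not part of this driver):
 * calc_rebuild_progress() above reports progress in units of 0.01%
 * (0..10000). Shifting the sector counts right by 11 first keeps the
 * "* 1000" multiply inside 32 bits even for large disks. A check of the
 * arithmetic for a hypothetical 2-member mirror; the guard macro is never
 * defined here.
 */
#ifdef HPTMV_EXAMPLE_PROGRESS
#include <stdio.h>

int main(void)
{
	unsigned long rebuilt = 0x4000000UL;	/* sectors rebuilt so far */
	unsigned long capacity = 0x10000000UL;	/* array capacity in sectors */
	unsigned members = 2;			/* bArnMember for a mirror */

	/* same scaling as the driver: (rebuilt>>11)*1000/(cap>>11)*(n-1)*10 */
	unsigned long progress =
	    ((rebuilt >> 11) * 1000 / (capacity >> 11) * (members - 1)) * 10;

	printf("%lu.%02lu%%\n", progress / 100, progress % 100);	/* 25.00% */
	return 0;
}
#endif /* HPTMV_EXAMPLE_PROGRESS */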
static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo)
{
	int i;

	memcpy(pArrayInfo->Name, pVDevice->u.array.ArrayName, MAX_ARRAYNAME_LEN);
	pArrayInfo->ArrayType = get_vdev_type(pVDevice);
	pArrayInfo->BlockSizeShift = pVDevice->u.array.bArBlockSizeShift;
	pArrayInfo->RebuiltSectors.lo32 = pVDevice->u.array.RebuildSectors;
	pArrayInfo->RebuiltSectors.hi32 = sizeof(LBA_T) > 4 ?
	    (pVDevice->u.array.RebuildSectors >> 32) : 0;
	pArrayInfo->Flags = get_array_flag(pVDevice);
	pArrayInfo->RebuildingProgress = calc_rebuild_progress(pVDevice);

	pArrayInfo->nDisk = 0;
	for (i = 0; i < pVDevice->u.array.bArnMember; i++)
		if (pVDevice->u.array.pMember[i] != NULL)
			pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]);

	for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS_V2; i++)
		pArrayInfo->Members[i] = INVALID_DEVICEID;
}
#endif

static int get_disk_info(PVDevice pVDevice, PDEVICE_INFO pDiskInfo)
{
	MV_SATA_ADAPTER *pSataAdapter;
	MV_SATA_CHANNEL *pSataChannel;
	IAL_ADAPTER_T *pAdapter;
	MV_CHANNEL *channelInfo;
	char *p;
	int i;

	/* device location */
	pSataChannel = pVDevice->u.disk.mv;
	if (pSataChannel == NULL)
		return -1;

	pDiskInfo->TargetId = 0;
	pSataAdapter = pSataChannel->mvSataAdapter;
	if (pSataAdapter == NULL)
		return -1;

	pAdapter = pSataAdapter->IALData;

	pDiskInfo->PathId = pSataChannel->channelNumber;
	pDiskInfo->ControllerId = (UCHAR)pSataAdapter->adapterId;

	/*
	 * GUI uses DeviceModeSetting for display:
	 * (1) if the user has selected a mode, GUI/BIOS should display that mode.
	 * (2) else, if the drive is SATA/150, GUI/BIOS should display 150.
	 * (3) else, display the real mode.
	 */
	if (pVDevice->u.disk.df_user_mode_set)
		pDiskInfo->DeviceModeSetting = pVDevice->u.disk.bDeUserSelectMode;
	else if (((((PIDENTIFY_DATA)pVDevice->u.disk.mv->identifyDevice)->SataCapability) & 3) == 2)
		pDiskInfo->DeviceModeSetting = 15;
	else {
		p = (char *)&((PIDENTIFY_DATA)pVDevice->u.disk.mv->identifyDevice)->ModelNumber;
		if (*(WORD *)p == (0x5354) /*'ST'*/ &&
		    (*(WORD *)(p + 8) == (0x4153) /*'AS'*/ || (p[8] == 'A' && p[11] == 'S')))
			pDiskInfo->DeviceModeSetting = 15;
		else
			pDiskInfo->DeviceModeSetting = pVDevice->u.disk.bDeModeSetting;
	}

	pDiskInfo->UsableMode = pVDevice->u.disk.bDeUsable_Mode;

	pDiskInfo->DeviceType = PDT_HARDDISK;

	pDiskInfo->Flags = 0x0;

	/* device is disabled */
	if (!pVDevice->u.disk.df_on_line)
		pDiskInfo->Flags |= DEVICE_FLAG_DISABLED;

	/* disk has an active partition */
	if (pVDevice->vf_bootable)
		pDiskInfo->Flags |= DEVICE_FLAG_BOOTDISK;

	/* disk has boot mark set */
	if (pVDevice->vf_bootmark)
		pDiskInfo->Flags |= DEVICE_FLAG_BOOTMARK;

	pDiskInfo->Flags |= DEVICE_FLAG_SATA;

	/* is a spare disk */
	if (pVDevice->VDeviceType == VD_SPARE)
		pDiskInfo->Flags |= DEVICE_FLAG_IS_SPARE;

	memcpy(&(pDiskInfo->IdentifyData), (pSataChannel->identifyDevice), sizeof(IDENTIFY_DATA2));
	p = (char *)&pDiskInfo->IdentifyData.ModelNumber;
	for (i = 0; i < 20; i++)
		((WORD *)p)[i] = shortswap(pSataChannel->identifyDevice[IDEN_MODEL_OFFSET + i]);
	p[39] = '\0';

	channelInfo = &pAdapter->mvChannel[pSataChannel->channelNumber];
	pDiskInfo->ReadAheadSupported = channelInfo->readAheadSupported;
	pDiskInfo->ReadAheadEnabled = channelInfo->readAheadEnabled;
	pDiskInfo->WriteCacheSupported = channelInfo->writeCacheSupported;
	pDiskInfo->WriteCacheEnabled = channelInfo->writeCacheEnabled;
	pDiskInfo->TCQSupported = (pSataChannel->identifyDevice[IDEN_SUPPORTED_COMMANDS2] & (0x2)) != 0;
	pDiskInfo->TCQEnabled = pSataChannel->queuedDMA == MV_EDMA_MODE_QUEUED;
	pDiskInfo->NCQSupported = MV_SATA_GEN_2(pSataAdapter) &&
	    (pSataChannel->identifyDevice[IDEN_SATA_CAPABILITIES] & (0x0100));
	pDiskInfo->NCQEnabled = pSataChannel->queuedDMA == MV_EDMA_MODE_NATIVE_QUEUING;
	return 0;
}
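/*
 * Illustrative, standalone sketch (not part of this driver):
 * get_disk_info() above rebuilds the model string by swapping the two bytes
 * of each 16-bit identify word, because ATA strings pack two ASCII
 * characters per word with the first character in the high byte. The data
 * below is made up; the guard macro is never defined here.
 */
#ifdef HPTMV_EXAMPLE_ATASTR
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* "ST31" as identify words: 'S''T' -> 0x5354, '3''1' -> 0x3331 */
	uint16_t words[2] = { 0x5354, 0x3331 };
	char s[5];
	int i;

	for (i = 0; i < 2; i++) {
		s[2 * i]     = (char)(words[i] >> 8);	/* high byte is the first character */
		s[2 * i + 1] = (char)(words[i] & 0xff);
	}
	s[4] = '\0';
	printf("%s\n", s);	/* prints "ST31" */
	return 0;
}
#endif /* HPTMV_EXAMPLE_ATASTR */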
int hpt_get_driver_capabilities(PDRIVER_CAPABILITIES cap)
{
	ZeroMemory(cap, sizeof(DRIVER_CAPABILITIES));
	cap->dwSize = sizeof(DRIVER_CAPABILITIES);
	cap->MaximumControllers = MAX_VBUS;

	/* cap->SupportCrossControllerRAID = 0; */
	/* take care for various OSes! */
	cap->SupportCrossControllerRAID = 0;

	cap->MinimumBlockSizeShift = MinBlockSizeShift;
	cap->MaximumBlockSizeShift = MaxBlockSizeShift;
	cap->SupportDiskModeSetting = 0;
	cap->SupportSparePool = 1;
	cap->MaximumArrayNameLength = MAX_ARRAY_NAME - 1;
	cap->SupportDedicatedSpare = 0;

#ifdef SUPPORT_ARRAY
	/* Stripe */
	cap->SupportedRAIDTypes[0] = AT_RAID0;
	cap->MaximumArrayMembers[0] = MAX_MEMBERS;
	/* Mirror */
	cap->SupportedRAIDTypes[1] = AT_RAID1;
	cap->MaximumArrayMembers[1] = 2;
	/* Mirror + Stripe */
#ifdef ARRAY_V2_ONLY
	cap->SupportedRAIDTypes[2] = (AT_RAID1 << 4) | AT_RAID0;	/* RAID0/1 */
#else
	cap->SupportedRAIDTypes[2] = (AT_RAID0 << 4) | AT_RAID1;	/* RAID1/0 */
#endif
	cap->MaximumArrayMembers[2] = MAX_MEMBERS;
	/* Jbod */
	cap->SupportedRAIDTypes[3] = AT_JBOD;
	cap->MaximumArrayMembers[3] = MAX_MEMBERS;
	/* RAID5 */
#if SUPPORT_RAID5
	cap->SupportedRAIDTypes[4] = AT_RAID5;
	cap->MaximumArrayMembers[4] = MAX_MEMBERS;
#endif
#endif
	return 0;
}

int hpt_get_controller_count(void)
{
	IAL_ADAPTER_T *pAdapTemp = gIal_Adapter;
	int iControllerCount = 0;

-	while(pAdapTemp != 0)
+	while(pAdapTemp != NULL)
	{
		iControllerCount++;
		pAdapTemp = pAdapTemp->next;
	}
	return iControllerCount;
}

int hpt_get_controller_info(int id, PCONTROLLER_INFO pInfo)
{
	IAL_ADAPTER_T *pAdapTemp;
	int iControllerCount = 0;

	for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) {
		if (iControllerCount++ == id) {
			pInfo->InterruptLevel = 0;
			pInfo->ChipType = 0;
			pInfo->ChipFlags = CHIP_SUPPORT_ULTRA_100;
			strcpy(pInfo->szVendorID, "HighPoint Technologies, Inc.");
#ifdef GUI_CONTROLLER_NAME
#ifdef FORCE_ATA150_DISPLAY
			/* show "Bus Type: ATA/150" in GUI for SATA controllers */
			pInfo->ChipFlags = CHIP_SUPPORT_ULTRA_150;
#endif
			strcpy(pInfo->szProductID, GUI_CONTROLLER_NAME);
#define _set_product_id(x)
#else
#define _set_product_id(x) strcpy(pInfo->szProductID, x)
#endif
			_set_product_id("RocketRAID 18xx SATA Controller");
			pInfo->NumBuses = 8;
			pInfo->ChipFlags |= CHIP_SUPPORT_ULTRA_133 | CHIP_SUPPORT_ULTRA_150;
			return 0;
		}
	}
	return -1;
}

int hpt_get_channel_info(int id, int bus, PCHANNEL_INFO pInfo)
{
	IAL_ADAPTER_T *pAdapTemp = gIal_Adapter;
	int i, iControllerCount = 0;

-	while(pAdapTemp != 0)
+	while(pAdapTemp != NULL)
	{
		if (iControllerCount++ == id)
			goto found;
		pAdapTemp = pAdapTemp->next;
	}
	return -1;

found:
	pInfo->IoPort = 0;
	pInfo->ControlPort = 0;

	for (i = 0; i < 2; i++) {
		pInfo->Devices[i] = (DEVICEID)INVALID_DEVICEID;
	}

	if (pAdapTemp->mvChannel[bus].online == MV_TRUE)
		pInfo->Devices[0] = VDEV_TO_ID(&pAdapTemp->VDevices[bus]);
	else
		pInfo->Devices[0] = (DEVICEID)INVALID_DEVICEID;

	return 0;
}

int hpt_get_logical_devices(DEVICEID *pIds, int nMaxCount)
{
	int count = 0;
	int i, j;
	PVDevice pPhysical, pLogical;
	IAL_ADAPTER_T *pAdapTemp;

	for (i = 0; i < nMaxCount; i++)
		pIds[i] = INVALID_DEVICEID;

	/* append the arrays not registered on VBus */
	for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) {
		for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) {
			pPhysical = &pAdapTemp->VDevices[i];
			pLogical = pPhysical;

			while (pLogical->pParent)
				pLogical = pLogical->pParent;
			if (pLogical->VDeviceType == VD_SPARE)
				continue;

			for (j = 0; j < count; j++)
				if (pIds[j] == VDEV_TO_ID(pLogical))
					goto next;
			pIds[count++] = VDEV_TO_ID(pLogical);
			if (count >= nMaxCount)
				goto done;
next:;
		}
	}

done:
	return count;
}

int hpt_get_device_info(DEVICEID id, PLOGICAL_DEVICE_INFO pInfo)
{
	PVDevice pVDevice = ID_TO_VDEV(id);

	if ((id == 0) || check_VDevice_valid(pVDevice))
		return -1;

#ifdef SUPPORT_ARRAY
	if (mIsArray(pVDevice)) {
		pInfo->Type = LDT_ARRAY;
		pInfo->Capacity = pVDevice->VDeviceCapacity;
		pInfo->ParentArray = VDEV_TO_ID(pVDevice->pParent);
		get_array_info(pVDevice, &pInfo->u.array);
return 0; } #endif pInfo->Type = LDT_DEVICE; pInfo->ParentArray = pVDevice->pParent? VDEV_TO_ID(pVDevice->pParent) : INVALID_DEVICEID; /* report real capacity to be compatible with old arrays */ pInfo->Capacity = pVDevice->u.disk.dDeRealCapacity; return get_disk_info(pVDevice, &pInfo->u.device); } int hpt_get_device_info_v2(DEVICEID id, PLOGICAL_DEVICE_INFO_V2 pInfo) { PVDevice pVDevice = ID_TO_VDEV(id); if((id == 0) || check_VDevice_valid(pVDevice)) return -1; #ifdef SUPPORT_ARRAY if (mIsArray(pVDevice)) { pInfo->Type = LDT_ARRAY; pInfo->Capacity.lo32 = pVDevice->VDeviceCapacity; pInfo->Capacity.hi32 = sizeof(LBA_T)>4? (pVDevice->VDeviceCapacity>>32) : 0; pInfo->ParentArray = VDEV_TO_ID(pVDevice->pParent); get_array_info_v2(pVDevice, &pInfo->u.array); return 0; } #endif pInfo->Type = LDT_DEVICE; pInfo->ParentArray = pVDevice->pParent? VDEV_TO_ID(pVDevice->pParent) : INVALID_DEVICEID; /* report real capacity to be compatible with old arrays */ pInfo->Capacity.lo32 = pVDevice->u.disk.dDeRealCapacity; pInfo->Capacity.hi32 = 0; return get_disk_info(pVDevice, &pInfo->u.device); } #ifdef SUPPORT_ARRAY DEVICEID hpt_create_array_v2(_VBUS_ARG PCREATE_ARRAY_PARAMS_V2 pParam) { ULONG Stamp = GetStamp(); int i,j; LBA_T capacity = MAX_LBA_T; PVDevice pArray,pChild; int Loca = -1; if (pParam->nDisk > MAX_MEMBERS) return INVALID_DEVICEID; /* check in verify_vd for(i = 0; i < pParam->nDisk; i++) { PVDevice pVDev = ID_TO_VDEV(pParam->Members[i]); if (check_VDevice_valid(pVDev)) return INVALID_DEVICEID; if (mIsArray(pVDev)) return INVALID_DEVICEID; if (!pVDev->vf_online) return INVALID_DEVICEID; if (!_vbus_p) _vbus_p = pVDev->u.disk.pVBus; else if (_vbus_p != pVDev->u.disk.pVBus) return INVALID_DEVICEID; } */ _vbus_p = (ID_TO_VDEV(pParam->Members[0]))->u.disk.pVBus; if (!_vbus_p) return INVALID_DEVICEID; mArGetArrayTable(pArray); if(!pArray) return INVALID_DEVICEID; switch (pParam->ArrayType) { case AT_JBOD: pArray->VDeviceType = VD_JBOD; goto simple; case AT_RAID0: if((pParam->BlockSizeShift < MinBlockSizeShift) || (pParam->BlockSizeShift > MaxBlockSizeShift)) goto error; pArray->VDeviceType = VD_RAID_0; goto simple; case AT_RAID5: if((pParam->BlockSizeShift < MinBlockSizeShift) || (pParam->BlockSizeShift > MaxBlockSizeShift)) goto error; pArray->VDeviceType = VD_RAID_5; /* only "no build" R5 is not critical after creation. */ if ((pParam->CreateFlags & CAF_CREATE_R5_NO_BUILD)==0) pArray->u.array.rf_need_rebuild = 1; goto simple; case AT_RAID1: if(pParam->nDisk <= 2) { pArray->VDeviceType = VD_RAID_1; simple: pArray->u.array.bArnMember = pParam->nDisk; pArray->u.array.bArRealnMember = pParam->nDisk; pArray->u.array.bArBlockSizeShift = pParam->BlockSizeShift; pArray->u.array.bStripeWitch = (1 << pParam->BlockSizeShift); pArray->u.array.dArStamp = Stamp; pArray->u.array.rf_need_sync = 1; pArray->u.array.rf_newly_created = 1; if ((pParam->CreateFlags & CAF_CREATE_AND_DUPLICATE) && (pArray->VDeviceType == VD_RAID_1)) { pArray->u.array.rf_newly_created = 0; /* R1 shall still be accessible */ pArray->u.array.rf_need_rebuild = 1; pArray->u.array.rf_auto_rebuild = 1; pArray->u.array.rf_duplicate_and_create = 1; for(i = 0; i < MAX_VDEVICE_PER_VBUS; i++) if (_vbus_p->pVDevice[i] == ID_TO_VDEV(pParam->Members[0])) Loca = i; } pArray->u.array.RebuildSectors = pArray->u.array.rf_need_rebuild? 
0 : MAX_LBA_T; memcpy(pArray->u.array.ArrayName, pParam->ArrayName, MAX_ARRAY_NAME); for(i = 0; i < pParam->nDisk; i++) { pArray->u.array.pMember[i] = ID_TO_VDEV(pParam->Members[i]); pArray->u.array.pMember[i]->bSerialNumber = i; pArray->u.array.pMember[i]->pParent = pArray; /* don't unregister source disk for duplicate RAID1 */ if (i || pArray->VDeviceType!=VD_RAID_1 || (pParam->CreateFlags & CAF_CREATE_AND_DUPLICATE)==0) UnregisterVDevice(pArray->u.array.pMember[i]); if(pArray->VDeviceType == VD_RAID_5) pArray->u.array.pMember[i]->vf_cache_disk = 1; } } else { for(i = 0; i < (pParam->nDisk / 2); i++) { mArGetArrayTable(pChild); pChild->VDeviceType = VD_RAID_1; pChild->u.array.bArnMember = 2; pChild->u.array.bArRealnMember = 2; pChild->u.array.bArBlockSizeShift = pParam->BlockSizeShift; pChild->u.array.bStripeWitch = (1 << pParam->BlockSizeShift); pChild->u.array.dArStamp = Stamp; pChild->u.array.rf_need_sync = 1; pChild->u.array.rf_newly_created = 1; pChild->u.array.RebuildSectors = MAX_LBA_T; memcpy(pChild->u.array.ArrayName, pParam->ArrayName, MAX_ARRAY_NAME); for(j = 0; j < 2; j++) { pChild->u.array.pMember[j] = ID_TO_VDEV(pParam->Members[i*2 + j]); pChild->u.array.pMember[j]->bSerialNumber = j; pChild->u.array.pMember[j]->pParent = pChild; pChild->u.array.pMember[j]->pfnDeviceFailed = pfnDeviceFailed[pChild->VDeviceType]; UnregisterVDevice(pChild->u.array.pMember[j]); } pArray->u.array.pMember[i] = pChild; pChild->vf_online = 1; pChild->bSerialNumber = i; pChild->pParent = pArray; pChild->VDeviceCapacity = MIN(pChild->u.array.pMember[0]->VDeviceCapacity, pChild->u.array.pMember[1]->VDeviceCapacity); pChild->pfnSendCommand = pfnSendCommand[pChild->VDeviceType]; pChild->pfnDeviceFailed = pfnDeviceFailed[VD_RAID_0]; } pArray->VDeviceType = VD_RAID_0; pArray->u.array.bArnMember = pParam->nDisk / 2; pArray->u.array.bArRealnMember = pParam->nDisk / 2; pArray->u.array.bArBlockSizeShift = pParam->BlockSizeShift; pArray->u.array.bStripeWitch = (1 << pParam->BlockSizeShift); pArray->u.array.dArStamp = Stamp; pArray->u.array.rf_need_sync = 1; pArray->u.array.rf_newly_created = 1; memcpy(pArray->u.array.ArrayName, pParam->ArrayName, MAX_ARRAY_NAME); } break; default: goto error; } for(i = 0; i < pArray->u.array.bArnMember; i++) pArray->u.array.pMember[i]->pfnDeviceFailed = pfnDeviceFailed[pArray->VDeviceType]; if ((pParam->CreateFlags & CAF_CREATE_AND_DUPLICATE) && (pArray->VDeviceType == VD_RAID_1)) { pArray->vf_bootmark = pArray->u.array.pMember[0]->vf_bootmark; pArray->vf_bootable = pArray->u.array.pMember[0]->vf_bootable; pArray->u.array.pMember[0]->vf_bootable = 0; pArray->u.array.pMember[0]->vf_bootmark = 0; if (Loca>=0) { _vbus_p->pVDevice[Loca] = pArray; /* to comfort OS */ pArray->u.array.rf_duplicate_and_created = 1; pArray->pVBus = _vbus_p; } } else { UCHAR TempBuffer[512]; ZeroMemory(TempBuffer, 512); for(i = 0; i < pParam->nDisk; i++) { PVDevice pDisk = ID_TO_VDEV(pParam->Members[i]); pDisk->vf_bootmark = pDisk->vf_bootable = 0; fDeReadWrite(&pDisk->u.disk, 0, IDE_COMMAND_WRITE, TempBuffer); } } pArray->vf_online = 1; pArray->pParent = NULL; switch(pArray->VDeviceType) { case VD_RAID_0: for(i = 0; i < pArray->u.array.bArnMember; i++) if(pArray->u.array.pMember[i]->VDeviceCapacity < capacity) capacity = pArray->u.array.pMember[i]->VDeviceCapacity; #ifdef ARRAY_V2_ONLY capacity -= 10; #endif capacity &= ~(pArray->u.array.bStripeWitch - 1); /* shrink member capacity for RAID 1/0 */ for(i = 0; i < pArray->u.array.bArnMember; i++) if (mIsArray(pArray->u.array.pMember[i])) 
pArray->u.array.pMember[i]->VDeviceCapacity = capacity; pArray->VDeviceCapacity = capacity * pArray->u.array.bArnMember; break; case VD_RAID_1: pArray->VDeviceCapacity = MIN(pArray->u.array.pMember[0]->VDeviceCapacity, pArray->u.array.pMember[1]->VDeviceCapacity); break; case VD_JBOD: for(i = 0; i < pArray->u.array.bArnMember; i++) pArray->VDeviceCapacity += pArray->u.array.pMember[i]->VDeviceCapacity #ifdef ARRAY_V2_ONLY -10 #endif ; break; case VD_RAID_5: for(i = 0; i < pArray->u.array.bArnMember; i++) if(pArray->u.array.pMember[i]->VDeviceCapacity < capacity) capacity = pArray->u.array.pMember[i]->VDeviceCapacity; pArray->VDeviceCapacity = rounddown2(capacity, pArray->u.array.bStripeWitch) * (pArray->u.array.bArnMember - 1); break; default: goto error; } pArray->pfnSendCommand = pfnSendCommand[pArray->VDeviceType]; pArray->pfnDeviceFailed = fOsDiskFailed; SyncArrayInfo(pArray); if (!pArray->u.array.rf_duplicate_and_created) RegisterVDevice(pArray); return VDEV_TO_ID(pArray); error: for(i = 0; i < pArray->u.array.bArnMember; i++) { pChild = pArray->u.array.pMember[i]; if((pChild != NULL) && (pChild->VDeviceType != VD_SINGLE_DISK)) mArFreeArrayTable(pChild); } mArFreeArrayTable(pArray); return INVALID_DEVICEID; } DEVICEID hpt_create_array(_VBUS_ARG PCREATE_ARRAY_PARAMS pParam) { CREATE_ARRAY_PARAMS_V2 param2; param2.ArrayType = pParam->ArrayType; param2.nDisk = pParam->nDisk; param2.BlockSizeShift = pParam->BlockSizeShift; param2.CreateFlags = pParam->CreateFlags; param2.CreateTime = pParam->CreateTime; memcpy(param2.ArrayName, pParam->ArrayName, sizeof(param2.ArrayName)); memcpy(param2.Description, pParam->Description, sizeof(param2.Description)); memcpy(param2.CreateManager, pParam->CreateManager, sizeof(param2.CreateManager)); param2.Capacity.lo32 = param2.Capacity.hi32 = 0; memcpy(param2.Members, pParam->Members, sizeof(pParam->Members)); return hpt_create_array_v2(_VBUS_P &param2); } #ifdef SUPPORT_OLD_ARRAY /* this is only for old RAID 0/1 */ int old_add_disk_to_raid01(_VBUS_ARG DEVICEID idArray, DEVICEID idDisk) { PVDevice pArray1 = ID_TO_VDEV(idArray); PVDevice pArray2 = 0; PVDevice pDisk = ID_TO_VDEV(idDisk); int i; IAL_ADAPTER_T *pAdapter = gIal_Adapter; if (pArray1->pVBus!=_vbus_p) { HPT_ASSERT(0); return -1;} if(pDisk->u.disk.dDeRealCapacity < (pArray1->VDeviceCapacity / 2)) return -1; pArray2 = pArray1->u.array.pMember[1]; if(pArray2 == NULL) { /* create a Stripe */ mArGetArrayTable(pArray2); pArray2->VDeviceType = VD_RAID_0; pArray2->u.array.dArStamp = GetStamp(); pArray2->vf_format_v2 = 1; pArray2->u.array.rf_broken = 1; pArray2->u.array.bArBlockSizeShift = pArray1->u.array.bArBlockSizeShift; pArray2->u.array.bStripeWitch = (1 << pArray2->u.array.bArBlockSizeShift); pArray2->u.array.bArnMember = 2; pArray2->VDeviceCapacity = pArray1->VDeviceCapacity; pArray2->pfnSendCommand = pfnSendCommand[pArray2->VDeviceType]; pArray2->pfnDeviceFailed = pfnDeviceFailed[pArray1->VDeviceType]; memcpy(pArray2->u.array.ArrayName, pArray1->u.array.ArrayName, MAX_ARRAY_NAME); pArray2->pParent = pArray1; pArray2->bSerialNumber = 1; pArray1->u.array.pMember[1] = pArray2; pArray1->u.array.bArRealnMember++; } for(i = 0; i < pArray2->u.array.bArnMember; i++) if((pArray2->u.array.pMember[i] == NULL) || !pArray2->u.array.pMember[i]->vf_online) { if(pArray2->u.array.pMember[i] != NULL) pArray2->u.array.pMember[i]->pParent = NULL; pArray2->u.array.pMember[i] = pDisk; goto find; } return -1; find: UnregisterVDevice(pDisk); pDisk->VDeviceType = VD_SINGLE_DISK; pDisk->bSerialNumber = i; pDisk->pParent =
pArray2; pDisk->vf_format_v2 = 1; pDisk->u.disk.dDeHiddenLba = i? 10 : 0; pDisk->VDeviceCapacity = pDisk->u.disk.dDeRealCapacity; pDisk->pfnDeviceFailed = pfnDeviceFailed[pArray2->VDeviceType]; pArray2->u.array.bArRealnMember++; if(pArray2->u.array.bArnMember == pArray2->u.array.bArRealnMember){ pArray2->vf_online = 1; pArray2->u.array.rf_broken = 0; } if(pArray1->u.array.pMember[0]->vf_online && pArray1->u.array.pMember[1]->vf_online){ pArray1->u.array.bArRealnMember = pArray1->u.array.bArnMember; pArray1->u.array.rf_broken = 0; pArray1->u.array.rf_need_rebuild = 1; pArray1->u.array.rf_auto_rebuild = 1; } pArray1->u.array.RebuildSectors = 0; pArray1->u.array.dArStamp = GetStamp(); SyncArrayInfo(pArray1); return 1; } #endif int hpt_add_disk_to_array(_VBUS_ARG DEVICEID idArray, DEVICEID idDisk) { int i; LBA_T Capacity; PVDevice pArray = ID_TO_VDEV(idArray); PVDevice pDisk = ID_TO_VDEV(idDisk); if((idArray == 0) || (idDisk == 0)) return -1; if(check_VDevice_valid(pArray) || check_VDevice_valid(pDisk)) return -1; if(!pArray->u.array.rf_broken) return -1; if(pArray->VDeviceType != VD_RAID_1 && pArray->VDeviceType != VD_RAID_5) return -1; if((pDisk->VDeviceType != VD_SINGLE_DISK) && (pDisk->VDeviceType != VD_SPARE)) return -1; #ifdef SUPPORT_OLD_ARRAY /* RAID 0 + 1 */ if (pArray->vf_format_v2 && pArray->VDeviceType==VD_RAID_1 && pArray->u.array.pMember[0] && mIsArray(pArray->u.array.pMember[0])) { if(old_add_disk_to_raid01(_VBUS_P idArray, idDisk)) return 0; else return -1; } #endif Capacity = pArray->VDeviceCapacity / (pArray->u.array.bArnMember - 1); if (pArray->vf_format_v2) { if(pDisk->u.disk.dDeRealCapacity < Capacity) return -1; } else if(pDisk->VDeviceCapacity < Capacity) return -1; if (pArray->pVBus!=_vbus_p) { HPT_ASSERT(0); return -1;} for(i = 0; i < pArray->u.array.bArnMember; i++) if((pArray->u.array.pMember[i] == 0) || !pArray->u.array.pMember[i]->vf_online) { if(pArray->u.array.pMember[i] != NULL) pArray->u.array.pMember[i]->pParent = NULL; pArray->u.array.pMember[i] = pDisk; goto find; } return -1; find: UnregisterVDevice(pDisk); pDisk->VDeviceType = VD_SINGLE_DISK; pDisk->bSerialNumber = i; pDisk->pParent = pArray; if (pArray->VDeviceType==VD_RAID_5) pDisk->vf_cache_disk = 1; pDisk->pfnDeviceFailed = pfnDeviceFailed[pArray->VDeviceType]; if (pArray->vf_format_v2) { pDisk->vf_format_v2 = 1; pDisk->VDeviceCapacity = pDisk->u.disk.dDeRealCapacity; } pArray->u.array.bArRealnMember++; if(pArray->u.array.bArnMember == pArray->u.array.bArRealnMember) { pArray->u.array.rf_need_rebuild = 1; pArray->u.array.RebuildSectors = 0; pArray->u.array.rf_auto_rebuild = 1; pArray->u.array.rf_broken = 0; } pArray->u.array.RebuildSectors = 0; /* sync the whole array */ while (pArray->pParent) pArray = pArray->pParent; pArray->u.array.dArStamp = GetStamp(); SyncArrayInfo(pArray); return 0; } int hpt_add_spare_disk(_VBUS_ARG DEVICEID idDisk) { PVDevice pVDevice = ID_TO_VDEV(idDisk); DECLARE_BUFFER(PUCHAR, pbuffer); if(idDisk == 0 || check_VDevice_valid(pVDevice)) return -1; if (pVDevice->VDeviceType != VD_SINGLE_DISK || pVDevice->pParent) return -1; if (pVDevice->u.disk.pVBus!=_vbus_p) return -1; UnregisterVDevice(pVDevice); pVDevice->VDeviceType = VD_SPARE; pVDevice->vf_bootmark = 0; ZeroMemory((char *)pbuffer, 512); fDeReadWrite(&pVDevice->u.disk, 0, IDE_COMMAND_WRITE, pbuffer); SyncArrayInfo(pVDevice); return 0; } int hpt_remove_spare_disk(_VBUS_ARG DEVICEID idDisk) { PVDevice pVDevice = ID_TO_VDEV(idDisk); if(idDisk == 0 || check_VDevice_valid(pVDevice)) return -1; if 
(pVDevice->u.disk.pVBus!=_vbus_p) return -1; pVDevice->VDeviceType = VD_SINGLE_DISK; SyncArrayInfo(pVDevice); RegisterVDevice(pVDevice); return 0; } int hpt_set_array_info(_VBUS_ARG DEVICEID idArray, PALTERABLE_ARRAY_INFO pInfo) { PVDevice pVDevice = ID_TO_VDEV(idArray); if(idArray == 0 || check_VDevice_valid(pVDevice)) return -1; if (!mIsArray(pVDevice)) return -1; /* if the pVDevice isn't a top level, return -1; */ if(pVDevice->pParent != NULL) return -1; if (pVDevice->pVBus!=_vbus_p) { HPT_ASSERT(0); return -1;} if (pInfo->ValidFields & AAIF_NAME) { memset(pVDevice->u.array.ArrayName, 0, MAX_ARRAY_NAME); memcpy(pVDevice->u.array.ArrayName, pInfo->Name, sizeof(pInfo->Name)); pVDevice->u.array.rf_need_sync = 1; } if (pInfo->ValidFields & AAIF_DESCRIPTION) { memcpy(pVDevice->u.array.Description, pInfo->Description, sizeof(pInfo->Description)); pVDevice->u.array.rf_need_sync = 1; } if (pVDevice->u.array.rf_need_sync) SyncArrayInfo(pVDevice); return 0; } static int hpt_set_device_info(_VBUS_ARG DEVICEID idDisk, PALTERABLE_DEVICE_INFO pInfo) { PVDevice pVDevice = ID_TO_VDEV(idDisk); if(idDisk == 0 || check_VDevice_valid(pVDevice)) return -1; if (mIsArray(pVDevice)) return -1; if (pVDevice->u.disk.pVBus!=_vbus_p) return -1; /* TODO */ return 0; } static int hpt_set_device_info_v2(_VBUS_ARG DEVICEID idDisk, PALTERABLE_DEVICE_INFO_V2 pInfo) { PVDevice pVDevice = ID_TO_VDEV(idDisk); int sync = 0; if(idDisk==0 || check_VDevice_valid(pVDevice)) return -1; if (mIsArray(pVDevice)) return -1; if (pVDevice->u.disk.pVBus!=_vbus_p) return -1; if (pInfo->ValidFields & ADIF_MODE) { pVDevice->u.disk.bDeModeSetting = pInfo->DeviceModeSetting; pVDevice->u.disk.bDeUserSelectMode = pInfo->DeviceModeSetting; pVDevice->u.disk.df_user_mode_set = 1; fDeSelectMode((PDevice)&(pVDevice->u.disk), (UCHAR)pInfo->DeviceModeSetting); sync = 1; } if (pInfo->ValidFields & ADIF_TCQ) { if (fDeSetTCQ(&pVDevice->u.disk, pInfo->TCQEnabled, 0)) { pVDevice->u.disk.df_tcq_set = 1; pVDevice->u.disk.df_tcq = pInfo->TCQEnabled!=0; sync = 1; } } if (pInfo->ValidFields & ADIF_NCQ) { if (fDeSetNCQ(&pVDevice->u.disk, pInfo->NCQEnabled, 0)) { pVDevice->u.disk.df_ncq_set = 1; pVDevice->u.disk.df_ncq = pInfo->NCQEnabled!=0; sync = 1; } } if (pInfo->ValidFields & ADIF_WRITE_CACHE) { if (fDeSetWriteCache(&pVDevice->u.disk, pInfo->WriteCacheEnabled)) { pVDevice->u.disk.df_write_cache_set = 1; pVDevice->u.disk.df_write_cache = pInfo->WriteCacheEnabled!=0; sync = 1; } } if (pInfo->ValidFields & ADIF_READ_AHEAD) { if (fDeSetReadAhead(&pVDevice->u.disk, pInfo->ReadAheadEnabled)) { pVDevice->u.disk.df_read_ahead_set = 1; pVDevice->u.disk.df_read_ahead = pInfo->ReadAheadEnabled!=0; sync = 1; } } if (sync) SyncArrayInfo(pVDevice); return 0; } #endif /* hpt_default_ioctl() * This is a default implementation. The platform dependent part * may reuse this function and/or use it own implementation for * each ioctl function. 
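 *
 * Every opcode it handles validates its expected nInBufferSize/nOutBufferSize
 * (exact for most opcodes, a minimum for the variable-length ones) before
 * touching either buffer, and returns -1 on any mismatch. A hedged call
 * sketch (the leading _VBUS_ARG bus argument, a build-dependent macro, is
 * elided here):
 *
 *   // Query the controller count; the out size must be exactly sizeof(DWORD).
 *   static int count_controllers(DWORD *count)
 *   {
 *       DWORD bytes;
 *       return hpt_default_ioctl(HPT_IOCTL_GET_CONTROLLER_COUNT,
 *           NULL, 0, count, sizeof(DWORD), &bytes);
 *   }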
*/ int hpt_default_ioctl(_VBUS_ARG DWORD dwIoControlCode, /* operation control code */ PVOID lpInBuffer, /* input data buffer */ DWORD nInBufferSize, /* size of input data buffer */ PVOID lpOutBuffer, /* output data buffer */ DWORD nOutBufferSize, /* size of output data buffer */ PDWORD lpBytesReturned /* byte count */ ) { switch(dwIoControlCode) { case HPT_IOCTL_GET_VERSION: if (nInBufferSize != 0) return -1; if (nOutBufferSize != sizeof(DWORD)) return -1; *((DWORD*)lpOutBuffer) = HPT_INTERFACE_VERSION; break; case HPT_IOCTL_GET_CONTROLLER_COUNT: if (nOutBufferSize!=sizeof(DWORD)) return -1; *(PDWORD)lpOutBuffer = hpt_get_controller_count(); break; case HPT_IOCTL_GET_CONTROLLER_INFO: { int id; PCONTROLLER_INFO pInfo; if (nInBufferSize!=sizeof(DWORD)) return -1; if (nOutBufferSize!=sizeof(CONTROLLER_INFO)) return -1; id = *(DWORD *)lpInBuffer; pInfo = (PCONTROLLER_INFO)lpOutBuffer; if (hpt_get_controller_info(id, pInfo)!=0) return -1; } break; case HPT_IOCTL_GET_CHANNEL_INFO: { int id, bus; PCHANNEL_INFO pInfo; if (nInBufferSize!=8) return -1; if (nOutBufferSize!=sizeof(CHANNEL_INFO)) return -1; id = *(DWORD *)lpInBuffer; bus = ((DWORD *)lpInBuffer)[1]; pInfo = (PCHANNEL_INFO)lpOutBuffer; if (hpt_get_channel_info(id, bus, pInfo)!=0) return -1; } break; case HPT_IOCTL_GET_LOGICAL_DEVICES: { DWORD nMax; DEVICEID *pIds; if (nInBufferSize!=sizeof(DWORD)) return -1; nMax = *(DWORD *)lpInBuffer; if (nOutBufferSize < sizeof(DWORD)+sizeof(DWORD)*nMax) return -1; pIds = ((DEVICEID *)lpOutBuffer)+1; *(DWORD*)lpOutBuffer = hpt_get_logical_devices(pIds, nMax); } break; case HPT_IOCTL_GET_DEVICE_INFO: { DEVICEID id; PLOGICAL_DEVICE_INFO pInfo; if (nInBufferSize!=sizeof(DEVICEID)) return -1; if (nOutBufferSize!=sizeof(LOGICAL_DEVICE_INFO)) return -1; id = *(DWORD *)lpInBuffer; if (id == INVALID_DEVICEID) return -1; pInfo = (PLOGICAL_DEVICE_INFO)lpOutBuffer; memset(pInfo, 0, sizeof(LOGICAL_DEVICE_INFO)); if (hpt_get_device_info(id, pInfo)!=0) return -1; } break; case HPT_IOCTL_GET_DEVICE_INFO_V2: { DEVICEID id; PLOGICAL_DEVICE_INFO_V2 pInfo; if (nInBufferSize!=sizeof(DEVICEID)) return -1; if (nOutBufferSize!=sizeof(LOGICAL_DEVICE_INFO_V2)) return -1; id = *(DWORD *)lpInBuffer; if (id == INVALID_DEVICEID) return -1; pInfo = (PLOGICAL_DEVICE_INFO_V2)lpOutBuffer; memset(pInfo, 0, sizeof(LOGICAL_DEVICE_INFO_V2)); if (hpt_get_device_info_v2(id, pInfo)!=0) return -1; } break; #ifdef SUPPORT_ARRAY case HPT_IOCTL_CREATE_ARRAY: { if (nInBufferSize!=sizeof(CREATE_ARRAY_PARAMS)) return -1; if (nOutBufferSize!=sizeof(DEVICEID)) return -1; *(DEVICEID *)lpOutBuffer = hpt_create_array(_VBUS_P (PCREATE_ARRAY_PARAMS)lpInBuffer); if(*(DEVICEID *)lpOutBuffer == INVALID_DEVICEID) return -1; } break; case HPT_IOCTL_CREATE_ARRAY_V2: { if (nInBufferSize!=sizeof(CREATE_ARRAY_PARAMS_V2)) return -1; if (nOutBufferSize!=sizeof(DEVICEID)) return -1; *(DEVICEID *)lpOutBuffer = hpt_create_array_v2(_VBUS_P (PCREATE_ARRAY_PARAMS_V2)lpInBuffer); if (*(DEVICEID *)lpOutBuffer == INVALID_DEVICEID) return -1; } break; case HPT_IOCTL_SET_ARRAY_INFO: { DEVICEID idArray; PALTERABLE_ARRAY_INFO pInfo; if (nInBufferSize!=sizeof(HPT_SET_ARRAY_INFO)) return -1; if (nOutBufferSize!=0) return -1; idArray = ((PHPT_SET_ARRAY_INFO)lpInBuffer)->idArray; pInfo = &((PHPT_SET_ARRAY_INFO)lpInBuffer)->Info; if(hpt_set_array_info(_VBUS_P idArray, pInfo)) return -1; } break; case HPT_IOCTL_SET_DEVICE_INFO: { DEVICEID idDisk; PALTERABLE_DEVICE_INFO pInfo; if (nInBufferSize!=sizeof(HPT_SET_DEVICE_INFO)) return -1; if (nOutBufferSize!=0) return -1; idDisk = 
((PHPT_SET_DEVICE_INFO)lpInBuffer)->idDisk; pInfo = &((PHPT_SET_DEVICE_INFO)lpInBuffer)->Info; if(hpt_set_device_info(_VBUS_P idDisk, pInfo) != 0) return -1; } break; case HPT_IOCTL_SET_DEVICE_INFO_V2: { DEVICEID idDisk; PALTERABLE_DEVICE_INFO_V2 pInfo; if (nInBufferSize < sizeof(HPT_SET_DEVICE_INFO_V2)) return -1; if (nOutBufferSize!=0) return -1; idDisk = ((PHPT_SET_DEVICE_INFO_V2)lpInBuffer)->idDisk; pInfo = &((PHPT_SET_DEVICE_INFO_V2)lpInBuffer)->Info; if(hpt_set_device_info_v2(_VBUS_P idDisk, pInfo) != 0) return -1; } break; case HPT_IOCTL_SET_BOOT_MARK: { DEVICEID id; PVDevice pTop; int i; IAL_ADAPTER_T *pAdapter = gIal_Adapter; PVBus pVBus; if (nInBufferSize!=sizeof(DEVICEID)) return -1; id = *(DEVICEID *)lpInBuffer; while(pAdapter != 0) { pVBus = &pAdapter->VBus; for(i = 0; i < MAX_VDEVICE_PER_VBUS; i++) { if(!(pTop = pVBus->pVDevice[i])) continue; if (pTop->pVBus!=_vbus_p) return -1; while (pTop->pParent) pTop = pTop->pParent; if (id==0 && pTop->vf_bootmark) pTop->vf_bootmark = 0; else if (pTop==ID_TO_VDEV(id) && !pTop->vf_bootmark) pTop->vf_bootmark = 1; else continue; SyncArrayInfo(pTop); break; } pAdapter = pAdapter->next; } } break; case HPT_IOCTL_ADD_SPARE_DISK: { DEVICEID id; if (nInBufferSize!=sizeof(DEVICEID)) return -1; if (nOutBufferSize!=0) return -1; id = *(DEVICEID *)lpInBuffer; if(hpt_add_spare_disk(_VBUS_P id)) return -1; } break; case HPT_IOCTL_REMOVE_SPARE_DISK: { DEVICEID id; if (nInBufferSize!=sizeof(DEVICEID)) return -1; if (nOutBufferSize!=0) return -1; id = *(DEVICEID *)lpInBuffer; if(hpt_remove_spare_disk(_VBUS_P id)) return -1; } break; case HPT_IOCTL_ADD_DISK_TO_ARRAY: { DEVICEID id1,id2; id1 = ((PHPT_ADD_DISK_TO_ARRAY)lpInBuffer)->idArray; id2 = ((PHPT_ADD_DISK_TO_ARRAY)lpInBuffer)->idDisk; if (nInBufferSize != sizeof(HPT_ADD_DISK_TO_ARRAY)) return -1; if (nOutBufferSize != 0) return -1; if(hpt_add_disk_to_array(_VBUS_P id1, id2)) return -1; } break; #endif case HPT_IOCTL_GET_DRIVER_CAPABILITIES: { PDRIVER_CAPABILITIES cap; if (nOutBufferSize < sizeof(DRIVER_CAPABILITIES)) return -1; cap = (PDRIVER_CAPABILITIES)lpOutBuffer; if (hpt_get_driver_capabilities(cap)) return -1; } break; case HPT_IOCTL_GET_CONTROLLER_IDS: { DWORD id; IAL_ADAPTER_T *pAdapTemp; int iControllerCount = 0; if (nInBufferSize != sizeof(DWORD)) return -1; id = *(DWORD *)lpInBuffer; for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) if (iControllerCount++==id) break; if (!pAdapTemp) return -1; if (nOutBufferSize < 4) return -1; *(DWORD*)lpOutBuffer = ((DWORD)pAdapTemp->mvSataAdapter.pciConfigDeviceId << 16) | 0x11AB; return 0; } case HPT_IOCTL_EPROM_IO: { DWORD id = ((DWORD*)lpInBuffer)[0]; DWORD offset = ((DWORD*)lpInBuffer)[1]; DWORD direction = ((DWORD*)lpInBuffer)[2]; DWORD length = ((DWORD*)lpInBuffer)[3]; IAL_ADAPTER_T *pAdapTemp; int iControllerCount = 0; for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) if (iControllerCount++==id) break; if (!pAdapTemp) return -1; if (nInBufferSize < sizeof(DWORD) * 4 + (direction? length : 0) || nOutBufferSize < (direction? 0 : length)) return -1; if (direction == 0) /* read */ sx508x_flash_access(&pAdapTemp->mvSataAdapter, offset, lpOutBuffer, length, 1); else sx508x_flash_access(&pAdapTemp->mvSataAdapter, offset, (char *)lpInBuffer + 16, length, 0); return 0; } break; default: return -1; } if (lpBytesReturned) *lpBytesReturned = nOutBufferSize; return 0; } Index: head/sys/dev/hptmv/hptproc.c =================================================================== --- head/sys/dev/hptmv/hptproc.c (revision 313981) +++ head/sys/dev/hptmv/hptproc.c (revision 313982) @@ -1,650 +1,650 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * hptproc.c sysctl support */ #include #include #include #include #include #include #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include int hpt_rescan_all(void); /***************************************************************************/ static char hptproc_buffer[256]; extern char DRIVER_VERSION[]; typedef struct sysctl_req HPT_GET_INFO; static int hpt_set_asc_info(IAL_ADAPTER_T *pAdapter, char *buffer,int length) { int orig_length = length+4; PVBus _vbus_p = &pAdapter->VBus; PVDevice pArray; PVDevice pSubArray, pVDev; UINT i, iarray, ichan; struct cam_periph *periph = NULL; mtx_lock(&pAdapter->lock); #ifdef SUPPORT_ARRAY if (length>=8 && strncmp(buffer, "rebuild ", 8)==0) { buffer+=8; length-=8; if (length>=5 && strncmp(buffer, "start", 5)==0) { for(i = 0; i < MAX_ARRAY_PER_VBUS; i++) if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; else{ if (pArray->u.array.rf_need_rebuild && !pArray->u.array.rf_rebuilding) hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, (UCHAR)((pArray->u.array.CriticalMembers || pArray->VDeviceType == VD_RAID_1)? DUPLICATE : REBUILD_PARITY)); } mtx_unlock(&pAdapter->lock); return orig_length; } else if (length>=4 && strncmp(buffer, "stop", 4)==0) { for(i = 0; i < MAX_ARRAY_PER_VBUS; i++) if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; else{ if (pArray->u.array.rf_rebuilding) pArray->u.array.rf_abort_rebuild = 1; } mtx_unlock(&pAdapter->lock); return orig_length; } else if (length>=3 && buffer[1]==','&& buffer[0]>='1'&& buffer[2]>='1') { iarray = buffer[0]-'1'; ichan = buffer[2]-'1'; if(iarray >= MAX_VDEVICE_PER_VBUS || ichan >= MV_SATA_CHANNELS_NUM) return -EINVAL; pArray = _vbus_p->pVDevice[iarray]; if (!pArray || (pArray->vf_online == 0)) { mtx_unlock(&pAdapter->lock); return -EINVAL; } for (i=0;i<MV_SATA_CHANNELS_NUM;i++) if(i == ichan) goto rebuild; mtx_unlock(&pAdapter->lock); return -EINVAL; rebuild: pVDev = &pAdapter->VDevices[ichan]; if(!pVDev->u.disk.df_on_line || pVDev->pParent) { mtx_unlock(&pAdapter->lock); return -EINVAL; } /* Not allow to use a mounted disk ??? 
test*/ for(i = 0; i < MAX_VDEVICE_PER_VBUS; i++) if(pVDev == _vbus_p->pVDevice[i]) { periph = hpt_get_periph(pAdapter->mvSataAdapter.adapterId,i); if (periph != NULL && periph->refcount >= 1) { hpt_printk(("Can not use disk used by OS!\n")); mtx_unlock(&pAdapter->lock); return -EINVAL; } /* the Mounted Disk isn't delete */ } switch(pArray->VDeviceType) { case VD_RAID_1: case VD_RAID_5: { pSubArray = pArray; loop: if(hpt_add_disk_to_array(_VBUS_P VDEV_TO_ID(pSubArray), VDEV_TO_ID(pVDev)) == -1) { mtx_unlock(&pAdapter->lock); return -EINVAL; } pSubArray->u.array.rf_auto_rebuild = 0; pSubArray->u.array.rf_abort_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pSubArray, DUPLICATE); break; } case VD_RAID_0: for (i = 0; (UCHAR)i < pArray->u.array.bArnMember; i++) if(pArray->u.array.pMember[i] && mIsArray(pArray->u.array.pMember[i]) && (pArray->u.array.pMember[i]->u.array.rf_broken == 1)) { pSubArray = pArray->u.array.pMember[i]; goto loop; } default: mtx_unlock(&pAdapter->lock); return -EINVAL; } mtx_unlock(&pAdapter->lock); return orig_length; } } else if (length>=7 && strncmp(buffer, "verify ", 7)==0) { buffer+=7; length-=7; if (length>=6 && strncmp(buffer, "start ", 6)==0) { buffer+=6; length-=6; if (length>=1 && *buffer>='1') { iarray = *buffer-'1'; if(iarray >= MAX_VDEVICE_PER_VBUS) { mtx_unlock(&pAdapter->lock); return -EINVAL; } pArray = _vbus_p->pVDevice[iarray]; if (!pArray || (pArray->vf_online == 0)) { mtx_unlock(&pAdapter->lock); return -EINVAL; } if(pArray->VDeviceType != VD_RAID_1 && pArray->VDeviceType != VD_RAID_5) { mtx_unlock(&pAdapter->lock); return -EINVAL; } if (!(pArray->u.array.rf_need_rebuild || pArray->u.array.rf_rebuilding || pArray->u.array.rf_verifying || pArray->u.array.rf_initializing)) { pArray->u.array.RebuildSectors = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, VERIFY); } mtx_unlock(&pAdapter->lock); return orig_length; } } else if (length>=5 && strncmp(buffer, "stop ", 5)==0) { buffer+=5; length-=5; if (length>=1 && *buffer>='1') { iarray = *buffer-'1'; if(iarray >= MAX_VDEVICE_PER_VBUS) { mtx_unlock(&pAdapter->lock); return -EINVAL; } pArray = _vbus_p->pVDevice[iarray]; if (!pArray || (pArray->vf_online == 0)) { mtx_unlock(&pAdapter->lock); return -EINVAL; } if(pArray->u.array.rf_verifying) { pArray->u.array.rf_abort_rebuild = 1; } mtx_unlock(&pAdapter->lock); return orig_length; } } } else #ifdef _RAID5N_ if (length>=10 && strncmp(buffer, "writeback ", 10)==0) { buffer+=10; length-=10; if (length>=1 && *buffer>='0' && *buffer<='1') { _vbus_(r5.enable_write_back) = *buffer-'0'; if (_vbus_(r5.enable_write_back)) hpt_printk(("RAID5 write back enabled")); mtx_unlock(&pAdapter->lock); return orig_length; } } else #endif #endif if (0) {} /* just to compile */ #ifdef DEBUG else if (length>=9 && strncmp(buffer, "dbglevel ", 9)==0) { buffer+=9; length-=9; if (length>=1 && *buffer>='0' && *buffer<='3') { hpt_dbg_level = *buffer-'0'; mtx_unlock(&pAdapter->lock); return orig_length; } } else if (length>=8 && strncmp(buffer, "disable ", 8)==0) { /* TO DO */ } #endif mtx_unlock(&pAdapter->lock); return -EINVAL; } /* * Since we have only one sysctl node, add adapter ID in the command * line string: e.g. 
"hpt 0 rebuild start" */ static int hpt_set_info(int length) { int retval; #ifdef SUPPORT_IOCTL PUCHAR ke_area; int err; DWORD dwRet; PHPT_IOCTL_PARAM piop; #endif char *buffer = hptproc_buffer; if (length >= 6) { if (strncmp(buffer,"hpt ",4) == 0) { IAL_ADAPTER_T *pAdapter; retval = buffer[4]-'0'; for (pAdapter=gIal_Adapter; pAdapter; pAdapter=pAdapter->next) { if (pAdapter->mvSataAdapter.adapterId==retval) return (retval = hpt_set_asc_info(pAdapter, buffer+6, length-6)) >= 0? retval : -EINVAL; } return -EINVAL; } #ifdef SUPPORT_IOCTL piop = (PHPT_IOCTL_PARAM)buffer; if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrintE(("ioctl=%d in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); /* * map buffer to kernel. */ if (piop->nInBufferSize > PAGE_SIZE || piop->nOutBufferSize > PAGE_SIZE || piop->nInBufferSize+piop->nOutBufferSize > PAGE_SIZE) { KdPrintE(("User buffer too large\n")); return -EINVAL; } ke_area = malloc(piop->nInBufferSize+piop->nOutBufferSize, M_DEVBUF, M_NOWAIT); if (ke_area == NULL) { KdPrintE(("Couldn't allocate kernel mem.\n")); return -EINVAL; } if (piop->nInBufferSize) { if (copyin((void*)(ULONG_PTR)piop->lpInBuffer, ke_area, piop->nInBufferSize) != 0) { KdPrintE(("Failed to copyin from lpInBuffer\n")); free(ke_area, M_DEVBUF); return -EFAULT; } } /* * call kernel handler. */ err = Kernel_DeviceIoControl(&gIal_Adapter->VBus, piop->dwIoControlCode, ke_area, piop->nInBufferSize, ke_area + piop->nInBufferSize, piop->nOutBufferSize, &dwRet); if (err==0) { if (piop->nOutBufferSize) copyout(ke_area + piop->nInBufferSize, (void*)(ULONG_PTR)piop->lpOutBuffer, piop->nOutBufferSize); if (piop->lpBytesReturned) copyout(&dwRet, (void*)(ULONG_PTR)piop->lpBytesReturned, sizeof(DWORD)); free(ke_area, M_DEVBUF); return length; } else KdPrintW(("Kernel_ioctl(): return %d\n", err)); free(ke_area, M_DEVBUF); return -EINVAL; } else { KdPrintW(("Wrong signature: %x\n", piop->Magic)); return -EINVAL; } #endif } return -EINVAL; } #define shortswap(w) ((WORD)((w)>>8 | ((w) & 0xFF)<<8)) static void get_disk_name(char *name, PDevice pDev) { int i; MV_SATA_CHANNEL *pMvSataChannel = pDev->mv; IDENTIFY_DATA2 *pIdentifyData = (IDENTIFY_DATA2 *)pMvSataChannel->identifyDevice; for (i = 0; i < 10; i++) ((WORD*)name)[i] = shortswap(pIdentifyData->ModelNumber[i]); name[20] = '\0'; } static int hpt_copy_info(HPT_GET_INFO *pinfo, char *fmt, ...) 
{ int printfretval; va_list ap; if(fmt == NULL) { *hptproc_buffer = 0; return (SYSCTL_OUT(pinfo, hptproc_buffer, 1)); } else { va_start(ap, fmt); printfretval = vsnprintf(hptproc_buffer, sizeof(hptproc_buffer), fmt, ap); va_end(ap); return(SYSCTL_OUT(pinfo, hptproc_buffer, strlen(hptproc_buffer))); } } static void hpt_copy_disk_info(HPT_GET_INFO *pinfo, PVDevice pVDev, UINT iChan) { char name[32], arrayname[16], *status; get_disk_name(name, &pVDev->u.disk); if (!pVDev->u.disk.df_on_line) status = "Disabled"; else if (pVDev->VDeviceType==VD_SPARE) status = "Spare "; else status = "Normal "; #ifdef SUPPORT_ARRAY if(pVDev->pParent) { memcpy(arrayname, pVDev->pParent->u.array.ArrayName, MAX_ARRAY_NAME); if (pVDev->pParent->u.array.CriticalMembers & (1<<pVDev->bSerialNumber)) status = "Degraded"; } else #endif arrayname[0]=0; hpt_copy_info(pinfo, "Channel %d %s %5dMB %s %s\n", iChan+1, name, pVDev->VDeviceCapacity>>11, status, arrayname); } #ifdef SUPPORT_ARRAY static void hpt_copy_array_info(HPT_GET_INFO *pinfo, int nld, PVDevice pArray) { int i; - char *sType=0, *sStatus=0; + char *sType = NULL, *sStatus = NULL; char buf[32]; PVDevice pTmpArray; switch (pArray->VDeviceType) { case VD_RAID_0: for (i = 0; (UCHAR)i < pArray->u.array.bArnMember; i++) if(pArray->u.array.pMember[i]) { if(mIsArray(pArray->u.array.pMember[i])) sType = "RAID 1/0 "; /* TO DO */ else sType = "RAID 0 "; break; } break; case VD_RAID_1: sType = "RAID 1 "; break; case VD_JBOD: sType = "JBOD "; break; case VD_RAID_5: sType = "RAID 5 "; break; default: sType = "N/A "; break; } if (pArray->vf_online == 0) sStatus = "Disabled"; else if (pArray->u.array.rf_broken) sStatus = "Critical"; for (i = 0; (UCHAR)i < pArray->u.array.bArnMember; i++) { if (!sStatus) { if(mIsArray(pArray->u.array.pMember[i])) pTmpArray = pArray->u.array.pMember[i]; else pTmpArray = pArray; if (pTmpArray->u.array.rf_rebuilding) { #ifdef DEBUG sprintf(buf, "Rebuilding %lldMB", (pTmpArray->u.array.RebuildSectors>>11)); #else sprintf(buf, "Rebuilding %d%%", (UINT)((pTmpArray->u.array.RebuildSectors>>11)*100/((pTmpArray->VDeviceCapacity/(pTmpArray->u.array.bArnMember-1))>>11))); #endif sStatus = buf; } else if (pTmpArray->u.array.rf_verifying) { sprintf(buf, "Verifying %d%%", (UINT)((pTmpArray->u.array.RebuildSectors>>11)*100/((pTmpArray->VDeviceCapacity/(pTmpArray->u.array.bArnMember-1))>>11))); sStatus = buf; } else if (pTmpArray->u.array.rf_need_rebuild) sStatus = "Critical"; else if (pTmpArray->u.array.rf_broken) sStatus = "Critical"; if(pTmpArray == pArray) goto out; } else goto out; } out: if (!sStatus) sStatus = "Normal"; hpt_copy_info(pinfo, "%2d %11s %-20s %5lldMB %-16s", nld, sType, pArray->u.array.ArrayName, pArray->VDeviceCapacity>>11, sStatus); } #endif static int hpt_get_info(IAL_ADAPTER_T *pAdapter, HPT_GET_INFO *pinfo) { PVBus _vbus_p = &pAdapter->VBus; struct cam_periph *periph = NULL; UINT channel,j,i; PVDevice pVDev; #ifndef FOR_DEMO mtx_lock(&pAdapter->lock); if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } mtx_unlock(&pAdapter->lock); #endif hpt_copy_info(pinfo, "Controller #%d:\n\n", pAdapter->mvSataAdapter.adapterId); hpt_copy_info(pinfo, "Physical device list\n"); hpt_copy_info(pinfo, "Channel Model Capacity Status Array\n"); hpt_copy_info(pinfo, "-------------------------------------------------------------------\n"); for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pVDev = &(pAdapter->VDevices[channel]); if(pVDev->u.disk.df_on_line) hpt_copy_disk_info(pinfo, pVDev, channel);
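/*
 * Unit note for the ">>11" shifts used in hpt_copy_disk_info() and
 * hpt_copy_array_info() above: capacities are kept in 512-byte sectors,
 * and 2^11 sectors are exactly one megabyte (2048 * 512 = 1048576 bytes).
 * A worked sketch of the same progress arithmetic (names hypothetical,
 * assuming <stdint.h>):
 *
 *   static unsigned rebuild_percent(uint64_t rebuilt_sectors,
 *                                   uint64_t capacity_sectors, unsigned n)
 *   {
 *       // Per-member share of an n-disk parity array, as in the driver.
 *       uint64_t per_disk_mb = (capacity_sectors / (n - 1)) >> 11;
 *       uint64_t done_mb = rebuilt_sectors >> 11;    // sectors -> MB
 *       return per_disk_mb ? (unsigned)(done_mb * 100 / per_disk_mb) : 0;
 *   }
 */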
} hpt_copy_info(pinfo, "\nLogical device list\n"); hpt_copy_info(pinfo, "No. Type Name Capacity Status OsDisk\n"); hpt_copy_info(pinfo, "--------------------------------------------------------------------------\n"); j=1; for(i = 0; i < MAX_VDEVICE_PER_VBUS; i++){ pVDev = _vbus_p->pVDevice[i]; if(pVDev){ j=i+1; #ifdef SUPPORT_ARRAY if (mIsArray(pVDev)) { is_array: hpt_copy_array_info(pinfo, j, pVDev); } else #endif { char name[32]; /* it may be add to an array after driver loaded, check it */ #ifdef SUPPORT_ARRAY if (pVDev->pParent) /* in this case, pVDev can only be a RAID 1 source disk. */ if (pVDev->pParent->VDeviceType==VD_RAID_1 && pVDev==pVDev->pParent->u.array.pMember[0]) goto is_array; #endif get_disk_name(name, &pVDev->u.disk); hpt_copy_info(pinfo, "%2d %s %s %5dMB %-16s", j, "Single disk", name, pVDev->VDeviceCapacity>>11, /* gmm 2001-6-19: Check if pDev has been added to an array. */ ((pVDev->pParent) ? "Unavailable" : "Normal")); } periph = hpt_get_periph(pAdapter->mvSataAdapter.adapterId, i); if (periph == NULL) hpt_copy_info(pinfo," %s\n","not registered"); else hpt_copy_info(pinfo," %s%d\n", periph->periph_name, periph->unit_number); } } return 0; } static __inline int hpt_proc_in(SYSCTL_HANDLER_ARGS, int *len) { int i, error=0; *len = 0; if ((req->newlen - req->newidx) >= sizeof(hptproc_buffer)) { error = EINVAL; } else { i = (req->newlen - req->newidx); error = SYSCTL_IN(req, hptproc_buffer, i); if (!error) *len = i; (hptproc_buffer)[i] = '\0'; } return (error); } static int hpt_status(SYSCTL_HANDLER_ARGS) { int length, error=0, retval=0; IAL_ADAPTER_T *pAdapter; error = hpt_proc_in(oidp, arg1, arg2, req, &length); if (req->newptr != NULL) { if (error || length == 0) { KdPrint(("error!\n")); retval = EINVAL; goto out; } if (hpt_set_info(length) >= 0) retval = 0; else retval = EINVAL; goto out; } hpt_copy_info(req, "%s Version %s\n", DRIVER_NAME, DRIVER_VERSION); for (pAdapter=gIal_Adapter; pAdapter; pAdapter=pAdapter->next) { if (hpt_get_info(pAdapter, req) < 0) { retval = EINVAL; break; } } hpt_copy_info(req, NULL); goto out; out: return (retval); } #define xhptregister_node(name) hptregister_node(name) #if __FreeBSD_version >= 1100024 #define hptregister_node(name) \ SYSCTL_ROOT_NODE(OID_AUTO, name, CTLFLAG_RW, 0, "Get/Set " #name " state root node"); \ SYSCTL_OID(_ ## name, OID_AUTO, status, CTLTYPE_STRING|CTLFLAG_RW, \ NULL, 0, hpt_status, "A", "Get/Set " #name " state") #else #define hptregister_node(name) \ SYSCTL_NODE(, OID_AUTO, name, CTLFLAG_RW, 0, "Get/Set " #name " state root node"); \ SYSCTL_OID(_ ## name, OID_AUTO, status, CTLTYPE_STRING|CTLFLAG_RW, \ NULL, 0, hpt_status, "A", "Get/Set " #name " state") #endif xhptregister_node(PROC_DIR_NAME); Index: head/sys/dev/hptmv/ioctl.c =================================================================== --- head/sys/dev/hptmv/ioctl.c (revision 313981) +++ head/sys/dev/hptmv/ioctl.c (revision 313982) @@ -1,949 +1,949 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * ioctl.c ioctl interface implementation */ #include #include #include #include #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include #pragma pack(1) typedef struct _HPT_REBUILD_PARAM { DEVICEID idMirror; DWORD Lba; UCHAR nSector; } HPT_REBUILD_PARAM, *PHPT_REBUILD_PARAM; #pragma pack() #define MAX_EVENTS 10 static HPT_EVENT hpt_event_queue[MAX_EVENTS]; static int event_queue_head=0, event_queue_tail=0; static int hpt_get_event(PHPT_EVENT pEvent); static int hpt_set_array_state(DEVICEID idArray, DWORD state); static void lock_driver_idle(IAL_ADAPTER_T *pAdapter); static void HPTLIBAPI thread_io_done(_VBUS_ARG PCommand pCmd); static int HPTLIBAPI R1ControlSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSgTable, int logical); static void get_disk_location(PDevice pDev, int *controller, int *channel) { IAL_ADAPTER_T *pAdapTemp; int i, j; *controller = *channel = 0; for (i=1, pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next, i++) { for (j=0; j<MV_SATA_CHANNELS_NUM; j++) { if (pDev == &pAdapTemp->VDevices[j].u.disk) { *controller = i; *channel = j; return; } } } } static int event_queue_add(PHPT_EVENT pEvent) { int p; p = (event_queue_tail + 1) % MAX_EVENTS; if (p==event_queue_head) { return -1; } hpt_event_queue[event_queue_tail] = *pEvent; event_queue_tail = p; return 0; } static int event_queue_remove(PHPT_EVENT pEvent) { if (event_queue_head != event_queue_tail) { *pEvent = hpt_event_queue[event_queue_head]; event_queue_head++; event_queue_head %= MAX_EVENTS; return 0; } return -1; } void HPTLIBAPI ioctl_ReportEvent(UCHAR event, PVOID param) { HPT_EVENT e; ZeroMemory(&e, sizeof(e)); e.EventType = event; switch(event) { case ET_INITIALIZE_ABORTED: case ET_INITIALIZE_FAILED: memcpy(e.Data, ((PVDevice)param)->u.array.ArrayName, MAX_ARRAY_NAME); case ET_INITIALIZE_STARTED: case ET_INITIALIZE_FINISHED: case ET_REBUILD_STARTED: case ET_REBUILD_ABORTED: case ET_REBUILD_FAILED: case ET_REBUILD_FINISHED: case ET_VERIFY_STARTED: case ET_VERIFY_ABORTED: case ET_VERIFY_FAILED: case ET_VERIFY_FINISHED: case ET_VERIFY_DATA_ERROR: case ET_SPARE_TOOK_OVER: case ET_DEVICE_REMOVED: case ET_DEVICE_PLUGGED: case ET_DEVICE_ERROR: e.DeviceID = VDEV_TO_ID((PVDevice)param); break; default: break; } event_queue_add(&e); if (event==ET_DEVICE_REMOVED) { int controller, channel; get_disk_location(&((PVDevice)param)->u.disk, &controller, &channel); hpt_printk(("Device removed: controller %d channel %d\n", controller, channel)); } wakeup(param); } static int hpt_delete_array(_VBUS_ARG DEVICEID id, DWORD options) { PVDevice pArray = ID_TO_VDEV(id); BOOLEAN del_block0 = (options & DAF_KEEP_DATA_IF_POSSIBLE)?0:1; int i; PVDevice pa; if ((id==0) || check_VDevice_valid(pArray)) return -1; if(!mIsArray(pArray)) return -1; if (pArray->u.array.rf_rebuilding ||
pArray->u.array.rf_verifying || pArray->u.array.rf_initializing) return -1; for(i=0; i<pArray->u.array.bArnMember; i++) { pa = pArray->u.array.pMember[i]; if (pa && mIsArray(pa)) { if (pa->u.array.rf_rebuilding || pa->u.array.rf_verifying || pa->u.array.rf_initializing) return -1; } } if (pArray->pVBus!=_vbus_p) { HPT_ASSERT(0); return -1;} fDeleteArray(_VBUS_P pArray, del_block0); return 0; } /* just to prevent driver from sending more commands */ static void HPTLIBAPI nothing(_VBUS_ARG void *notused){} void lock_driver_idle(IAL_ADAPTER_T *pAdapter) { _VBUS_INST(&pAdapter->VBus) mtx_lock(&pAdapter->lock); while (pAdapter->outstandingCommands) { KdPrint(("outstandingCommands is %d, wait..\n", pAdapter->outstandingCommands)); if (!mWaitingForIdle(_VBUS_P0)) CallWhenIdle(_VBUS_P nothing, 0); mtx_sleep(pAdapter, &pAdapter->lock, 0, "hptidle", 0); } CheckIdleCall(_VBUS_P0); } int Kernel_DeviceIoControl(_VBUS_ARG DWORD dwIoControlCode, /* operation control code */ PVOID lpInBuffer, /* input data buffer */ DWORD nInBufferSize, /* size of input data buffer */ PVOID lpOutBuffer, /* output data buffer */ DWORD nOutBufferSize, /* size of output data buffer */ PDWORD lpBytesReturned /* byte count */ ) { IAL_ADAPTER_T *pAdapter; switch(dwIoControlCode) { case HPT_IOCTL_DELETE_ARRAY: { DEVICEID idArray; int iSuccess; int i; PVDevice pArray; PVBus _vbus_p; struct cam_periph *periph = NULL; if (nInBufferSize!=sizeof(DEVICEID)+sizeof(DWORD)) return -1; if (nOutBufferSize!=sizeof(int)) return -1; idArray = *(DEVICEID *)lpInBuffer; pArray = ID_TO_VDEV(idArray); if((idArray == 0) || check_VDevice_valid(pArray)) return -1; if(!mIsArray(pArray)) return -1; _vbus_p=pArray->pVBus; pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; for(i = 0; i < MAX_VDEVICE_PER_VBUS; i++) { if(pArray == _vbus_p->pVDevice[i]) { periph = hpt_get_periph(pAdapter->mvSataAdapter.adapterId, i); if (periph != NULL && periph->refcount >= 1) { hpt_printk(("Can not delete a mounted device.\n")); return -1; } } /* the Mounted Disk isn't delete */ } iSuccess = hpt_delete_array(_VBUS_P idArray, *(DWORD*)((DEVICEID *)lpInBuffer+1)); *(int*)lpOutBuffer = iSuccess; if(iSuccess != 0) return -1; break; } case HPT_IOCTL_GET_EVENT: { PHPT_EVENT pInfo; if (nInBufferSize!=0) return -1; if (nOutBufferSize!=sizeof(HPT_EVENT)) return -1; pInfo = (PHPT_EVENT)lpOutBuffer; if (hpt_get_event(pInfo)!=0) return -1; } break; case HPT_IOCTL_SET_ARRAY_STATE: { DEVICEID idArray; DWORD state; if (nInBufferSize!=sizeof(HPT_SET_STATE_PARAM)) return -1; if (nOutBufferSize!=0) return -1; idArray = ((PHPT_SET_STATE_PARAM)lpInBuffer)->idArray; state = ((PHPT_SET_STATE_PARAM)lpInBuffer)->state; if(hpt_set_array_state(idArray, state)!=0) return -1; } break; case HPT_IOCTL_RESCAN_DEVICES: { if (nInBufferSize!=0) return -1; if (nOutBufferSize!=0) return -1; #ifndef FOR_DEMO /* stop buzzer if user perform rescan */ for (pAdapter=gIal_Adapter; pAdapter; pAdapter=pAdapter->next) { if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } } #endif } break; default: { PVDevice pVDev; switch(dwIoControlCode) { /* read-only ioctl functions can be called directly.
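 * Configuration ioctls, by contrast, are routed below through
 * lock_driver_idle(), which drains in-flight commands before array state
 * is touched. That drain is the standard mutex-plus-sleep/wakeup loop;
 * schematically (names illustrative, not the driver's):
 *
 *   mtx_lock(&sc_lock);
 *   while (outstanding != 0)          // completion side calls wakeup(&sc)
 *       mtx_sleep(&sc, &sc_lock, 0, "drain", 0);
 *   // ...safe to mutate array configuration here...
 *   mtx_unlock(&sc_lock);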
*/ case HPT_IOCTL_GET_VERSION: case HPT_IOCTL_GET_CONTROLLER_IDS: case HPT_IOCTL_GET_CONTROLLER_COUNT: case HPT_IOCTL_GET_CONTROLLER_INFO: case HPT_IOCTL_GET_CHANNEL_INFO: case HPT_IOCTL_GET_LOGICAL_DEVICES: case HPT_IOCTL_GET_DEVICE_INFO: case HPT_IOCTL_GET_DEVICE_INFO_V2: case HPT_IOCTL_GET_EVENT: case HPT_IOCTL_GET_DRIVER_CAPABILITIES: if(hpt_default_ioctl(_VBUS_P dwIoControlCode, lpInBuffer, nInBufferSize, lpOutBuffer, nOutBufferSize, lpBytesReturned) == -1) return -1; break; default: /* * GUI always use /proc/scsi/hptmv/0, so the _vbus_p param will be * wrong for second controller. */ switch(dwIoControlCode) { case HPT_IOCTL_CREATE_ARRAY: pVDev = ID_TO_VDEV(((PCREATE_ARRAY_PARAMS)lpInBuffer)->Members[0]); break; case HPT_IOCTL_CREATE_ARRAY_V2: pVDev = ID_TO_VDEV(((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->Members[0]); break; case HPT_IOCTL_SET_ARRAY_INFO: pVDev = ID_TO_VDEV(((PHPT_SET_ARRAY_INFO)lpInBuffer)->idArray); break; case HPT_IOCTL_SET_DEVICE_INFO: pVDev = ID_TO_VDEV(((PHPT_SET_DEVICE_INFO)lpInBuffer)->idDisk); break; case HPT_IOCTL_SET_DEVICE_INFO_V2: pVDev = ID_TO_VDEV(((PHPT_SET_DEVICE_INFO_V2)lpInBuffer)->idDisk); break; case HPT_IOCTL_SET_BOOT_MARK: case HPT_IOCTL_ADD_SPARE_DISK: case HPT_IOCTL_REMOVE_SPARE_DISK: pVDev = ID_TO_VDEV(*(DEVICEID *)lpInBuffer); break; case HPT_IOCTL_ADD_DISK_TO_ARRAY: pVDev = ID_TO_VDEV(((PHPT_ADD_DISK_TO_ARRAY)lpInBuffer)->idArray); break; default: pVDev = 0; } if (pVDev && !check_VDevice_valid(pVDev)){ _vbus_p = pVDev->pVBus; pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; /* * create_array, and other functions can't be executed while channel is * perform I/O commands. Wait until driver is idle. */ lock_driver_idle(pAdapter); if (hpt_default_ioctl(_VBUS_P dwIoControlCode, lpInBuffer, nInBufferSize, lpOutBuffer, nOutBufferSize, lpBytesReturned) == -1) { mtx_unlock(&pAdapter->lock); return -1; } mtx_unlock(&pAdapter->lock); } else return -1; break; } #ifdef SUPPORT_ARRAY switch(dwIoControlCode) { case HPT_IOCTL_CREATE_ARRAY: { pAdapter=(IAL_ADAPTER_T *)(ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->pVBus->OsExt; mtx_lock(&pAdapter->lock); if(((PCREATE_ARRAY_PARAMS)lpInBuffer)->CreateFlags & CAF_CREATE_AND_DUPLICATE) { (ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->u.array.rf_auto_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), DUPLICATE); } else if(((PCREATE_ARRAY_PARAMS)lpInBuffer)->CreateFlags & CAF_CREATE_R5_ZERO_INIT) { hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), INITIALIZE); } else if(((PCREATE_ARRAY_PARAMS)lpInBuffer)->CreateFlags & CAF_CREATE_R5_BUILD_PARITY) { hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), REBUILD_PARITY); } mtx_unlock(&pAdapter->lock); break; } case HPT_IOCTL_CREATE_ARRAY_V2: { pAdapter=(IAL_ADAPTER_T *)(ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->pVBus->OsExt; mtx_lock(&pAdapter->lock); if(((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->CreateFlags & CAF_CREATE_AND_DUPLICATE) { (ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->u.array.rf_auto_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), DUPLICATE); } else if(((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->CreateFlags & CAF_CREATE_R5_ZERO_INIT) { hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), INITIALIZE); } else if(((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->CreateFlags & CAF_CREATE_R5_BUILD_PARITY) { hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, 
ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), REBUILD_PARITY); } mtx_unlock(&pAdapter->lock); break; } case HPT_IOCTL_ADD_DISK_TO_ARRAY: { PVDevice pArray = ID_TO_VDEV(((PHPT_ADD_DISK_TO_ARRAY)lpInBuffer)->idArray); pAdapter=(IAL_ADAPTER_T *)pArray->pVBus->OsExt; if(pArray->u.array.rf_rebuilding == 0) { mtx_lock(&pAdapter->lock); pArray->u.array.rf_auto_rebuild = 0; pArray->u.array.rf_abort_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, DUPLICATE); while (!pArray->u.array.rf_rebuilding) { if (mtx_sleep(pArray, &pAdapter->lock, 0, "hptwait", hz * 3) != 0) break; } mtx_unlock(&pAdapter->lock); } break; } } #endif return 0; } } if (lpBytesReturned) *lpBytesReturned = nOutBufferSize; return 0; } static int hpt_get_event(PHPT_EVENT pEvent) { int ret = event_queue_remove(pEvent); return ret; } static int hpt_set_array_state(DEVICEID idArray, DWORD state) { IAL_ADAPTER_T *pAdapter; PVDevice pVDevice = ID_TO_VDEV(idArray); int i; if(idArray == 0 || check_VDevice_valid(pVDevice)) return -1; if(!mIsArray(pVDevice)) return -1; if(!pVDevice->vf_online || pVDevice->u.array.rf_broken) return -1; pAdapter=(IAL_ADAPTER_T *)pVDevice->pVBus->OsExt; switch(state) { case MIRROR_REBUILD_START: { mtx_lock(&pAdapter->lock); if (pVDevice->u.array.rf_rebuilding || pVDevice->u.array.rf_verifying || pVDevice->u.array.rf_initializing) { mtx_unlock(&pAdapter->lock); return -1; } pVDevice->u.array.rf_auto_rebuild = 0; pVDevice->u.array.rf_abort_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pVDevice, (UCHAR)((pVDevice->u.array.CriticalMembers || pVDevice->VDeviceType == VD_RAID_1)? DUPLICATE : REBUILD_PARITY)); while (!pVDevice->u.array.rf_rebuilding) { if (mtx_sleep(pVDevice, &pAdapter->lock, 0, "hptwait", hz * 20) != 0) break; } mtx_unlock(&pAdapter->lock); } break; case MIRROR_REBUILD_ABORT: { for(i = 0; i < pVDevice->u.array.bArnMember; i++) { if(pVDevice->u.array.pMember[i] != 0 && pVDevice->u.array.pMember[i]->VDeviceType == VD_RAID_1) hpt_set_array_state(VDEV_TO_ID(pVDevice->u.array.pMember[i]), state); } mtx_lock(&pAdapter->lock); if(pVDevice->u.array.rf_rebuilding != 1) { mtx_unlock(&pAdapter->lock); return -1; } pVDevice->u.array.rf_abort_rebuild = 1; while (pVDevice->u.array.rf_abort_rebuild) { if (mtx_sleep(pVDevice, &pAdapter->lock, 0, "hptabrt", hz * 20) != 0) break; } mtx_unlock(&pAdapter->lock); } break; case AS_VERIFY_START: { /*if(pVDevice->u.array.rf_verifying) return -1;*/ mtx_lock(&pAdapter->lock); if (pVDevice->u.array.rf_rebuilding || pVDevice->u.array.rf_verifying || pVDevice->u.array.rf_initializing) { mtx_unlock(&pAdapter->lock); return -1; } pVDevice->u.array.RebuildSectors = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pVDevice, VERIFY); while (!pVDevice->u.array.rf_verifying) { if (mtx_sleep(pVDevice, &pAdapter->lock, 0, "hptvrfy", hz * 20) != 0) break; } mtx_unlock(&pAdapter->lock); } break; case AS_VERIFY_ABORT: { mtx_lock(&pAdapter->lock); if(pVDevice->u.array.rf_verifying != 1) { mtx_unlock(&pAdapter->lock); return -1; } pVDevice->u.array.rf_abort_rebuild = 1; while (pVDevice->u.array.rf_abort_rebuild) { if (mtx_sleep(pVDevice, &pAdapter->lock, 0, "hptvrfy", hz * 80) != 0) break; } mtx_unlock(&pAdapter->lock); } break; case AS_INITIALIZE_START: { mtx_lock(&pAdapter->lock); if (pVDevice->u.array.rf_rebuilding || pVDevice->u.array.rf_verifying || pVDevice->u.array.rf_initializing) { mtx_unlock(&pAdapter->lock); return -1; } hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pVDevice, VERIFY); while 
(!pVDevice->u.array.rf_initializing) { if (mtx_sleep(pVDevice, &pAdapter->lock, 0, "hptinit", hz * 80) != 0) break; } mtx_unlock(&pAdapter->lock); } break; case AS_INITIALIZE_ABORT: { mtx_lock(&pAdapter->lock); if(pVDevice->u.array.rf_initializing != 1) { mtx_unlock(&pAdapter->lock); return -1; } pVDevice->u.array.rf_abort_rebuild = 1; while (pVDevice->u.array.rf_abort_rebuild) { if (mtx_sleep(pVDevice, &pAdapter->lock, 0, "hptinit", hz * 80) != 0) break; } mtx_unlock(&pAdapter->lock); } break; default: return -1; } return 0; } int HPTLIBAPI R1ControlSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSgTable, int logical) { ULONG bufferSize = SECTOR_TO_BYTE(pCmd->uCmd.R1Control.nSectors); if (pCmd->uCmd.R1Control.Command==CTRL_CMD_VERIFY) bufferSize<<=1; if (logical) { pSgTable->dSgAddress = (ULONG_PTR)pCmd->uCmd.R1Control.Buffer; pSgTable->wSgSize = (USHORT)bufferSize; pSgTable->wSgFlag = SG_FLAG_EOT; } else { /* build physical SG table for pCmd->uCmd.R1Control.Buffer */ ADDRESS dataPointer, v, nextpage, currvaddr, nextvaddr, currphypage, nextphypage; ULONG length; int idx = 0; v = pCmd->uCmd.R1Control.Buffer; dataPointer = (ADDRESS)fOsPhysicalAddress(v); if ((ULONG_PTR)dataPointer & 0x1) return FALSE; #define ON64KBOUNDARY(x) (((ULONG_PTR)(x) & 0xFFFF) == 0) #define NOTNEIGHBORPAGE(highvaddr, lowvaddr) ((ULONG_PTR)(highvaddr) - (ULONG_PTR)(lowvaddr) != PAGE_SIZE) do { if (idx >= MAX_SG_DESCRIPTORS) return FALSE; pSgTable[idx].dSgAddress = fOsPhysicalAddress(v); currvaddr = v; currphypage = (ADDRESS)fOsPhysicalAddress((void*)trunc_page((ULONG_PTR)currvaddr)); do { nextpage = (ADDRESS)trunc_page(((ULONG_PTR)currvaddr + PAGE_SIZE)); nextvaddr = (ADDRESS)MIN(((ULONG_PTR)v + bufferSize), (ULONG_PTR)(nextpage)); if (nextvaddr == (ADDRESS)((ULONG_PTR)v + bufferSize)) break; nextphypage = (ADDRESS)fOsPhysicalAddress(nextpage); if (NOTNEIGHBORPAGE(nextphypage, currphypage) || ON64KBOUNDARY(nextphypage)) { nextvaddr = nextpage; break; } currvaddr = nextvaddr; currphypage = nextphypage; }while (1); length = (ULONG_PTR)nextvaddr - (ULONG_PTR)v; v = nextvaddr; bufferSize -= length; pSgTable[idx].wSgSize = (USHORT)length; pSgTable[idx].wSgFlag = (bufferSize)? 
0 : SG_FLAG_EOT; idx++; }while (bufferSize); } return 1; } static int End_Job=0; void HPTLIBAPI thread_io_done(_VBUS_ARG PCommand pCmd) { End_Job = 1; wakeup((caddr_t)pCmd); } void hpt_rebuild_data_block(IAL_ADAPTER_T *pAdapter, PVDevice pArray, UCHAR flags) { ULONG capacity = pArray->VDeviceCapacity / (pArray->u.array.bArnMember-1); PCommand pCmd; UINT result; int needsync=0, retry=0, needdelete=0; - void *buffer = 0; + void *buffer = NULL; _VBUS_INST(&pAdapter->VBus) if (pArray->u.array.rf_broken==1 || pArray->u.array.RebuildSectors>=capacity) return; mtx_lock(&pAdapter->lock); switch(flags) { case DUPLICATE: case REBUILD_PARITY: if(pArray->u.array.rf_rebuilding == 0) { pArray->u.array.rf_rebuilding = 1; hpt_printk(("Rebuilding started.\n")); ioctl_ReportEvent(ET_REBUILD_STARTED, pArray); } break; case INITIALIZE: if(pArray->u.array.rf_initializing == 0) { pArray->u.array.rf_initializing = 1; hpt_printk(("Initializing started.\n")); ioctl_ReportEvent(ET_INITIALIZE_STARTED, pArray); } break; case VERIFY: if(pArray->u.array.rf_verifying == 0) { pArray->u.array.rf_verifying = 1; hpt_printk(("Verifying started.\n")); ioctl_ReportEvent(ET_VERIFY_STARTED, pArray); } break; } retry_cmd: pCmd = AllocateCommand(_VBUS_P0); HPT_ASSERT(pCmd); pCmd->cf_control = 1; End_Job = 0; if (pArray->VDeviceType==VD_RAID_1) { #define MAX_REBUILD_SECTORS 0x40 /* take care for discontinuous buffer in R1ControlSgl */ buffer = malloc(SECTOR_TO_BYTE(MAX_REBUILD_SECTORS), M_DEVBUF, M_NOWAIT); if(!buffer) { FreeCommand(_VBUS_P pCmd); hpt_printk(("can't allocate rebuild buffer\n")); goto fail; } switch(flags) { case DUPLICATE: pCmd->uCmd.R1Control.Command = CTRL_CMD_REBUILD; pCmd->uCmd.R1Control.nSectors = MAX_REBUILD_SECTORS; break; case VERIFY: pCmd->uCmd.R1Control.Command = CTRL_CMD_VERIFY; pCmd->uCmd.R1Control.nSectors = MAX_REBUILD_SECTORS/2; break; case INITIALIZE: pCmd->uCmd.R1Control.Command = CTRL_CMD_REBUILD; pCmd->uCmd.R1Control.nSectors = MAX_REBUILD_SECTORS; break; } pCmd->uCmd.R1Control.Lba = pArray->u.array.RebuildSectors; if (capacity - pArray->u.array.RebuildSectors < pCmd->uCmd.R1Control.nSectors) pCmd->uCmd.R1Control.nSectors = capacity - pArray->u.array.RebuildSectors; pCmd->uCmd.R1Control.Buffer = buffer; pCmd->pfnBuildSgl = R1ControlSgl; } else if (pArray->VDeviceType==VD_RAID_5) { switch(flags) { case DUPLICATE: case REBUILD_PARITY: pCmd->uCmd.R5Control.Command = CTRL_CMD_REBUILD; break; case VERIFY: pCmd->uCmd.R5Control.Command = CTRL_CMD_VERIFY; break; case INITIALIZE: pCmd->uCmd.R5Control.Command = CTRL_CMD_INIT; break; } pCmd->uCmd.R5Control.StripeLine=pArray->u.array.RebuildSectors>>pArray->u.array.bArBlockSizeShift; } else HPT_ASSERT(0); pCmd->pVDevice = pArray; pCmd->pfnCompletion = thread_io_done; pArray->pfnSendCommand(_VBUS_P pCmd); CheckPendingCall(_VBUS_P0); if (!End_Job) { mtx_sleep(pCmd, &pAdapter->lock, 0, "hptrbld", hz * 60); if (!End_Job) { hpt_printk(("timeout, reset\n")); fResetVBus(_VBUS_P0); } } result = pCmd->Result; FreeCommand(_VBUS_P pCmd); if (buffer) free(buffer, M_DEVBUF); KdPrintI(("cmd finished %d", result)); switch(result) { case RETURN_SUCCESS: if (!pArray->u.array.rf_abort_rebuild) { if(pArray->u.array.RebuildSectors < capacity) { hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, flags); } else { switch (flags) { case DUPLICATE: case REBUILD_PARITY: needsync = 1; pArray->u.array.rf_rebuilding = 0; pArray->u.array.rf_need_rebuild = 0; pArray->u.array.CriticalMembers = 0; pArray->u.array.RebuildSectors = MAX_LBA_T; 
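/*
 * The control flow above is the usual chunked-background-task shape: each
 * pass rebuilds at most MAX_REBUILD_SECTORS (0x40 sectors, i.e. 32KB for
 * RAID 1), advances RebuildSectors, and re-queues itself through
 * hpt_queue_dpc() until the per-disk capacity is reached. Schematically
 * (hypothetical names, assuming <stdint.h>):
 *
 *   static void rebuild_step(uint64_t *done, uint64_t capacity)
 *   {
 *       uint64_t chunk = 0x40;              // MAX_REBUILD_SECTORS
 *       if (capacity - *done < chunk)
 *           chunk = capacity - *done;       // final short chunk
 *       // ...issue one rebuild command covering 'chunk' sectors...
 *       *done += chunk;
 *       if (*done < capacity)
 *           requeue(rebuild_step);          // hypothetical DPC re-queue
 *   }
 */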
pArray->u.array.rf_duplicate_and_create = 0; hpt_printk(("Rebuilding finished.\n")); ioctl_ReportEvent(ET_REBUILD_FINISHED, pArray); break; case INITIALIZE: needsync = 1; pArray->u.array.rf_initializing = 0; pArray->u.array.rf_need_rebuild = 0; pArray->u.array.RebuildSectors = MAX_LBA_T; hpt_printk(("Initializing finished.\n")); ioctl_ReportEvent(ET_INITIALIZE_FINISHED, pArray); break; case VERIFY: pArray->u.array.rf_verifying = 0; hpt_printk(("Verifying finished.\n")); ioctl_ReportEvent(ET_VERIFY_FINISHED, pArray); break; } } } else { pArray->u.array.rf_abort_rebuild = 0; if (pArray->u.array.rf_rebuilding) { hpt_printk(("Abort rebuilding.\n")); pArray->u.array.rf_rebuilding = 0; pArray->u.array.rf_duplicate_and_create = 0; ioctl_ReportEvent(ET_REBUILD_ABORTED, pArray); } else if (pArray->u.array.rf_verifying) { hpt_printk(("Abort verifying.\n")); pArray->u.array.rf_verifying = 0; ioctl_ReportEvent(ET_VERIFY_ABORTED, pArray); } else if (pArray->u.array.rf_initializing) { hpt_printk(("Abort initializing.\n")); pArray->u.array.rf_initializing = 0; ioctl_ReportEvent(ET_INITIALIZE_ABORTED, pArray); } needdelete=1; } break; case RETURN_DATA_ERROR: if (flags==VERIFY) { needsync = 1; pArray->u.array.rf_verifying = 0; pArray->u.array.rf_need_rebuild = 1; hpt_printk(("Verifying failed: found inconsistency\n")); ioctl_ReportEvent(ET_VERIFY_DATA_ERROR, pArray); ioctl_ReportEvent(ET_VERIFY_FAILED, pArray); if (!pArray->vf_online || pArray->u.array.rf_broken) break; pArray->u.array.rf_auto_rebuild = 0; pArray->u.array.rf_abort_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, (pArray->VDeviceType == VD_RAID_1) ? DUPLICATE : REBUILD_PARITY); } break; default: hpt_printk(("command failed with error %d\n", result)); if (++retry<3) { hpt_printk(("retry (%d)\n", retry)); goto retry_cmd; } fail: pArray->u.array.rf_abort_rebuild = 0; switch (flags) { case DUPLICATE: case REBUILD_PARITY: needsync = 1; pArray->u.array.rf_rebuilding = 0; pArray->u.array.rf_duplicate_and_create = 0; hpt_printk(((flags==DUPLICATE)? "Duplicating failed.\n":"Rebuilding failed.\n")); ioctl_ReportEvent(ET_REBUILD_FAILED, pArray); break; case INITIALIZE: needsync = 1; pArray->u.array.rf_initializing = 0; hpt_printk(("Initializing failed.\n")); ioctl_ReportEvent(ET_INITIALIZE_FAILED, pArray); break; case VERIFY: needsync = 1; pArray->u.array.rf_verifying = 0; hpt_printk(("Verifying failed.\n")); ioctl_ReportEvent(ET_VERIFY_FAILED, pArray); break; } needdelete=1; } while (pAdapter->outstandingCommands) { KdPrintI(("currcmds is %d, wait..\n", pAdapter->outstandingCommands)); /* put this to have driver stop processing system commands quickly */ if (!mWaitingForIdle(_VBUS_P0)) CallWhenIdle(_VBUS_P nothing, 0); mtx_sleep(pAdapter, &pAdapter->lock, 0, "hptidle", 0); } if (needsync) SyncArrayInfo(pArray); if(needdelete && (pArray->u.array.rf_duplicate_must_done || (flags == INITIALIZE))) fDeleteArray(_VBUS_P pArray, TRUE); Check_Idle_Call(pAdapter); mtx_unlock(&pAdapter->lock); } Index: head/sys/dev/iicbus/if_ic.c =================================================================== --- head/sys/dev/iicbus/if_ic.c (revision 313981) +++ head/sys/dev/iicbus/if_ic.c (revision 313982) @@ -1,434 +1,434 @@ /*- * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * I2C bus IP driver */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define PCF_MASTER_ADDRESS 0xaa #define ICHDRLEN sizeof(u_int32_t) #define ICMTU 1500 /* default mtu */ struct ic_softc { struct ifnet *ic_ifp; device_t ic_dev; u_char ic_addr; /* peer I2C address */ int ic_flags; char *ic_obuf; char *ic_ifbuf; char *ic_cp; int ic_xfercnt; int ic_iferrs; struct mtx ic_lock; }; #define IC_SENDING 0x0001 #define IC_OBUF_BUSY 0x0002 #define IC_IFBUF_BUSY 0x0004 #define IC_BUFFERS_BUSY (IC_OBUF_BUSY | IC_IFBUF_BUSY) #define IC_BUFFER_WAITER 0x0008 static devclass_t ic_devclass; static int icprobe(device_t); static int icattach(device_t); static int icioctl(struct ifnet *, u_long, caddr_t); static int icoutput(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); static int icintr(device_t, int, char *); static device_method_t ic_methods[] = { /* device interface */ DEVMETHOD(device_probe, icprobe), DEVMETHOD(device_attach, icattach), /* iicbus interface */ DEVMETHOD(iicbus_intr, icintr), { 0, 0 } }; static driver_t ic_driver = { "ic", ic_methods, sizeof(struct ic_softc), }; static void ic_alloc_buffers(struct ic_softc *sc, int mtu) { char *obuf, *ifbuf; obuf = malloc(mtu + ICHDRLEN, M_DEVBUF, M_WAITOK); ifbuf = malloc(mtu + ICHDRLEN, M_DEVBUF, M_WAITOK); mtx_lock(&sc->ic_lock); while (sc->ic_flags & IC_BUFFERS_BUSY) { sc->ic_flags |= IC_BUFFER_WAITER; mtx_sleep(sc, &sc->ic_lock, 0, "icalloc", 0); sc->ic_flags &= ~IC_BUFFER_WAITER; } free(sc->ic_obuf, M_DEVBUF); free(sc->ic_ifbuf, M_DEVBUF); sc->ic_obuf = obuf; sc->ic_ifbuf = ifbuf; sc->ic_ifp->if_mtu = mtu; mtx_unlock(&sc->ic_lock); } /* * icprobe() */ static int icprobe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } /* * icattach() */ static int icattach(device_t dev) { struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev); struct ifnet *ifp; ifp = sc->ic_ifp = if_alloc(IFT_PARA); if (ifp == NULL) return (ENOSPC); mtx_init(&sc->ic_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); sc->ic_addr = PCF_MASTER_ADDRESS; /* XXX only PCF masters */ sc->ic_dev = dev; ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX |
IFF_POINTOPOINT | IFF_MULTICAST; ifp->if_ioctl = icioctl; ifp->if_output = icoutput; ifp->if_hdrlen = 0; ifp->if_addrlen = 0; ifp->if_snd.ifq_maxlen = ifqmaxlen; ic_alloc_buffers(sc, ICMTU); if_attach(ifp); bpfattach(ifp, DLT_NULL, ICHDRLEN); return (0); } /* * icioctl() */ static int icioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ic_softc *sc = ifp->if_softc; device_t icdev = sc->ic_dev; device_t parent = device_get_parent(icdev); struct ifaddr *ifa = (struct ifaddr *)data; struct ifreq *ifr = (struct ifreq *)data; int error; switch (cmd) { case SIOCAIFADDR: case SIOCSIFADDR: if (ifa->ifa_addr->sa_family != AF_INET) return (EAFNOSUPPORT); mtx_lock(&sc->ic_lock); ifp->if_flags |= IFF_UP; goto locked; case SIOCSIFFLAGS: mtx_lock(&sc->ic_lock); locked: if ((!(ifp->if_flags & IFF_UP)) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { /* XXX disable PCF */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; mtx_unlock(&sc->ic_lock); /* IFF_UP is not set, try to release the bus anyway */ iicbus_release_bus(parent, icdev); break; } if (((ifp->if_flags & IFF_UP)) && (!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { mtx_unlock(&sc->ic_lock); if ((error = iicbus_request_bus(parent, icdev, IIC_WAIT | IIC_INTR))) return (error); mtx_lock(&sc->ic_lock); iicbus_reset(parent, IIC_FASTEST, 0, NULL); ifp->if_drv_flags |= IFF_DRV_RUNNING; } mtx_unlock(&sc->ic_lock); break; case SIOCSIFMTU: ic_alloc_buffers(sc, ifr->ifr_mtu); break; case SIOCGIFMTU: mtx_lock(&sc->ic_lock); ifr->ifr_mtu = sc->ic_ifp->if_mtu; mtx_unlock(&sc->ic_lock); break; case SIOCADDMULTI: case SIOCDELMULTI: - if (ifr == 0) + if (ifr == NULL) return (EAFNOSUPPORT); /* XXX */ switch (ifr->ifr_addr.sa_family) { case AF_INET: break; default: return (EAFNOSUPPORT); } break; default: return (EINVAL); } return (0); } /* * icintr() */ static int icintr(device_t dev, int event, char *ptr) { struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev); struct mbuf *top; int len; mtx_lock(&sc->ic_lock); switch (event) { case INTR_GENERAL: case INTR_START: sc->ic_cp = sc->ic_ifbuf; sc->ic_xfercnt = 0; sc->ic_flags |= IC_IFBUF_BUSY; break; case INTR_STOP: /* if any error occurred during transfer, * drop the packet */ sc->ic_flags &= ~IC_IFBUF_BUSY; if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) == IC_BUFFER_WAITER) wakeup(&sc); if (sc->ic_iferrs) goto err; if ((len = sc->ic_xfercnt) == 0) break; /* ignore */ if (len <= ICHDRLEN) goto err; len -= ICHDRLEN; if_inc_counter(sc->ic_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->ic_ifp, IFCOUNTER_IBYTES, len); BPF_TAP(sc->ic_ifp, sc->ic_ifbuf, len + ICHDRLEN); top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, sc->ic_ifp, 0); if (top) { mtx_unlock(&sc->ic_lock); M_SETFIB(top, sc->ic_ifp->if_fib); netisr_dispatch(NETISR_IP, top); mtx_lock(&sc->ic_lock); } break; err: if_printf(sc->ic_ifp, "errors (%d)!\n", sc->ic_iferrs); sc->ic_iferrs = 0; /* reset error count */ if_inc_counter(sc->ic_ifp, IFCOUNTER_IERRORS, 1); break; case INTR_RECEIVE: if (sc->ic_xfercnt >= sc->ic_ifp->if_mtu + ICHDRLEN) { sc->ic_iferrs++; } else { *sc->ic_cp++ = *ptr; sc->ic_xfercnt++; } break; case INTR_NOACK: /* xfer terminated by master */ break; case INTR_TRANSMIT: *ptr = 0xff; /* XXX */ break; case INTR_ERROR: sc->ic_iferrs++; break; default: panic("%s: unknown event (%d)!", __func__, event); } mtx_unlock(&sc->ic_lock); return (0); } /* * icoutput() */ static int icoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct ic_softc *sc = ifp->if_softc; device_t icdev = sc->ic_dev; device_t
parent = device_get_parent(icdev); int len, sent; struct mbuf *mm; u_char *cp; u_int32_t hdr; /* BPF writes need to be handled specially. */ if (dst->sa_family == AF_UNSPEC) bcopy(dst->sa_data, &hdr, sizeof(hdr)); else hdr = dst->sa_family; mtx_lock(&sc->ic_lock); ifp->if_drv_flags |= IFF_DRV_RUNNING; /* already sending? */ if (sc->ic_flags & IC_SENDING) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); goto error; } /* insert header */ bcopy ((char *)&hdr, sc->ic_obuf, ICHDRLEN); cp = sc->ic_obuf + ICHDRLEN; len = 0; mm = m; do { if (len + mm->m_len > sc->ic_ifp->if_mtu) { /* packet too large */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); goto error; } bcopy(mtod(mm,char *), cp, mm->m_len); cp += mm->m_len; len += mm->m_len; } while ((mm = mm->m_next)); BPF_MTAP2(ifp, &hdr, sizeof(hdr), m); sc->ic_flags |= (IC_SENDING | IC_OBUF_BUSY); m_freem(m); mtx_unlock(&sc->ic_lock); /* send the packet */ if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf, len + ICHDRLEN, &sent)) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, len); } mtx_lock(&sc->ic_lock); sc->ic_flags &= ~(IC_SENDING | IC_OBUF_BUSY); if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) == IC_BUFFER_WAITER) wakeup(&sc); mtx_unlock(&sc->ic_lock); return (0); error: m_freem(m); mtx_unlock(&sc->ic_lock); return(0); } DRIVER_MODULE(ic, iicbus, ic_driver, ic_devclass, 0, 0); MODULE_DEPEND(ic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); MODULE_VERSION(ic, 1); Index: head/sys/dev/isp/isp_pci.c =================================================================== --- head/sys/dev/isp/isp_pci.c (revision 313981) +++ head/sys/dev/isp/isp_pci.c (revision 313982) @@ -1,2128 +1,2128 @@ /*- * Copyright (c) 1997-2008 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. * FreeBSD Version. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __sparc64__ #include #include #endif #include static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int); static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t); static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_pci_mbxdma(ispsoftc_t *); static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *); static void isp_pci_reset0(ispsoftc_t *); static void isp_pci_reset1(ispsoftc_t *); static void isp_pci_dumpregs(ispsoftc_t *, const char *); static struct ispmdvec mdvec = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_1080 = { isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_12160 = { isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_2100 = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2200 = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2300 = { isp_pci_rd_isr_2300, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2400 = { isp_pci_rd_isr_2400, isp_pci_rd_reg_2400, isp_pci_wr_reg_2400, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, NULL }; static struct ispmdvec mdvec_2500 = { isp_pci_rd_isr_2400, isp_pci_rd_reg_2400, isp_pci_wr_reg_2400, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, NULL }; static struct ispmdvec mdvec_2600 = { isp_pci_rd_isr_2400, isp_pci_rd_reg_2600, isp_pci_wr_reg_2600, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, NULL }; #ifndef PCIM_CMD_INVEN #define PCIM_CMD_INVEN 0x10 #endif #ifndef PCIM_CMD_BUSMASTEREN #define PCIM_CMD_BUSMASTEREN 0x0004 #endif #ifndef PCIM_CMD_PERRESPEN #define PCIM_CMD_PERRESPEN 0x0040 #endif #ifndef PCIM_CMD_SEREN #define PCIM_CMD_SEREN 0x0100 #endif #ifndef PCIM_CMD_INTX_DISABLE #define PCIM_CMD_INTX_DISABLE 0x0400 #endif #ifndef PCIR_COMMAND #define PCIR_COMMAND 0x04 #endif #ifndef PCIR_CACHELNSZ #define PCIR_CACHELNSZ 0x0c #endif #ifndef 
PCIR_LATTIMER #define PCIR_LATTIMER 0x0d #endif #ifndef PCIR_ROMADDR #define PCIR_ROMADDR 0x30 #endif #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1020 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1080 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP10160 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP12160 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1240 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1280 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2100 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2200 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2300 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2312 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2322 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2422 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2432 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2532 #define PCI_PRODUCT_QLOGIC_ISP2532 0x2532 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP6312 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP6322 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP5432 #define PCI_PRODUCT_QLOGIC_ISP5432 0x5432 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2031 #define PCI_PRODUCT_QLOGIC_ISP2031 0x2031 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP8031 #define PCI_PRODUCT_QLOGIC_ISP8031 0x8031 #endif #define PCI_QLOGIC_ISP5432 \ ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1020 \ ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1080 \ ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP10160 \ ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP12160 \ ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1240 \ ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1280 \ ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2100 \ ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2200 \ ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2300 \ ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2312 \ ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2322 \ ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2422 \ ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2432 \ ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2532 \ ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP6312 \ ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP6322 \ ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2031 \ ((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP8031 \ ((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC) /* * Odd case for some AMI raid cards... We need to *not* attach to this. 
*/ #define AMI_RAID_SUBVENDOR_ID 0x101e #define PCI_DFLT_LTNCY 0x40 #define PCI_DFLT_LNSZ 0x10 static int isp_pci_probe (device_t); static int isp_pci_attach (device_t); static int isp_pci_detach (device_t); #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev struct isp_pcisoftc { ispsoftc_t pci_isp; device_t pci_dev; struct resource * regs; struct resource * regs1; struct resource * regs2; void * irq; int iqd; int rtp; int rgd; int rtp1; int rgd1; int rtp2; int rgd2; void * ih; int16_t pci_poff[_NREG_BLKS]; bus_dma_tag_t dmat; int msicount; }; static device_method_t isp_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isp_pci_probe), DEVMETHOD(device_attach, isp_pci_attach), DEVMETHOD(device_detach, isp_pci_detach), { 0, 0 } }; static driver_t isp_pci_driver = { "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) }; static devclass_t isp_devclass; DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); MODULE_DEPEND(isp, cam, 1, 1, 1); MODULE_DEPEND(isp, firmware, 1, 1, 1); static int isp_nvports = 0; static int isp_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP1020: device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1080: device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1240: device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1280: device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP10160: device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP12160: if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { return (ENXIO); } device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP2100: device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2200: device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2300: device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2312: device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2322: device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2422: device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2432: device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2532: device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP5432: device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP6312: device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP6322: device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2031: device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP8031: device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter"); break; default: return (ENXIO); } if (isp_announced == 0 && bootverbose) { printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " "Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); isp_announced++; } /* * XXXX: Here is where we might load the f/w module * XXXX: (or increase a reference count to it). 
*/ return (BUS_PROBE_DEFAULT); } static void isp_get_generic_options(device_t dev, ispsoftc_t *isp) { int tval; tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NORELOAD; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NONVRAM; } tval = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval); if (tval) { isp->isp_dblev = tval; } else { isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; } if (bootverbose) { isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; } tval = -1; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval); if (tval > 0 && tval <= 254) { isp_nvports = tval; } tval = 7; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval); isp_quickboot_time = tval; } static void isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp) { const char *sptr; int tval = 0; char prefix[12], name[16]; if (chan == 0) prefix[0] = 0; else snprintf(prefix, sizeof(prefix), "chan%d.", chan); snprintf(name, sizeof(name), "%siid", prefix); if (resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval)) { if (IS_FC(isp)) { ISP_FC_PC(isp, chan)->default_id = 109 - chan; } else { #ifdef __sparc64__ ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev); #else ISP_SPI_PC(isp, chan)->iid = 7; #endif } } else { if (IS_FC(isp)) { ISP_FC_PC(isp, chan)->default_id = tval - chan; } else { ISP_SPI_PC(isp, chan)->iid = tval; } isp->isp_confopts |= ISP_CFG_OWNLOOPID; } if (IS_SCSI(isp)) return; tval = -1; snprintf(name, sizeof(name), "%srole", prefix); if (resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval) == 0) { switch (tval) { case ISP_ROLE_NONE: case ISP_ROLE_INITIATOR: case ISP_ROLE_TARGET: case ISP_ROLE_BOTH: device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval); break; default: tval = -1; break; } } if (tval == -1) { tval = ISP_DEFAULT_ROLES; } ISP_FC_PC(isp, chan)->def_role = tval; tval = 0; snprintf(name, sizeof(name), "%sfullduplex", prefix); if (resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; } - sptr = 0; + sptr = NULL; snprintf(name, sizeof(name), "%stopology", prefix); if (resource_string_value(device_get_name(dev), device_get_unit(dev), - name, (const char **) &sptr) == 0 && sptr != 0) { + name, (const char **) &sptr) == 0 && sptr != NULL) { if (strcmp(sptr, "lport") == 0) { isp->isp_confopts |= ISP_CFG_LPORT; } else if (strcmp(sptr, "nport") == 0) { isp->isp_confopts |= ISP_CFG_NPORT; } else if (strcmp(sptr, "lport-only") == 0) { isp->isp_confopts |= ISP_CFG_LPORT_ONLY; } else if (strcmp(sptr, "nport-only") == 0) { isp->isp_confopts |= ISP_CFG_NPORT_ONLY; } } #ifdef ISP_FCTAPE_OFF isp->isp_confopts |= ISP_CFG_NOFCTAPE; #else isp->isp_confopts |= ISP_CFG_FCTAPE; #endif tval = 0; snprintf(name, sizeof(name), "%snofctape", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval) { isp->isp_confopts &= ~ISP_CFG_FCTAPE; isp->isp_confopts |= ISP_CFG_NOFCTAPE; } tval = 0; snprintf(name, sizeof(name), "%sfctape", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval) { isp->isp_confopts &= ~ISP_CFG_NOFCTAPE; isp->isp_confopts |= ISP_CFG_FCTAPE; } /* * Because the resource_*_value 
functions can neither return * 64 bit integer values, nor can they be directly coerced * to interpret the right hand side of the assignment as * you want them to interpret it, we have to force WWN * hint replacement to specify WWN strings with a leading * 'w' (e.g. w50000000aaaa0001). Sigh. */ - sptr = 0; + sptr = NULL; snprintf(name, sizeof(name), "%sportwwn", prefix); tval = resource_string_value(device_get_name(dev), device_get_unit(dev), name, (const char **) &sptr); - if (tval == 0 && sptr != 0 && *sptr++ == 'w') { - char *eptr = 0; + if (tval == 0 && sptr != NULL && *sptr++ == 'w') { + char *eptr = NULL; ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16); if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) { device_printf(dev, "mangled portwwn hint '%s'\n", sptr); ISP_FC_PC(isp, chan)->def_wwpn = 0; } } - sptr = 0; + sptr = NULL; snprintf(name, sizeof(name), "%snodewwn", prefix); tval = resource_string_value(device_get_name(dev), device_get_unit(dev), name, (const char **) &sptr); - if (tval == 0 && sptr != 0 && *sptr++ == 'w') { - char *eptr = 0; + if (tval == 0 && sptr != NULL && *sptr++ == 'w') { + char *eptr = NULL; ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16); if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) { device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); ISP_FC_PC(isp, chan)->def_wwnn = 0; } } tval = -1; snprintf(name, sizeof(name), "%sloop_down_limit", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval >= 0 && tval < 0xffff) { ISP_FC_PC(isp, chan)->loop_down_limit = tval; } else { ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit; } tval = -1; snprintf(name, sizeof(name), "%sgone_device_time", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval >= 0 && tval < 0xffff) { ISP_FC_PC(isp, chan)->gone_device_time = tval; } else { ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time; } } static int isp_pci_attach(device_t dev) { int i, locksetup = 0; uint32_t data, cmd, linesz, did; struct isp_pcisoftc *pcs; ispsoftc_t *isp; size_t psize, xsize; char fwname[32]; pcs = device_get_softc(dev); if (pcs == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(pcs, 0, sizeof (*pcs)); pcs->pci_dev = dev; isp = &pcs->pci_isp; isp->isp_dev = dev; isp->isp_nchan = 1; if (sizeof (bus_addr_t) > 4) isp->isp_osinfo.sixtyfourbit = 1; /* * Get Generic Options */ isp_nvports = 0; isp_get_generic_options(dev, isp); linesz = PCI_DFLT_LNSZ; pcs->irq = pcs->regs = pcs->regs2 = NULL; pcs->rgd = pcs->rtp = pcs->iqd = 0; pcs->pci_dev = dev; pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; switch (pci_get_devid(dev)) { case PCI_QLOGIC_ISP1020: did = 0x1040; isp->isp_mdvec = &mdvec; isp->isp_type = ISP_HA_SCSI_UNKNOWN; break; case PCI_QLOGIC_ISP1080: did = 0x1080; isp->isp_mdvec = &mdvec_1080; isp->isp_type = ISP_HA_SCSI_1080; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP1240: did = 0x1080; isp->isp_mdvec = &mdvec_1080; isp->isp_type = ISP_HA_SCSI_1240; isp->isp_nchan = 2; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP1280: did = 0x1080; isp->isp_mdvec = &mdvec_1080;
isp->isp_type = ISP_HA_SCSI_1280; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP10160: did = 0x12160; isp->isp_mdvec = &mdvec_12160; isp->isp_type = ISP_HA_SCSI_10160; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP12160: did = 0x12160; isp->isp_nchan = 2; isp->isp_mdvec = &mdvec_12160; isp->isp_type = ISP_HA_SCSI_12160; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP2100: did = 0x2100; isp->isp_mdvec = &mdvec_2100; isp->isp_type = ISP_HA_FC_2100; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; if (pci_get_revid(dev) < 3) { /* * XXX: Need to get the actual revision * XXX: number of the 2100 FB. At any rate, * XXX: lower cache line size for early revision * XXX; boards. */ linesz = 1; } break; case PCI_QLOGIC_ISP2200: did = 0x2200; isp->isp_mdvec = &mdvec_2200; isp->isp_type = ISP_HA_FC_2200; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; break; case PCI_QLOGIC_ISP2300: did = 0x2300; isp->isp_mdvec = &mdvec_2300; isp->isp_type = ISP_HA_FC_2300; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; break; case PCI_QLOGIC_ISP2312: case PCI_QLOGIC_ISP6312: did = 0x2300; isp->isp_mdvec = &mdvec_2300; isp->isp_type = ISP_HA_FC_2312; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; break; case PCI_QLOGIC_ISP2322: case PCI_QLOGIC_ISP6322: did = 0x2322; isp->isp_mdvec = &mdvec_2300; isp->isp_type = ISP_HA_FC_2322; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; break; case PCI_QLOGIC_ISP2422: case PCI_QLOGIC_ISP2432: did = 0x2400; isp->isp_nchan += isp_nvports; isp->isp_mdvec = &mdvec_2400; isp->isp_type = ISP_HA_FC_2400; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; case PCI_QLOGIC_ISP2532: did = 0x2500; isp->isp_nchan += isp_nvports; isp->isp_mdvec = &mdvec_2500; isp->isp_type = ISP_HA_FC_2500; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; case PCI_QLOGIC_ISP5432: did = 0x2500; isp->isp_mdvec = &mdvec_2500; isp->isp_type = ISP_HA_FC_2500; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; case PCI_QLOGIC_ISP2031: case PCI_QLOGIC_ISP8031: did = 0x2600; isp->isp_nchan += isp_nvports; isp->isp_mdvec = &mdvec_2600; isp->isp_type = ISP_HA_FC_2600; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; default: device_printf(dev, "unknown device type\n"); goto bad; break; } isp->isp_revision = pci_get_revid(dev); if (IS_26XX(isp)) { pcs->rtp = SYS_RES_MEMORY; pcs->rgd = PCIR_BAR(0); pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE); pcs->rtp1 = SYS_RES_MEMORY; pcs->rgd1 = PCIR_BAR(2); pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1, RF_ACTIVE); pcs->rtp2 = SYS_RES_MEMORY; pcs->rgd2 = PCIR_BAR(4); pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2, RF_ACTIVE); } else { pcs->rtp = SYS_RES_MEMORY; pcs->rgd = PCIR_BAR(1); pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE); if (pcs->regs == NULL) { pcs->rtp = SYS_RES_IOPORT; pcs->rgd = PCIR_BAR(0); pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE); } } if (pcs->regs == NULL) { device_printf(dev, "Unable to map any ports\n"); goto bad; } if (bootverbose) { device_printf(dev, "Using %s space register mapping\n", (pcs->rtp == SYS_RES_IOPORT)? 
"I/O" : "Memory"); } isp->isp_regs = pcs->regs; isp->isp_regs2 = pcs->regs2; if (IS_FC(isp)) { psize = sizeof (fcparam); xsize = sizeof (struct isp_fc); } else { psize = sizeof (sdparam); xsize = sizeof (struct isp_spi); } psize *= isp->isp_nchan; xsize *= isp->isp_nchan; isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); if (isp->isp_param == NULL) { device_printf(dev, "cannot allocate parameter data\n"); goto bad; } isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO); if (isp->isp_osinfo.pc.ptr == NULL) { device_printf(dev, "cannot allocate parameter data\n"); goto bad; } /* * Now that we know who we are (roughly) get/set specific options */ for (i = 0; i < isp->isp_nchan; i++) { isp_get_specific_options(dev, i, isp); } isp->isp_osinfo.fw = NULL; if (isp->isp_osinfo.fw == NULL) { snprintf(fwname, sizeof (fwname), "isp_%04x", did); isp->isp_osinfo.fw = firmware_get(fwname); } if (isp->isp_osinfo.fw != NULL) { isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname); isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; } /* * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; if (IS_2300(isp)) { /* per QLogic errata */ cmd &= ~PCIM_CMD_INVEN; } if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { cmd &= ~PCIM_CMD_INTX_DISABLE; } if (IS_24XX(isp)) { cmd &= ~PCIM_CMD_INTX_DISABLE; } pci_write_config(dev, PCIR_COMMAND, cmd, 2); /* * Make sure the Cache Line Size register is set sensibly. */ data = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data); data = linesz; pci_write_config(dev, PCIR_CACHELNSZ, data, 1); } /* * Make sure the Latency Timer is sane. */ data = pci_read_config(dev, PCIR_LATTIMER, 1); if (data < PCI_DFLT_LTNCY) { data = PCI_DFLT_LTNCY; isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data); pci_write_config(dev, PCIR_LATTIMER, data, 1); } /* * Make sure we've disabled the ROM. */ data = pci_read_config(dev, PCIR_ROMADDR, 4); data &= ~1; pci_write_config(dev, PCIR_ROMADDR, data, 4); if (IS_26XX(isp)) { /* 26XX chips support only MSI-X, so start from them. */ pcs->msicount = imin(pci_msix_count(dev), 1); if (pcs->msicount > 0 && (i = pci_alloc_msix(dev, &pcs->msicount)) == 0) { pcs->iqd = 1; } else { pcs->msicount = 0; } } if (pcs->msicount == 0 && (IS_24XX(isp) || IS_2322(isp))) { /* * Older chips support both MSI and MSI-X, but I have * feeling that older firmware may not support MSI-X, * but we have no way to check the firmware flag here. */ pcs->msicount = imin(pci_msi_count(dev), 1); if (pcs->msicount > 0 && pci_alloc_msi(dev, &pcs->msicount) == 0) { pcs->iqd = 1; } else { pcs->msicount = 0; } } pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE); if (pcs->irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } /* Make sure the lock is set up. */ mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); locksetup++; if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } /* * Last minute checks... */ if (IS_23XX(isp) || IS_24XX(isp)) { isp->isp_port = pci_get_function(dev); } /* * Make sure we're in reset state. 
*/ ISP_LOCK(isp); if (isp_reinit(isp, 1) != 0) { ISP_UNLOCK(isp); goto bad; } ISP_UNLOCK(isp); if (isp_attach(isp)) { ISP_LOCK(isp); isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } return (0); bad: if (pcs->ih) { (void) bus_teardown_intr(dev, pcs->irq, pcs->ih); } if (locksetup) { mtx_destroy(&isp->isp_osinfo.lock); } if (pcs->irq) { (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq); } if (pcs->msicount) { pci_release_msi(dev); } if (pcs->regs) (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs); if (pcs->regs1) (void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1); if (pcs->regs2) (void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2); if (pcs->pci_isp.isp_param) { free(pcs->pci_isp.isp_param, M_DEVBUF); pcs->pci_isp.isp_param = NULL; } if (pcs->pci_isp.isp_osinfo.pc.ptr) { free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF); pcs->pci_isp.isp_osinfo.pc.ptr = NULL; } return (ENXIO); } static int isp_pci_detach(device_t dev) { struct isp_pcisoftc *pcs; ispsoftc_t *isp; int status; pcs = device_get_softc(dev); if (pcs == NULL) { return (ENXIO); } isp = (ispsoftc_t *) pcs; status = isp_detach(isp); if (status) return (status); ISP_LOCK(isp); isp_uninit(isp); if (pcs->ih) { (void) bus_teardown_intr(dev, pcs->irq, pcs->ih); } ISP_UNLOCK(isp); mtx_destroy(&isp->isp_osinfo.lock); (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq); if (pcs->msicount) { pci_release_msi(dev); } (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs); if (pcs->regs1) (void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1); if (pcs->regs2) (void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2); /* * XXX: THERE IS A LOT OF LEAKAGE HERE */ if (pcs->pci_isp.isp_param) { free(pcs->pci_isp.isp_param, M_DEVBUF); pcs->pci_isp.isp_param = NULL; } if (pcs->pci_isp.isp_osinfo.pc.ptr) { free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF); pcs->pci_isp.isp_osinfo.pc.ptr = NULL; } return (0); } #define IspVirt2Off(a, x) \ (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ _BLK_REG_SHFT] + ((x) & 0xfff)) #define BXR2(isp, off) bus_read_2((isp)->isp_regs, (off)) #define BXW2(isp, off, v) bus_write_2((isp)->isp_regs, (off), (v)) #define BXR4(isp, off) bus_read_4((isp)->isp_regs, (off)) #define BXW4(isp, off, v) bus_write_4((isp)->isp_regs, (off), (v)) #define B2R4(isp, off) bus_read_4((isp)->isp_regs2, (off)) #define B2W4(isp, off, v) bus_write_4((isp)->isp_regs2, (off), (v)) static ISP_INLINE int isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) { uint32_t val0, val1; int i = 0; do { val0 = BXR2(isp, IspVirt2Off(isp, off)); val1 = BXR2(isp, IspVirt2Off(isp, off)); } while (val0 != val1 && ++i < 1000); if (val0 != val1) { return (1); } *rp = val0; return (0); } static int isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint16_t isr, sema; if (IS_2100(isp)) { if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { return (0); } if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { return (0); } } else { isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR)); sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA)); } isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); isr &= INT_PENDING_MASK(isp); sema &= BIU_SEMA_LOCK; if (isr == 0 && sema == 0) { return (0); } *isrp = isr; if ((*semap = sema) != 0) { if (IS_2100(isp)) { if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) { return (0); } } else { *info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0)); } } return (1); } static int isp_pci_rd_isr_2300(ispsoftc_t *isp, 
uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint32_t hccr, r2hisr; if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { *isrp = 0; return (0); } r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO)); isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); if ((r2hisr & BIU_R2HST_INTR) == 0) { *isrp = 0; return (0); } switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) { case ISPR2HST_ROM_MBX_OK: case ISPR2HST_ROM_MBX_FAIL: case ISPR2HST_MBX_OK: case ISPR2HST_MBX_FAIL: case ISPR2HST_ASYNC_EVENT: *semap = 1; break; case ISPR2HST_RIO_16: *info = ASYNC_RIO16_1; *semap = 1; return (1); case ISPR2HST_FPOST: *info = ASYNC_CMD_CMPLT; *semap = 1; return (1); case ISPR2HST_FPOST_CTIO: *info = ASYNC_CTIO_DONE; *semap = 1; return (1); case ISPR2HST_RSPQ_UPDATE: *semap = 0; break; default: hccr = ISP_READ(isp, HCCR); if (hccr & HCCR_PAUSE) { ISP_WRITE(isp, HCCR, HCCR_RESET); isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR)); ISP_WRITE(isp, BIU_ICR, 0); } else { isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); } return (0); } *info = (r2hisr >> 16); return (1); } static int isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint32_t r2hisr; r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); if ((r2hisr & BIU_R2HST_INTR) == 0) { *isrp = 0; return (0); } switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) { case ISPR2HST_ROM_MBX_OK: case ISPR2HST_ROM_MBX_FAIL: case ISPR2HST_MBX_OK: case ISPR2HST_MBX_FAIL: case ISPR2HST_ASYNC_EVENT: *semap = 1; break; case ISPR2HST_RSPQ_UPDATE: case ISPR2HST_RSPQ_UPDATE2: case ISPR2HST_ATIO_UPDATE: case ISPR2HST_ATIO_RSPQ_UPDATE: case ISPR2HST_ATIO_UPDATE2: *semap = 0; break; default: ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); return (0); } *info = (r2hisr >> 16); return (1); } static uint32_t isp_pci_rd_reg(ispsoftc_t *isp, int regoff) { uint16_t rv; int oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. */ oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } rv = BXR2(isp, IspVirt2Off(isp, regoff)); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } return (rv); } static void isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) { int oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. */ oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } } static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) { uint32_t rv, oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { uint32_t tc; /* * We will assume that someone has paused the RISC processor. 
*/ oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc | BIU_PCI1080_CONF1_DMA); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } rv = BXR2(isp, IspVirt2Off(isp, regoff)); if (oc) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } return (rv); } static void isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) { int oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { uint32_t tc; /* * We will assume that someone has paused the RISC processor. */ oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc | BIU_PCI1080_CONF1_DMA); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); if (oc) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } } static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) { uint32_t rv; int block = regoff & _BLK_REG_MASK; switch (block) { case BIU_BLOCK: break; case MBOX_BLOCK: return (BXR2(isp, IspVirt2Off(isp, regoff))); case SXP_BLOCK: isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff); return (0xffffffff); case RISC_BLOCK: isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff); return (0xffffffff); case DMA_BLOCK: isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff); return (0xffffffff); default: isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff); return (0xffffffff); } switch (regoff) { case BIU2400_FLASH_ADDR: case BIU2400_FLASH_DATA: case BIU2400_ICR: case BIU2400_ISR: case BIU2400_CSR: case BIU2400_REQINP: case BIU2400_REQOUTP: case BIU2400_RSPINP: case BIU2400_RSPOUTP: case BIU2400_PRI_REQINP: case BIU2400_PRI_REQOUTP: case BIU2400_ATIO_RSPINP: case BIU2400_ATIO_RSPOUTP: case BIU2400_HCCR: case BIU2400_GPIOD: case BIU2400_GPIOE: case BIU2400_HSEMA: rv = BXR4(isp, IspVirt2Off(isp, regoff)); break; case BIU2400_R2HSTSLO: rv = BXR4(isp, IspVirt2Off(isp, regoff)); break; case BIU2400_R2HSTSHI: rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; break; default: isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x", regoff); rv = 0xffffffff; break; } return (rv); } static void isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) { int block = regoff & _BLK_REG_MASK; switch (block) { case BIU_BLOCK: break; case MBOX_BLOCK: BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); return; case SXP_BLOCK: isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff); return; case RISC_BLOCK: isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff); return; case DMA_BLOCK: isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff); return; default: isp_prt(isp, ISP_LOGERR, 
"unknown block write at 0x%x", regoff); break; } switch (regoff) { case BIU2400_FLASH_ADDR: case BIU2400_FLASH_DATA: case BIU2400_ICR: case BIU2400_ISR: case BIU2400_CSR: case BIU2400_REQINP: case BIU2400_REQOUTP: case BIU2400_RSPINP: case BIU2400_RSPOUTP: case BIU2400_PRI_REQINP: case BIU2400_PRI_REQOUTP: case BIU2400_ATIO_RSPINP: case BIU2400_ATIO_RSPOUTP: case BIU2400_HCCR: case BIU2400_GPIOD: case BIU2400_GPIOE: case BIU2400_HSEMA: BXW4(isp, IspVirt2Off(isp, regoff), val); #ifdef MEMORYBARRIERW if (regoff == BIU2400_REQINP || regoff == BIU2400_RSPOUTP || regoff == BIU2400_PRI_REQINP || regoff == BIU2400_ATIO_RSPOUTP) MEMORYBARRIERW(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1) else #endif MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1); break; default: isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x", regoff); break; } } static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff) { uint32_t rv; switch (regoff) { case BIU2400_PRI_REQINP: case BIU2400_PRI_REQOUTP: isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x", regoff); rv = 0xffffffff; break; case BIU2400_REQINP: rv = B2R4(isp, 0x00); break; case BIU2400_REQOUTP: rv = B2R4(isp, 0x04); break; case BIU2400_RSPINP: rv = B2R4(isp, 0x08); break; case BIU2400_RSPOUTP: rv = B2R4(isp, 0x0c); break; case BIU2400_ATIO_RSPINP: rv = B2R4(isp, 0x10); break; case BIU2400_ATIO_RSPOUTP: rv = B2R4(isp, 0x14); break; default: rv = isp_pci_rd_reg_2400(isp, regoff); break; } return (rv); } static void isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val) { int off; switch (regoff) { case BIU2400_PRI_REQINP: case BIU2400_PRI_REQOUTP: isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x", regoff); return; case BIU2400_REQINP: off = 0x00; break; case BIU2400_REQOUTP: off = 0x04; break; case BIU2400_RSPINP: off = 0x08; break; case BIU2400_RSPOUTP: off = 0x0c; break; case BIU2400_ATIO_RSPINP: off = 0x10; break; case BIU2400_ATIO_RSPOUTP: off = 0x14; break; default: isp_pci_wr_reg_2400(isp, regoff, val); return; } B2W4(isp, off, val); } struct imush { bus_addr_t maddr; int error; }; static void imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (!(imushp->error = error)) imushp->maddr = segs[0].ds_addr; } static int isp_pci_mbxdma(ispsoftc_t *isp) { caddr_t base; uint32_t len, nsegs; int i, error, cmap = 0; bus_size_t slim; /* segment size */ bus_addr_t llim; /* low limit of unavailable dma */ bus_addr_t hlim; /* high limit of unavailable dma */ struct imush im; isp_ecmd_t *ecmd; /* * Already been here? If so, leave... 
*/ if (isp->isp_rquest) { return (0); } ISP_UNLOCK(isp); if (isp->isp_maxcmds == 0) { isp_prt(isp, ISP_LOGERR, "maxcmds not set"); ISP_LOCK(isp); return (1); } hlim = BUS_SPACE_MAXADDR; if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { if (sizeof (bus_size_t) > 4) { slim = (bus_size_t) (1ULL << 32); } else { slim = (bus_size_t) (1UL << 31); } llim = BUS_SPACE_MAXADDR; } else { llim = BUS_SPACE_MAXADDR_32BIT; slim = (1UL << 24); } len = isp->isp_maxcmds * sizeof (struct isp_pcmd); isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_osinfo.sixtyfourbit) { nsegs = ISP_NSEG64_MAX; } else { nsegs = ISP_NSEG_MAX; } if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); return (1); } len = sizeof (isp_hdl_t) * isp->isp_maxcmds; isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); for (len = 0; len < isp->isp_maxcmds - 1; len++) { isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1]; } isp->isp_xffree = isp->isp_xflist; /* * Allocate and map the request queue and a region for external * DMA addressable command/status structures (22XX and later). */ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); if (isp->isp_type >= ISP_HA_FC_2200) len += (N_XCMDS * XCMD_SIZE); if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, &isp->isp_osinfo.reqdmat)) { isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag"); goto bad1; } if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory"); bus_dma_tag_destroy(isp->isp_osinfo.reqdmat); goto bad1; } isp->isp_rquest = base; im.error = 0; if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap, base, len, imc, &im, 0) || im.error) { isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error); goto bad1; } isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx", (uintmax_t)im.maddr, (uintmax_t)len); isp->isp_rquest_dma = im.maddr; base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); if (isp->isp_type >= ISP_HA_FC_2200) { isp->isp_osinfo.ecmd_dma = im.maddr; isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base; isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free; for (ecmd = isp->isp_osinfo.ecmd_free; ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) { if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) ecmd->next = NULL; else ecmd->next = ecmd + 1; } } /* * Allocate and map the result queue. 
*/ len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, &isp->isp_osinfo.respdmat)) { isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag"); goto bad1; } if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory"); bus_dma_tag_destroy(isp->isp_osinfo.respdmat); goto bad1; } isp->isp_result = base; im.error = 0; if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap, base, len, imc, &im, 0) || im.error) { isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error); goto bad1; } isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx", (uintmax_t)im.maddr, (uintmax_t)len); isp->isp_result_dma = im.maddr; #ifdef ISP_TARGET_MODE /* * Allocate and map ATIO queue on 24xx with target mode. */ if (IS_24XX(isp)) { len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, &isp->isp_osinfo.atiodmat)) { isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag"); goto bad1; } if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory"); bus_dma_tag_destroy(isp->isp_osinfo.atiodmat); goto bad1; } isp->isp_atioq = base; im.error = 0; if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap, base, len, imc, &im, 0) || im.error) { isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error); goto bad; } isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx", (uintmax_t)im.maddr, (uintmax_t)len); isp->isp_atioq_dma = im.maddr; } #endif if (IS_FC(isp)) { if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, &isp->isp_osinfo.iocbdmat)) { goto bad; } if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat, (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0) goto bad; isp->isp_iocb = base; im.error = 0; if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, base, 2*QENTRY_LEN, imc, &im, 0) || im.error) goto bad; isp->isp_iocb_dma = im.maddr; if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat)) goto bad; for (cmap = 0; cmap < isp->isp_nchan; cmap++) { struct isp_fc *fc = ISP_FC_PC(isp, cmap); if (bus_dmamem_alloc(isp->isp_osinfo.scdmat, (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0) goto bad; FCPARAM(isp, cmap)->isp_scratch = base; im.error = 0; if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap, base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) { bus_dmamem_free(isp->isp_osinfo.scdmat, base, fc->scmap); goto bad; } FCPARAM(isp, cmap)->isp_scdma = im.maddr; if (!IS_2100(isp)) { for (i = 0; i < INITIAL_NEXUS_COUNT; i++) { struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO); if (n == NULL) { while (fc->nexus_free_list) { n = fc->nexus_free_list; fc->nexus_free_list = n->next; free(n, M_DEVBUF); } goto bad; } n->next = fc->nexus_free_list; fc->nexus_free_list = n; } } } } for (i = 0; i < isp->isp_maxcmds; i++) { struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); if (error) { 
isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); while (--i >= 0) { bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap); } goto bad; } callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); if (i == isp->isp_maxcmds-1) { pcmd->next = NULL; } else { pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; } } isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; ISP_LOCK(isp); return (0); bad: if (IS_FC(isp)) { while (--cmap >= 0) { struct isp_fc *fc = ISP_FC_PC(isp, cmap); bus_dmamap_unload(isp->isp_osinfo.scdmat, fc->scmap); bus_dmamem_free(isp->isp_osinfo.scdmat, FCPARAM(isp, cmap)->isp_scratch, fc->scmap); while (fc->nexus_free_list) { struct isp_nexus *n = fc->nexus_free_list; fc->nexus_free_list = n->next; free(n, M_DEVBUF); } } bus_dma_tag_destroy(isp->isp_osinfo.scdmat); bus_dmamap_unload(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap); bus_dmamem_free(isp->isp_osinfo.iocbdmat, isp->isp_iocb, isp->isp_osinfo.iocbmap); bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat); } bad1: if (isp->isp_rquest_dma != 0) { bus_dmamap_unload(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap); } if (isp->isp_rquest != NULL) { bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest, isp->isp_osinfo.reqmap); bus_dma_tag_destroy(isp->isp_osinfo.reqdmat); } if (isp->isp_result_dma != 0) { bus_dmamap_unload(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap); } if (isp->isp_result != NULL) { bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result, isp->isp_osinfo.respmap); bus_dma_tag_destroy(isp->isp_osinfo.respdmat); } #ifdef ISP_TARGET_MODE if (IS_24XX(isp)) { if (isp->isp_atioq_dma != 0) { bus_dmamap_unload(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap); } if (isp->isp_atioq != NULL) { bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_atioq, isp->isp_osinfo.atiomap); bus_dma_tag_destroy(isp->isp_osinfo.atiodmat); } } #endif free(isp->isp_xflist, M_DEVBUF); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); isp->isp_rquest = NULL; ISP_LOCK(isp); return (1); } typedef struct { ispsoftc_t *isp; void *cmd_token; void *rq; /* original request */ int error; bus_size_t mapsize; } mush_t; #define MUSHERR_NOQENTRIES -2 #ifdef ISP_TARGET_MODE static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); static void tdma2(void *, bus_dma_segment_t *, int, int); static void tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) { mush_t *mp; mp = (mush_t *)arg; mp->mapsize = mapsize; tdma2(arg, dm_segs, nseg, error); } static void tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; isp_ddir_t ddir; ispreq_t *rq; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; if (nseg) { if (isp->isp_osinfo.sixtyfourbit) { if (nseg >= ISP_NSEG64_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); mp->error = EFAULT; return; } if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) { rq->req_header.rqs_entry_type = RQSTYPE_CTIO3; } } else { if (nseg >= ISP_NSEG_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); mp->error = EFAULT; return; } } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); ddir = ISP_TO_DEVICE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 
bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); ddir = ISP_FROM_DEVICE; } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len); switch (error) { case CMD_EAGAIN: mp->error = MUSHERR_NOQENTRIES; case CMD_QUEUED: break; default: mp->error = EIO; } } #endif static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); static void dma2(void *, bus_dma_segment_t *, int, int); static void dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) { mush_t *mp; mp = (mush_t *)arg; mp->mapsize = mapsize; dma2(arg, dm_segs, nseg, error); } static void dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; isp_ddir_t ddir; ispreq_t *rq; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; if (nseg) { if (isp->isp_osinfo.sixtyfourbit) { if (nseg >= ISP_NSEG64_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); mp->error = EFAULT; return; } if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) { rq->req_header.rqs_entry_type = RQSTYPE_A64; } } else { if (nseg >= ISP_NSEG_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); mp->error = EFAULT; return; } } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); ddir = ISP_FROM_DEVICE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); ddir = ISP_TO_DEVICE; } else { ddir = ISP_NOXFR; } } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map); switch (error) { case CMD_EAGAIN: mp->error = MUSHERR_NOQENTRIES; break; case CMD_QUEUED: break; default: mp->error = EIO; break; } } static int isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) { mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int); int error; mp = &mush; mp->isp = isp; mp->cmd_token = csio; mp->rq = ff; mp->error = 0; mp->mapsize = 0; #ifdef ISP_TARGET_MODE if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { eptr = tdma2; eptr2 = tdma2_2; } else #endif { eptr = dma2; eptr2 = dma2_2; } error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, (union ccb *)csio, eptr, mp, 0); if (error == EINPROGRESS) { bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); mp->error = EINVAL; isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif mp->error = error; } if (mp->error) { int retval = CMD_COMPLETE; if (mp->error == MUSHERR_NOQENTRIES) { retval = CMD_EAGAIN; } else if (mp->error == EFBIG) { csio->ccb_h.status = CAM_REQ_TOO_BIG; } else if (mp->error == EINVAL) { csio->ccb_h.status = CAM_REQ_INVALID; } else { csio->ccb_h.status = CAM_UNREC_HBA_ERROR; } return (retval); } return (CMD_QUEUED); } 
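/*
 * Everything above follows the same three-step busdma discipline:
 * create a tag that encodes the alignment and addressing constraints,
 * allocate coherent memory against that tag, and load the map with a
 * callback that captures the resulting bus address.  A minimal sketch
 * of that discipline, kept out of the build like the examples in
 * lance.c; the example_* names are illustrative and not part of this
 * driver, and the 64-byte alignment is an assumption rather than a
 * requirement taken from this file.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	/* Queue memory is allocated as a single contiguous segment. */
	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

static int
example_alloc_queue(bus_dma_tag_t parent, bus_size_t len,
    bus_dma_tag_t *tagp, bus_dmamap_t *mapp, void **vaddrp,
    bus_addr_t *busaddrp)
{

	/* One segment of exactly 'len' bytes, below 4GB, 64-byte aligned. */
	if (bus_dma_tag_create(parent, 64, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL,
	    tagp) != 0)
		return (ENOMEM);
	if (bus_dmamem_alloc(*tagp, vaddrp, BUS_DMA_COHERENT, mapp) != 0) {
		bus_dma_tag_destroy(*tagp);
		return (ENOMEM);
	}
	if (bus_dmamap_load(*tagp, *mapp, *vaddrp, len, example_load_cb,
	    busaddrp, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(*tagp, *vaddrp, *mapp);
		bus_dma_tag_destroy(*tagp);
		return (ENOMEM);
	}
	return (0);
}
#endif /* Example only */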
static void isp_pci_reset0(ispsoftc_t *isp) { ISP_DISABLE_INTS(isp); } static void isp_pci_reset1(ispsoftc_t *isp) { if (!IS_24XX(isp)) { /* Make sure the BIOS is disabled */ isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); } /* and enable interrupts */ ISP_ENABLE_INTS(isp); } static void isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; if (msg) printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); else printf("%s:\n", device_get_nameunit(isp->isp_dev)); if (IS_SCSI(isp)) printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); else printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); if (IS_SCSI(isp)) { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), ISP_READ(isp, CDMA_FIFO_STS)); printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), ISP_READ(isp, DDMA_FIFO_STS)); printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR), ISP_READ(isp, SXP_PINS_CTRL)); ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } printf(" mbox regs: %x %x %x %x %x\n", ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), ISP_READ(isp, OUTMAILBOX4)); printf(" PCI Status Command/Status=%x\n", pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); } Index: head/sys/dev/le/am7990.c =================================================================== --- head/sys/dev/le/am7990.c (revision 313981) +++ head/sys/dev/le/am7990.c (revision 313982) @@ -1,619 +1,619 @@ /* $NetBSD: am7990.c,v 1.68 2005/12/11 12:21:25 christos Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace * Simulation Facility, NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. 
* * This code is derived from software contributed to Berkeley by * Ralph Campbell and Rick Macklem. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if_le.c 8.2 (Berkeley) 11/16/93 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void am7990_meminit(struct lance_softc *); static void am7990_rint(struct lance_softc *); static void am7990_tint(struct lance_softc *); static void am7990_start_locked(struct lance_softc *sc); #ifdef LEDEBUG static void am7990_recv_print(struct lance_softc *, int); static void am7990_xmit_print(struct lance_softc *, int); #endif int am7990_config(struct am7990_softc *sc, const char* name, int unit) { int error, mem; sc->lsc.sc_meminit = am7990_meminit; sc->lsc.sc_start_locked = am7990_start_locked; error = lance_config(&sc->lsc, name, unit); if (error != 0) return (error); mem = 0; sc->lsc.sc_initaddr = mem; mem += sizeof(struct leinit); sc->lsc.sc_rmdaddr = mem; mem += sizeof(struct lermd) * sc->lsc.sc_nrbuf; sc->lsc.sc_tmdaddr = mem; mem += sizeof(struct letmd) * sc->lsc.sc_ntbuf; sc->lsc.sc_rbufaddr = mem; mem += LEBLEN * sc->lsc.sc_nrbuf; sc->lsc.sc_tbufaddr = mem; mem += LEBLEN * sc->lsc.sc_ntbuf; if (mem > sc->lsc.sc_memsize) panic("%s: memsize", __func__); lance_attach(&sc->lsc); return (0); } void am7990_detach(struct am7990_softc *sc) { lance_detach(&sc->lsc); } /* * Set up the initialization block and the descriptor rings. 
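* The init block is fetched by the chip once, at LE_C0_INIT time; it
* carries the mode word, the station address, the logical (multicast)
* address filter and the base addresses and length encodings of the
* receive and transmit rings.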
*/ static void am7990_meminit(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct leinit init; struct lermd rmd; struct letmd tmd; u_long a; int bix; LE_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_flags & IFF_PROMISC) init.init_mode = LE_MODE_NORMAL | LE_MODE_PROM; else init.init_mode = LE_MODE_NORMAL; init.init_padr[0] = (sc->sc_enaddr[1] << 8) | sc->sc_enaddr[0]; init.init_padr[1] = (sc->sc_enaddr[3] << 8) | sc->sc_enaddr[2]; init.init_padr[2] = (sc->sc_enaddr[5] << 8) | sc->sc_enaddr[4]; lance_setladrf(sc, init.init_ladrf); sc->sc_last_rd = 0; sc->sc_first_td = sc->sc_last_td = sc->sc_no_td = 0; a = sc->sc_addr + LE_RMDADDR(sc, 0); init.init_rdra = a; init.init_rlen = (a >> 16) | ((ffs(sc->sc_nrbuf) - 1) << 13); a = sc->sc_addr + LE_TMDADDR(sc, 0); init.init_tdra = a; init.init_tlen = (a >> 16) | ((ffs(sc->sc_ntbuf) - 1) << 13); (*sc->sc_copytodesc)(sc, &init, LE_INITADDR(sc), sizeof(init)); /* * Set up receive ring descriptors. */ for (bix = 0; bix < sc->sc_nrbuf; bix++) { a = sc->sc_addr + LE_RBUFADDR(sc, bix); rmd.rmd0 = a; rmd.rmd1_hadr = a >> 16; rmd.rmd1_bits = LE_R1_OWN; rmd.rmd2 = -LEBLEN | LE_XMD2_ONES; rmd.rmd3 = 0; (*sc->sc_copytodesc)(sc, &rmd, LE_RMDADDR(sc, bix), sizeof(rmd)); } /* * Set up transmit ring descriptors. */ for (bix = 0; bix < sc->sc_ntbuf; bix++) { a = sc->sc_addr + LE_TBUFADDR(sc, bix); tmd.tmd0 = a; tmd.tmd1_hadr = a >> 16; tmd.tmd1_bits = 0; tmd.tmd2 = LE_XMD2_ONES; tmd.tmd3 = 0; (*sc->sc_copytodesc)(sc, &tmd, LE_TMDADDR(sc, bix), sizeof(tmd)); } } static void am7990_rint(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; struct lermd rmd; int bix, rp; #if defined(LANCE_REVC_BUG) struct ether_header *eh; /* Make sure this is short-aligned, for ether_cmp(). */ static uint16_t bcast_enaddr[3] = { ~0, ~0, ~0 }; #endif bix = sc->sc_last_rd; /* Process all buffers with valid data. */ for (;;) { rp = LE_RMDADDR(sc, bix); (*sc->sc_copyfromdesc)(sc, &rmd, rp, sizeof(rmd)); if (rmd.rmd1_bits & LE_R1_OWN) break; m = NULL; if ((rmd.rmd1_bits & (LE_R1_ERR | LE_R1_STP | LE_R1_ENP)) != (LE_R1_STP | LE_R1_ENP)) { if (rmd.rmd1_bits & LE_R1_ERR) { #ifdef LEDEBUG if (rmd.rmd1_bits & LE_R1_ENP) { if ((rmd.rmd1_bits & LE_R1_OFLO) == 0) { if (rmd.rmd1_bits & LE_R1_FRAM) if_printf(ifp, "framing error\n"); if (rmd.rmd1_bits & LE_R1_CRC) if_printf(ifp, "crc mismatch\n"); } } else if (rmd.rmd1_bits & LE_R1_OFLO) if_printf(ifp, "overflow\n"); #endif if (rmd.rmd1_bits & LE_R1_BUFF) if_printf(ifp, "receive buffer error\n"); } else if ((rmd.rmd1_bits & (LE_R1_STP | LE_R1_ENP)) != (LE_R1_STP | LE_R1_ENP)) if_printf(ifp, "dropping chained buffer\n"); } else { #ifdef LEDEBUG if (sc->sc_flags & LE_DEBUG) am7990_recv_print(sc, bix); #endif /* Pull the packet off the interface. */ m = lance_get(sc, LE_RBUFADDR(sc, bix), (int)rmd.rmd3 - ETHER_CRC_LEN); } rmd.rmd1_bits = LE_R1_OWN; rmd.rmd2 = -LEBLEN | LE_XMD2_ONES; rmd.rmd3 = 0; (*sc->sc_copytodesc)(sc, &rmd, rp, sizeof(rmd)); if (++bix == sc->sc_nrbuf) bix = 0; if (m != NULL) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #ifdef LANCE_REVC_BUG /* * The old LANCE (Rev. C) chips have a bug which * causes garbage to be inserted in front of the * received packet. The workaround is to ignore * packets with an invalid destination address * (garbage will usually not match). * Of course, this precludes multicast support... */ eh = mtod(m, struct ether_header *); if (ether_cmp(eh->ether_dhost, sc->sc_enaddr) && ether_cmp(eh->ether_dhost, bcast_enaddr)) { m_freem(m); continue; } #endif /* Pass the packet up. 
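* The softc lock is dropped around if_input() below so the network
* stack may re-enter the driver (for example to start a transmit)
* without recursing on the lock.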
*/ LE_UNLOCK(sc); (*ifp->if_input)(ifp, m); LE_LOCK(sc); } else if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } sc->sc_last_rd = bix; } static void am7990_tint(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct letmd tmd; int bix; bix = sc->sc_first_td; for (;;) { if (sc->sc_no_td <= 0) break; (*sc->sc_copyfromdesc)(sc, &tmd, LE_TMDADDR(sc, bix), sizeof(tmd)); #ifdef LEDEBUG if (sc->sc_flags & LE_DEBUG) if_printf(ifp, "trans tmd: " "ladr %04x, hadr %02x, flags %02x, " "bcnt %04x, mcnt %04x\n", tmd.tmd0, tmd.tmd1_hadr, tmd.tmd1_bits, tmd.tmd2, tmd.tmd3); #endif if (tmd.tmd1_bits & LE_T1_OWN) break; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (tmd.tmd1_bits & LE_T1_ERR) { if (tmd.tmd3 & LE_T3_BUFF) if_printf(ifp, "transmit buffer error\n"); else if (tmd.tmd3 & LE_T3_UFLO) if_printf(ifp, "underflow\n"); if (tmd.tmd3 & (LE_T3_BUFF | LE_T3_UFLO)) { lance_init_locked(sc); return; } if (tmd.tmd3 & LE_T3_LCAR) { if (sc->sc_flags & LE_CARRIER) if_link_state_change(ifp, LINK_STATE_DOWN); sc->sc_flags &= ~LE_CARRIER; if (sc->sc_nocarrier) (*sc->sc_nocarrier)(sc); else if_printf(ifp, "lost carrier\n"); } if (tmd.tmd3 & LE_T3_LCOL) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (tmd.tmd3 & LE_T3_RTRY) { #ifdef LEDEBUG if_printf(ifp, "excessive collisions, tdr %d\n", tmd.tmd3 & LE_T3_TDR_MASK); #endif if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16); } if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { if (tmd.tmd1_bits & LE_T1_ONE) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); else if (tmd.tmd1_bits & LE_T1_MORE) /* Real number is unknown. */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 2); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } if (++bix == sc->sc_ntbuf) bix = 0; --sc->sc_no_td; } sc->sc_first_td = bix; sc->sc_wdog_timer = sc->sc_no_td > 0 ? 5 : 0; } /* * Controller interrupt */ void am7990_intr(void *arg) { struct lance_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint16_t isr; LE_LOCK(sc); if (sc->sc_hwintr && (*sc->sc_hwintr)(sc) == -1) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); lance_init_locked(sc); LE_UNLOCK(sc); return; } isr = (*sc->sc_rdcsr)(sc, LE_CSR0); #if defined(LEDEBUG) && LEDEBUG > 1 if (sc->sc_flags & LE_DEBUG) if_printf(ifp, "%s: entering with isr=%04x\n", __func__, isr); #endif if ((isr & LE_C0_INTR) == 0) { LE_UNLOCK(sc); return; } /* * Clear interrupt source flags and turn off interrupts. If we * don't clear these flags before processing their sources we * could completely miss some interrupt events as the NIC can * change these flags while we're in this handler. We toggle * the interrupt enable bit in order to keep receiving them * (some chips work without this, some don't). 
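* (The CSR0 status bits are write-one-to-clear, so writing back the
* value just read acknowledges exactly the events we saw; the control
* bits are masked out so the write cannot stop, start or reinitialize
* the chip as a side effect.)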
*/ (*sc->sc_wrcsr)(sc, LE_CSR0, isr & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | LE_C0_INIT)); if (isr & LE_C0_ERR) { if (isr & LE_C0_BABL) { #ifdef LEDEBUG if_printf(ifp, "babble\n"); #endif if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } #if 0 if (isr & LE_C0_CERR) { if_printf(ifp, "collision error\n"); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } #endif if (isr & LE_C0_MISS) { #ifdef LEDEBUG if_printf(ifp, "missed packet\n"); #endif if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } if (isr & LE_C0_MERR) { if_printf(ifp, "memory error\n"); lance_init_locked(sc); LE_UNLOCK(sc); return; } } if ((isr & LE_C0_RXON) == 0) { if_printf(ifp, "receiver disabled\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); lance_init_locked(sc); LE_UNLOCK(sc); return; } if ((isr & LE_C0_TXON) == 0) { if_printf(ifp, "transmitter disabled\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lance_init_locked(sc); LE_UNLOCK(sc); return; } /* * Pretend we have carrier; if we don't this will be cleared shortly. */ if (!(sc->sc_flags & LE_CARRIER)) if_link_state_change(ifp, LINK_STATE_UP); sc->sc_flags |= LE_CARRIER; if (isr & LE_C0_RINT) am7990_rint(sc); if (isr & LE_C0_TINT) am7990_tint(sc); /* Enable interrupts again. */ (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) am7990_start_locked(sc); LE_UNLOCK(sc); } /* * Set up output on interface. * Get another datagram to send off of the interface queue, and map it to the * interface before starting the output. */ static void am7990_start_locked(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct letmd tmd; struct mbuf *m; int bix, enq, len, rp; LE_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; bix = sc->sc_last_td; enq = 0; for (; sc->sc_no_td < sc->sc_ntbuf && !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { rp = LE_TMDADDR(sc, bix); (*sc->sc_copyfromdesc)(sc, &tmd, rp, sizeof(tmd)); if (tmd.tmd1_bits & LE_T1_OWN) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; if_printf(ifp, "missing buffer, no_td = %d, last_td = %d\n", sc->sc_no_td, sc->sc_last_td); } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); - if (m == 0) + if (m == NULL) break; /* * If BPF is listening on this interface, let it see the packet * before we commit it to the wire. */ BPF_MTAP(ifp, m); /* * Copy the mbuf chain into the transmit buffer. */ len = lance_put(sc, LE_TBUFADDR(sc, bix), m); #ifdef LEDEBUG if (len > ETHERMTU + ETHER_HDR_LEN) if_printf(ifp, "packet length %d\n", len); #endif /* * Init transmit registers, and set transmit start flag. 
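* (LE_T1_STP and LE_T1_ENP mark the frame as both starting and ending
* in this buffer, and LE_T1_OWN hands the descriptor to the chip.)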
*/ tmd.tmd1_bits = LE_T1_OWN | LE_T1_STP | LE_T1_ENP; tmd.tmd2 = -len | LE_XMD2_ONES; tmd.tmd3 = 0; (*sc->sc_copytodesc)(sc, &tmd, rp, sizeof(tmd)); #ifdef LEDEBUG if (sc->sc_flags & LE_DEBUG) am7990_xmit_print(sc, bix); #endif (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA | LE_C0_TDMD); enq++; if (++bix == sc->sc_ntbuf) bix = 0; if (++sc->sc_no_td == sc->sc_ntbuf) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } } sc->sc_last_td = bix; if (enq > 0) sc->sc_wdog_timer = 5; } #ifdef LEDEBUG static void am7990_recv_print(struct lance_softc *sc, int no) { struct ifnet *ifp = sc->sc_ifp; struct ether_header eh; struct lermd rmd; uint16_t len; (*sc->sc_copyfromdesc)(sc, &rmd, LE_RMDADDR(sc, no), sizeof(rmd)); len = rmd.rmd3; if_printf(ifp, "receive buffer %d, len = %d\n", no, len); if_printf(ifp, "status %04x\n", (*sc->sc_rdcsr)(sc, LE_CSR0)); if_printf(ifp, "ladr %04x, hadr %02x, flags %02x, bcnt %04x, mcnt %04x\n", rmd.rmd0, rmd.rmd1_hadr, rmd.rmd1_bits, rmd.rmd2, rmd.rmd3); if (len - ETHER_CRC_LEN >= sizeof(eh)) { (*sc->sc_copyfrombuf)(sc, &eh, LE_RBUFADDR(sc, no), sizeof(eh)); if_printf(ifp, "dst %s", ether_sprintf(eh.ether_dhost)); printf(" src %s type %04x\n", ether_sprintf(eh.ether_shost), ntohs(eh.ether_type)); } } static void am7990_xmit_print(struct lance_softc *sc, int no) { struct ifnet *ifp = sc->sc_ifp; struct ether_header eh; struct letmd tmd; uint16_t len; (*sc->sc_copyfromdesc)(sc, &tmd, LE_TMDADDR(sc, no), sizeof(tmd)); len = -tmd.tmd2; if_printf(ifp, "transmit buffer %d, len = %d\n", no, len); if_printf(ifp, "status %04x\n", (*sc->sc_rdcsr)(sc, LE_CSR0)); if_printf(ifp, "ladr %04x, hadr %02x, flags %02x, bcnt %04x, mcnt %04x\n", tmd.tmd0, tmd.tmd1_hadr, tmd.tmd1_bits, tmd.tmd2, tmd.tmd3); if (len >= sizeof(eh)) { (*sc->sc_copyfrombuf)(sc, &eh, LE_TBUFADDR(sc, no), sizeof(eh)); if_printf(ifp, "dst %s", ether_sprintf(eh.ether_dhost)); printf(" src %s type %04x\n", ether_sprintf(eh.ether_shost), ntohs(eh.ether_type)); } } #endif /* LEDEBUG */ Index: head/sys/dev/le/am79900.c =================================================================== --- head/sys/dev/le/am79900.c (revision 313981) +++ head/sys/dev/le/am79900.c (revision 313982) @@ -1,656 +1,656 @@ /* $NetBSD: am79900.c,v 1.17 2005/12/24 20:27:29 perry Exp $ */ /*- * Copyright (c) 1997 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell and Rick Macklem. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if_le.c 8.2 (Berkeley) 11/16/93 */ /*- * Copyright (c) 1998 * Matthias Drochner. All rights reserved. * Copyright (c) 1995 Charles M. Hannum. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell and Rick Macklem. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if_le.c 8.2 (Berkeley) 11/16/93 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void am79900_meminit(struct lance_softc *); static void am79900_rint(struct lance_softc *); static void am79900_tint(struct lance_softc *); static void am79900_start_locked(struct lance_softc *sc); #ifdef LEDEBUG static void am79900_recv_print(struct lance_softc *, int); static void am79900_xmit_print(struct lance_softc *, int); #endif int am79900_config(struct am79900_softc *sc, const char* name, int unit) { int error, mem; sc->lsc.sc_meminit = am79900_meminit; sc->lsc.sc_start_locked = am79900_start_locked; error = lance_config(&sc->lsc, name, unit); if (error != 0) return (error); mem = 0; sc->lsc.sc_initaddr = mem; mem += sizeof(struct leinit); sc->lsc.sc_rmdaddr = mem; mem += sizeof(struct lermd) * sc->lsc.sc_nrbuf; sc->lsc.sc_tmdaddr = mem; mem += sizeof(struct letmd) * sc->lsc.sc_ntbuf; sc->lsc.sc_rbufaddr = mem; mem += LEBLEN * sc->lsc.sc_nrbuf; sc->lsc.sc_tbufaddr = mem; mem += LEBLEN * sc->lsc.sc_ntbuf; if (mem > sc->lsc.sc_memsize) panic("%s: memsize", __func__); lance_attach(&sc->lsc); return (0); } void am79900_detach(struct am79900_softc *sc) { lance_detach(&sc->lsc); } /* * Set up the initialization block and the descriptor rings. */ static void am79900_meminit(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct leinit init; struct lermd rmd; struct letmd tmd; u_long a; int bix; LE_LOCK_ASSERT(sc, MA_OWNED); if (ifp->if_flags & IFF_PROMISC) init.init_mode = LE_HTOLE32(LE_MODE_NORMAL | LE_MODE_PROM); else init.init_mode = LE_HTOLE32(LE_MODE_NORMAL); init.init_mode |= LE_HTOLE32(((ffs(sc->sc_ntbuf) - 1) << 28) | ((ffs(sc->sc_nrbuf) - 1) << 20)); init.init_padr[0] = LE_HTOLE32(sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24)); init.init_padr[1] = LE_HTOLE32(sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8)); lance_setladrf(sc, init.init_ladrf); sc->sc_last_rd = 0; sc->sc_first_td = sc->sc_last_td = sc->sc_no_td = 0; a = sc->sc_addr + LE_RMDADDR(sc, 0); init.init_rdra = LE_HTOLE32(a); a = sc->sc_addr + LE_TMDADDR(sc, 0); init.init_tdra = LE_HTOLE32(a); (*sc->sc_copytodesc)(sc, &init, LE_INITADDR(sc), sizeof(init)); /* * Set up receive ring descriptors. */ for (bix = 0; bix < sc->sc_nrbuf; bix++) { a = sc->sc_addr + LE_RBUFADDR(sc, bix); rmd.rmd0 = LE_HTOLE32(a); rmd.rmd1 = LE_HTOLE32(LE_R1_OWN | LE_R1_ONES | (-LEBLEN & 0xfff)); rmd.rmd2 = 0; rmd.rmd3 = 0; (*sc->sc_copytodesc)(sc, &rmd, LE_RMDADDR(sc, bix), sizeof(rmd)); } /* * Set up transmit ring descriptors. 
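* (LE_T1_OWN is deliberately left clear here, so every transmit
* descriptor starts out owned by the host rather than the chip.)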
*/ for (bix = 0; bix < sc->sc_ntbuf; bix++) { a = sc->sc_addr + LE_TBUFADDR(sc, bix); tmd.tmd0 = LE_HTOLE32(a); tmd.tmd1 = LE_HTOLE32(LE_T1_ONES); tmd.tmd2 = 0; tmd.tmd3 = 0; (*sc->sc_copytodesc)(sc, &tmd, LE_TMDADDR(sc, bix), sizeof(tmd)); } } static inline void am79900_rint(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; struct lermd rmd; uint32_t rmd1; int bix, rp; #if defined(__i386__) struct ether_header *eh; #endif bix = sc->sc_last_rd; /* Process all buffers with valid data. */ for (;;) { rp = LE_RMDADDR(sc, bix); (*sc->sc_copyfromdesc)(sc, &rmd, rp, sizeof(rmd)); rmd1 = LE_LE32TOH(rmd.rmd1); if (rmd1 & LE_R1_OWN) break; m = NULL; if ((rmd1 & (LE_R1_ERR | LE_R1_STP | LE_R1_ENP)) != (LE_R1_STP | LE_R1_ENP)){ if (rmd1 & LE_R1_ERR) { #ifdef LEDEBUG if (rmd1 & LE_R1_ENP) { if ((rmd1 & LE_R1_OFLO) == 0) { if (rmd1 & LE_R1_FRAM) if_printf(ifp, "framing error\n"); if (rmd1 & LE_R1_CRC) if_printf(ifp, "crc mismatch\n"); } } else if (rmd1 & LE_R1_OFLO) if_printf(ifp, "overflow\n"); #endif if (rmd1 & LE_R1_BUFF) if_printf(ifp, "receive buffer error\n"); } else if ((rmd1 & (LE_R1_STP | LE_R1_ENP)) != (LE_R1_STP | LE_R1_ENP)) if_printf(ifp, "dropping chained buffer\n"); } else { #ifdef LEDEBUG if (sc->sc_flags & LE_DEBUG) am79900_recv_print(sc, bix); #endif /* Pull the packet off the interface. */ m = lance_get(sc, LE_RBUFADDR(sc, bix), (LE_LE32TOH(rmd.rmd2) & 0xfff) - ETHER_CRC_LEN); } rmd.rmd1 = LE_HTOLE32(LE_R1_OWN | LE_R1_ONES | (-LEBLEN & 0xfff)); rmd.rmd2 = 0; rmd.rmd3 = 0; (*sc->sc_copytodesc)(sc, &rmd, rp, sizeof(rmd)); if (++bix == sc->sc_nrbuf) bix = 0; if (m != NULL) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #if defined(__i386__) /* * The VMware LANCE does not present IFF_SIMPLEX * behavior on multicast packets. Thus drop the * packet if it is from ourselves. */ eh = mtod(m, struct ether_header *); if (!ether_cmp(eh->ether_shost, sc->sc_enaddr)) { m_freem(m); continue; } #endif /* Pass the packet up. */ LE_UNLOCK(sc); (*ifp->if_input)(ifp, m); LE_LOCK(sc); } else if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } sc->sc_last_rd = bix; } static inline void am79900_tint(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct letmd tmd; uint32_t tmd1, tmd2; int bix; bix = sc->sc_first_td; for (;;) { if (sc->sc_no_td <= 0) break; (*sc->sc_copyfromdesc)(sc, &tmd, LE_TMDADDR(sc, bix), sizeof(tmd)); tmd1 = LE_LE32TOH(tmd.tmd1); #ifdef LEDEBUG if (sc->sc_flags & LE_DEBUG) if_printf(ifp, "trans tmd: " "adr %08x, flags/blen %08x\n", LE_LE32TOH(tmd.tmd0), tmd1); #endif if (tmd1 & LE_T1_OWN) break; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (tmd1 & LE_T1_ERR) { tmd2 = LE_LE32TOH(tmd.tmd2); if (tmd2 & LE_T2_BUFF) if_printf(ifp, "transmit buffer error\n"); else if (tmd2 & LE_T2_UFLO) if_printf(ifp, "underflow\n"); if (tmd2 & (LE_T2_BUFF | LE_T2_UFLO)) { lance_init_locked(sc); return; } if (tmd2 & LE_T2_LCAR) { if (sc->sc_flags & LE_CARRIER) if_link_state_change(ifp, LINK_STATE_DOWN); sc->sc_flags &= ~LE_CARRIER; if (sc->sc_nocarrier) (*sc->sc_nocarrier)(sc); else if_printf(ifp, "lost carrier\n"); } if (tmd2 & LE_T2_LCOL) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (tmd2 & LE_T2_RTRY) { #ifdef LEDEBUG if_printf(ifp, "excessive collisions\n"); #endif if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16); } if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { if (tmd1 & LE_T1_ONE) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); else if (tmd1 & LE_T1_MORE) /* Real number is unknown. 
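* LE_T1_MORE only reports that more than one retry was needed, so
* charging two collisions is a conservative guess.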
*/ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 2); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } if (++bix == sc->sc_ntbuf) bix = 0; --sc->sc_no_td; } sc->sc_first_td = bix; sc->sc_wdog_timer = sc->sc_no_td > 0 ? 5 : 0; } /* * Controller interrupt */ void am79900_intr(void *arg) { struct lance_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint16_t isr; LE_LOCK(sc); if (sc->sc_hwintr && (*sc->sc_hwintr)(sc) == -1) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); lance_init_locked(sc); LE_UNLOCK(sc); return; } isr = (*sc->sc_rdcsr)(sc, LE_CSR0); #if defined(LEDEBUG) && LEDEBUG > 1 if (sc->sc_flags & LE_DEBUG) if_printf(ifp, "%s: entering with isr=%04x\n", __func__, isr); #endif if ((isr & LE_C0_INTR) == 0) { LE_UNLOCK(sc); return; } /* * Clear interrupt source flags and turn off interrupts. If we * don't clear these flags before processing their sources we * could completely miss some interrupt events as the NIC can * change these flags while we're in this handler. We toggle * the interrupt enable bit in order to keep receiving them * (some chips work without this, some don't). */ (*sc->sc_wrcsr)(sc, LE_CSR0, isr & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | LE_C0_INIT)); if (isr & LE_C0_ERR) { if (isr & LE_C0_BABL) { #ifdef LEDEBUG if_printf(ifp, "babble\n"); #endif if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } #if 0 if (isr & LE_C0_CERR) { if_printf(ifp, "collision error\n"); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } #endif if (isr & LE_C0_MISS) { #ifdef LEDEBUG if_printf(ifp, "missed packet\n"); #endif if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } if (isr & LE_C0_MERR) { if_printf(ifp, "memory error\n"); lance_init_locked(sc); LE_UNLOCK(sc); return; } } if ((isr & LE_C0_RXON) == 0) { if_printf(ifp, "receiver disabled\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); lance_init_locked(sc); LE_UNLOCK(sc); return; } if ((isr & LE_C0_TXON) == 0) { if_printf(ifp, "transmitter disabled\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lance_init_locked(sc); LE_UNLOCK(sc); return; } /* * Pretend we have carrier; if we don't this will be cleared shortly. */ if (!(sc->sc_flags & LE_CARRIER)) if_link_state_change(ifp, LINK_STATE_UP); sc->sc_flags |= LE_CARRIER; if (isr & LE_C0_RINT) am79900_rint(sc); if (isr & LE_C0_TINT) am79900_tint(sc); /* Enable interrupts again. */ (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) am79900_start_locked(sc); LE_UNLOCK(sc); } /* * Set up output on interface. * Get another datagram to send off of the interface queue, and map it to the * interface before starting the output. */ static void am79900_start_locked(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct letmd tmd; struct mbuf *m; int bix, enq, len, rp; LE_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; bix = sc->sc_last_td; enq = 0; for (; sc->sc_no_td < sc->sc_ntbuf && !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { rp = LE_TMDADDR(sc, bix); (*sc->sc_copyfromdesc)(sc, &tmd, rp, sizeof(tmd)); if (LE_LE32TOH(tmd.tmd1) & LE_T1_OWN) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; if_printf(ifp, "missing buffer, no_td = %d, last_td = %d\n", sc->sc_no_td, sc->sc_last_td); } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); - if (m == 0) + if (m == NULL) break; /* * If BPF is listening on this interface, let it see the packet * before we commit it to the wire. */ BPF_MTAP(ifp, m); /* * Copy the mbuf chain into the transmit buffer. 
*/ len = lance_put(sc, LE_TBUFADDR(sc, bix), m); #ifdef LEDEBUG if (len > ETHERMTU + ETHER_HDR_LEN) if_printf(ifp, "packet length %d\n", len); #endif /* * Init transmit registers, and set transmit start flag. */ tmd.tmd1 = LE_HTOLE32(LE_T1_OWN | LE_T1_STP | LE_T1_ENP | LE_T1_ONES | (-len & 0xfff)); tmd.tmd2 = 0; tmd.tmd3 = 0; (*sc->sc_copytodesc)(sc, &tmd, rp, sizeof(tmd)); #ifdef LEDEBUG if (sc->sc_flags & LE_DEBUG) am79900_xmit_print(sc, bix); #endif (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA | LE_C0_TDMD); enq++; if (++bix == sc->sc_ntbuf) bix = 0; if (++sc->sc_no_td == sc->sc_ntbuf) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } } sc->sc_last_td = bix; if (enq > 0) sc->sc_wdog_timer = 5; } #ifdef LEDEBUG static void am79900_recv_print(struct lance_softc *sc, int no) { struct ifnet *ifp = sc->sc_ifp; struct ether_header eh; struct lermd rmd; uint16_t len; (*sc->sc_copyfromdesc)(sc, &rmd, LE_RMDADDR(sc, no), sizeof(rmd)); len = LE_LE32TOH(rmd.rmd2) & 0xfff; if_printf(ifp, "receive buffer %d, len = %d\n", no, len); if_printf(ifp, "status %04x\n", (*sc->sc_rdcsr)(sc, LE_CSR0)); if_printf(ifp, "adr %08x, flags/blen %08x\n", LE_LE32TOH(rmd.rmd0), LE_LE32TOH(rmd.rmd1)); if (len - ETHER_CRC_LEN >= sizeof(eh)) { (*sc->sc_copyfrombuf)(sc, &eh, LE_RBUFADDR(sc, no), sizeof(eh)); if_printf(ifp, "dst %s", ether_sprintf(eh.ether_dhost)); printf(" src %s type %04x\n", ether_sprintf(eh.ether_shost), ntohs(eh.ether_type)); } } static void am79900_xmit_print(struct lance_softc *sc, int no) { struct ifnet *ifp = sc->sc_ifp; struct ether_header eh; struct letmd tmd; uint16_t len; (*sc->sc_copyfromdesc)(sc, &tmd, LE_TMDADDR(sc, no), sizeof(tmd)); len = -(LE_LE32TOH(tmd.tmd1) & 0xfff); if_printf(ifp, "transmit buffer %d, len = %d\n", no, len); if_printf(ifp, "status %04x\n", (*sc->sc_rdcsr)(sc, LE_CSR0)); if_printf(ifp, "adr %08x, flags/blen %08x\n", LE_LE32TOH(tmd.tmd0), LE_LE32TOH(tmd.tmd1)); if (len >= sizeof(eh)) { (*sc->sc_copyfrombuf)(sc, &eh, LE_TBUFADDR(sc, no), sizeof(eh)); if_printf(ifp, "dst %s", ether_sprintf(eh.ether_dhost)); printf(" src %s type %04x\n", ether_sprintf(eh.ether_shost), ntohs(eh.ether_type)); } } #endif /* LEDEBUG */ Index: head/sys/dev/le/lance.c =================================================================== --- head/sys/dev/le/lance.c (revision 313981) +++ head/sys/dev/le/lance.c (revision 313982) @@ -1,817 +1,817 @@ /* $NetBSD: lance.c,v 1.34 2005/12/24 20:27:30 perry Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace * Simulation Facility, NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell and Rick Macklem. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if_le.c 8.2 (Berkeley) 11/16/93 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t le_devclass; static void lance_start(struct ifnet *); static void lance_stop(struct lance_softc *); static void lance_init(void *); static void lance_watchdog(void *s); static int lance_mediachange(struct ifnet *); static void lance_mediastatus(struct ifnet *, struct ifmediareq *); static int lance_ioctl(struct ifnet *, u_long, caddr_t); int lance_config(struct lance_softc *sc, const char* name, int unit) { struct ifnet *ifp; int i, nbuf; if (LE_LOCK_INITIALIZED(sc) == 0) return (ENXIO); ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOSPC); callout_init_mtx(&sc->sc_wdog_ch, &sc->sc_mtx, 0); /* Initialize ifnet structure. */ ifp->if_softc = sc; if_initname(ifp, name, unit); ifp->if_start = lance_start; ifp->if_ioctl = lance_ioctl; ifp->if_init = lance_init; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; #ifdef LANCE_REVC_BUG ifp->if_flags &= ~IFF_MULTICAST; #endif ifp->if_baudrate = IF_Mbps(10); IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); /* Initialize ifmedia structures. 
*/ ifmedia_init(&sc->sc_media, 0, lance_mediachange, lance_mediastatus); if (sc->sc_supmedia != NULL) { for (i = 0; i < sc->sc_nsupmedia; i++) ifmedia_add(&sc->sc_media, sc->sc_supmedia[i], 0, NULL); ifmedia_set(&sc->sc_media, sc->sc_defaultmedia); } else { ifmedia_add(&sc->sc_media, IFM_MAKEWORD(IFM_ETHER, IFM_MANUAL, 0, 0), 0, NULL); ifmedia_set(&sc->sc_media, IFM_MAKEWORD(IFM_ETHER, IFM_MANUAL, 0, 0)); } switch (sc->sc_memsize) { case 8192: sc->sc_nrbuf = 4; sc->sc_ntbuf = 1; break; case 16384: sc->sc_nrbuf = 8; sc->sc_ntbuf = 2; break; case 32768: sc->sc_nrbuf = 16; sc->sc_ntbuf = 4; break; case 65536: sc->sc_nrbuf = 32; sc->sc_ntbuf = 8; break; case 131072: sc->sc_nrbuf = 64; sc->sc_ntbuf = 16; break; case 262144: sc->sc_nrbuf = 128; sc->sc_ntbuf = 32; break; default: /* weird memory size; cope with it */ nbuf = sc->sc_memsize / LEBLEN; sc->sc_ntbuf = nbuf / 5; sc->sc_nrbuf = nbuf - sc->sc_ntbuf; } if_printf(ifp, "%d receive buffers, %d transmit buffers\n", sc->sc_nrbuf, sc->sc_ntbuf); /* Make sure the chip is stopped. */ LE_LOCK(sc); lance_stop(sc); LE_UNLOCK(sc); return (0); } void lance_attach(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; /* Attach the interface. */ ether_ifattach(ifp, sc->sc_enaddr); /* Claim 802.1q capability. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; } void lance_detach(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; LE_LOCK(sc); lance_stop(sc); LE_UNLOCK(sc); callout_drain(&sc->sc_wdog_ch); ether_ifdetach(ifp); if_free(ifp); } void lance_suspend(struct lance_softc *sc) { LE_LOCK(sc); lance_stop(sc); LE_UNLOCK(sc); } void lance_resume(struct lance_softc *sc) { LE_LOCK(sc); if (sc->sc_ifp->if_flags & IFF_UP) lance_init_locked(sc); LE_UNLOCK(sc); } static void lance_start(struct ifnet *ifp) { struct lance_softc *sc = ifp->if_softc; LE_LOCK(sc); (*sc->sc_start_locked)(sc); LE_UNLOCK(sc); } static void lance_stop(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; LE_LOCK_ASSERT(sc, MA_OWNED); /* * Mark the interface down and cancel the watchdog timer. */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->sc_wdog_ch); sc->sc_wdog_timer = 0; (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_STOP); } static void lance_init(void *xsc) { struct lance_softc *sc = (struct lance_softc *)xsc; LE_LOCK(sc); lance_init_locked(sc); LE_UNLOCK(sc); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ void lance_init_locked(struct lance_softc *sc) { struct ifnet *ifp = sc->sc_ifp; u_long a; int timo; LE_LOCK_ASSERT(sc, MA_OWNED); (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_STOP); DELAY(100); /* Newer LANCE chips have a reset register. */ if (sc->sc_hwreset) (*sc->sc_hwreset)(sc); /* Set the correct byte swapping mode, etc. */ (*sc->sc_wrcsr)(sc, LE_CSR3, sc->sc_conf3); /* Set the current media. This may require the chip to be stopped. */ if (sc->sc_mediachange) (void)(*sc->sc_mediachange)(sc); /* * Update our private copy of the Ethernet address. * We NEED the copy so we can ensure its alignment! */ memcpy(sc->sc_enaddr, IF_LLADDR(ifp), ETHER_ADDR_LEN); /* Set up LANCE init block. */ (*sc->sc_meminit)(sc); /* Give LANCE the physical address of its init block. */ a = sc->sc_addr + LE_INITADDR(sc); (*sc->sc_wrcsr)(sc, LE_CSR1, a & 0xffff); (*sc->sc_wrcsr)(sc, LE_CSR2, a >> 16); /* Try to initialize the LANCE. */ DELAY(100); (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INIT); /* Wait for initialization to finish. 
*/ for (timo = 100000; timo; timo--) if ((*sc->sc_rdcsr)(sc, LE_CSR0) & LE_C0_IDON) break; if ((*sc->sc_rdcsr)(sc, LE_CSR0) & LE_C0_IDON) { /* Start the LANCE. */ (*sc->sc_wrcsr)(sc, LE_CSR0, LE_C0_INEA | LE_C0_STRT); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sc_wdog_timer = 0; callout_reset(&sc->sc_wdog_ch, hz, lance_watchdog, sc); (*sc->sc_start_locked)(sc); } else if_printf(ifp, "controller failed to initialize\n"); if (sc->sc_hwinit) (*sc->sc_hwinit)(sc); } /* * Routine to copy from mbuf chain to transmit buffer in * network buffer memory. */ int lance_put(struct lance_softc *sc, int boff, struct mbuf *m) { struct mbuf *n; int len, tlen = 0; LE_LOCK_ASSERT(sc, MA_OWNED); for (; m; m = n) { len = m->m_len; if (len == 0) { n = m_free(m); m = NULL; continue; } (*sc->sc_copytobuf)(sc, mtod(m, caddr_t), boff, len); boff += len; tlen += len; n = m_free(m); m = NULL; } if (tlen < LEMINSIZE) { (*sc->sc_zerobuf)(sc, boff, LEMINSIZE - tlen); tlen = LEMINSIZE; } return (tlen); } /* * Pull data off an interface. * Len is length of data, with local net header stripped. * We copy the data into mbufs. When full cluster sized units are present * we copy into clusters. */ struct mbuf * lance_get(struct lance_softc *sc, int boff, int totlen) { struct ifnet *ifp = sc->sc_ifp; struct mbuf *m, *m0, *newm; caddr_t newdata; int len; if (totlen <= ETHER_HDR_LEN || totlen > LEBLEN - ETHER_CRC_LEN) { #ifdef LEDEBUG if_printf(ifp, "invalid packet size %d; dropping\n", totlen); #endif return (NULL); } MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 == NULL) return (NULL); m0->m_pkthdr.rcvif = ifp; m0->m_pkthdr.len = totlen; len = MHLEN; m = m0; while (totlen > 0) { if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) goto bad; len = MCLBYTES; } if (m == m0) { newdata = (caddr_t) ALIGN(m->m_data + ETHER_HDR_LEN) - ETHER_HDR_LEN; len -= newdata - m->m_data; m->m_data = newdata; } m->m_len = len = min(totlen, len); (*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), boff, len); boff += len; totlen -= len; if (totlen > 0) { MGET(newm, M_NOWAIT, MT_DATA); - if (newm == 0) + if (newm == NULL) goto bad; len = MLEN; m = m->m_next = newm; } } return (m0); bad: m_freem(m0); return (NULL); } static void lance_watchdog(void *xsc) { struct lance_softc *sc = (struct lance_softc *)xsc; struct ifnet *ifp = sc->sc_ifp; LE_LOCK_ASSERT(sc, MA_OWNED); if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) { callout_reset(&sc->sc_wdog_ch, hz, lance_watchdog, sc); return; } if_printf(ifp, "device timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lance_init_locked(sc); } static int lance_mediachange(struct ifnet *ifp) { struct lance_softc *sc = ifp->if_softc; if (sc->sc_mediachange) { /* * For setting the port in LE_CSR15 the PCnet chips must * be powered down or stopped and unlike documented may * not take effect without an initialization. So don't * invoke (*sc_mediachange) directly here but go through * lance_init_locked(). */ LE_LOCK(sc); lance_stop(sc); lance_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) (*sc->sc_start_locked)(sc); LE_UNLOCK(sc); } return (0); } static void lance_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct lance_softc *sc = ifp->if_softc; LE_LOCK(sc); if (!(ifp->if_flags & IFF_UP)) { LE_UNLOCK(sc); return; } ifmr->ifm_status = IFM_AVALID; if (sc->sc_flags & LE_CARRIER) ifmr->ifm_status |= IFM_ACTIVE; if (sc->sc_mediastatus) (*sc->sc_mediastatus)(sc, ifmr); LE_UNLOCK(sc); } /* * Process an ioctl request. 
*/ static int lance_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct lance_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: LE_LOCK(sc); if (ifp->if_flags & IFF_PROMISC) { if (!(sc->sc_flags & LE_PROMISC)) { sc->sc_flags |= LE_PROMISC; lance_init_locked(sc); } } else if (sc->sc_flags & LE_PROMISC) { sc->sc_flags &= ~LE_PROMISC; lance_init_locked(sc); } if ((ifp->if_flags & IFF_ALLMULTI) && !(sc->sc_flags & LE_ALLMULTI)) { sc->sc_flags |= LE_ALLMULTI; lance_init_locked(sc); } else if (!(ifp->if_flags & IFF_ALLMULTI) && (sc->sc_flags & LE_ALLMULTI)) { sc->sc_flags &= ~LE_ALLMULTI; lance_init_locked(sc); } if (!(ifp->if_flags & IFF_UP) && ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * If interface is marked down and it is running, then * stop it. */ lance_stop(sc); } else if (ifp->if_flags & IFF_UP && !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { /* * If interface is marked up and it is stopped, then * start it. */ lance_init_locked(sc); } #ifdef LEDEBUG if (ifp->if_flags & IFF_DEBUG) sc->sc_flags |= LE_DEBUG; else sc->sc_flags &= ~LE_DEBUG; #endif LE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ LE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) lance_init_locked(sc); LE_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * Set up the logical address filter. */ void lance_setladrf(struct lance_softc *sc, uint16_t *af) { struct ifnet *ifp = sc->sc_ifp; struct ifmultiaddr *ifma; uint32_t crc; /* * Set up multicast address filter by passing all multicast addresses * through a crc generator, and then using the high order 6 bits as an * index into the 64 bit logical address filter. The high order bit * selects the word, while the rest of the bits select the bit within * the word. */ if (ifp->if_flags & IFF_PROMISC || sc->sc_flags & LE_ALLMULTI) { af[0] = af[1] = af[2] = af[3] = 0xffff; return; } af[0] = af[1] = af[2] = af[3] = 0x0000; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); /* Just want the 6 most significant bits. */ crc >>= 26; /* Set the corresponding bit in the filter. */ af[crc >> 4] |= LE_HTOLE16(1 << (crc & 0xf)); } if_maddr_runlock(ifp); } /* * Routines for accessing the transmit and receive buffers. * The various CPU and adapter configurations supported by this * driver require three different access methods for buffers * and descriptors: * (1) contig (contiguous data; no padding), * (2) gap2 (two bytes of data followed by two bytes of padding), * (3) gap16 (16 bytes of data followed by 16 bytes of padding). */ /* * contig: contiguous data with no padding. * * Buffers may have any alignment. */ void lance_copytobuf_contig(struct lance_softc *sc, void *from, int boff, int len) { volatile caddr_t buf = sc->sc_mem; /* * Just call memcpy() to do the work. */ memcpy(buf + boff, from, len); } void lance_copyfrombuf_contig(struct lance_softc *sc, void *to, int boff, int len) { volatile caddr_t buf = sc->sc_mem; /* * Just call memcpy() to do the work. 
*/ memcpy(to, buf + boff, len); } void lance_zerobuf_contig(struct lance_softc *sc, int boff, int len) { volatile caddr_t buf = sc->sc_mem; /* * Just let memset() do the work */ memset(buf + boff, 0, len); } #if 0 /* * Examples only; duplicate these and tweak (if necessary) in * machine-specific front-ends. */ /* * gap2: two bytes of data followed by two bytes of pad. * * Buffers must be 4-byte aligned. The code doesn't worry about * doing an extra byte. */ static void lance_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t from = fromv; volatile uint16_t *bptr; if (boff & 0x1) { /* Handle unaligned first byte. */ bptr = ((volatile uint16_t *)buf) + (boff - 1); *bptr = (*from++ << 8) | (*bptr & 0xff); bptr += 2; len--; } else bptr = ((volatile uint16_t *)buf) + boff; while (len > 1) { *bptr = (from[1] << 8) | (from[0] & 0xff); bptr += 2; from += 2; len -= 2; } if (len == 1) *bptr = (uint16_t)*from; } static void lance_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t to = tov; volatile uint16_t *bptr; uint16_t tmp; if (boff & 0x1) { /* Handle unaligned first byte. */ bptr = ((volatile uint16_t *)buf) + (boff - 1); *to++ = (*bptr >> 8) & 0xff; bptr += 2; len--; } else bptr = ((volatile uint16_t *)buf) + boff; while (len > 1) { tmp = *bptr; *to++ = tmp & 0xff; *to++ = (tmp >> 8) & 0xff; bptr += 2; len -= 2; } if (len == 1) *to = *bptr & 0xff; } static void lance_zerobuf_gap2(struct lance_softc *sc, int boff, int len) { volatile caddr_t buf = sc->sc_mem; volatile uint16_t *bptr; if ((unsigned)boff & 0x1) { bptr = ((volatile uint16_t *)buf) + (boff - 1); *bptr &= 0xff; bptr += 2; len--; } else bptr = ((volatile uint16_t *)buf) + boff; while (len > 0) { *bptr = 0; bptr += 2; len -= 2; } } /* * gap16: 16 bytes of data followed by 16 bytes of pad. * * Buffers must be 32-byte aligned. */ static void lance_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t bptr, from = fromv; int xfer; bptr = buf + ((boff << 1) & ~0x1f); boff &= 0xf; xfer = min(len, 16 - boff); while (len > 0) { memcpy(bptr + boff, from, xfer); from += xfer; bptr += 32; boff = 0; len -= xfer; xfer = min(len, 16); } } static void lance_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t bptr, to = tov; int xfer; bptr = buf + ((boff << 1) & ~0x1f); boff &= 0xf; xfer = min(len, 16 - boff); while (len > 0) { memcpy(to, bptr + boff, xfer); to += xfer; bptr += 32; boff = 0; len -= xfer; xfer = min(len, 16); } } static void lance_zerobuf_gap16(struct lance_softc *sc, int boff, int len) { volatile caddr_t buf = sc->sc_mem; caddr_t bptr; int xfer; bptr = buf + ((boff << 1) & ~0x1f); boff &= 0xf; xfer = min(len, 16 - boff); while (len > 0) { memset(bptr + boff, 0, xfer); bptr += 32; boff = 0; len -= xfer; xfer = min(len, 16); } } #endif /* Example only */ Index: head/sys/dev/md/md.c =================================================================== --- head/sys/dev/md/md.c (revision 313981) +++ head/sys/dev/md/md.c (revision 313982) @@ -1,1880 +1,1880 @@ /*- * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp * ---------------------------------------------------------------------------- * * $FreeBSD$ * */ /*- * The following functions are based in the vn(4) driver: mdstart_swap(), * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(), * and as such under the following copyright: * * Copyright (c) 1988 University of Utah. * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Utah Hdr: vn.c 1.13 94/04/02 * * from: @(#)vn.c 8.6 (Berkeley) 4/1/94 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03 */ #include "opt_rootdevname.h" #include "opt_geom.h" #include "opt_md.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MD_MODVER 1 #define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */ #define MD_EXITING 0x20000 /* Worker thread is exiting. */ #ifndef MD_NSECT #define MD_NSECT (10000 * 2) #endif static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk"); static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors"); static int md_debug; SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "Enable md(4) debug messages"); static int md_malloc_wait; SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, "Allow malloc to wait for memory allocations"); #if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE) #define MD_ROOT_FSTYPE "ufs" #endif #if defined(MD_ROOT) /* * Preloaded image gets put here. 
*/ #if defined(MD_ROOT_SIZE) /* * We put the mfs_root symbol into the oldmfs section of the kernel object file. * Applications that patch the object with the image can determine * the size looking at the oldmfs section size within the kernel. */ u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs"))); const int mfs_root_size = sizeof(mfs_root); #else extern volatile u_char __weak_symbol mfs_root; extern volatile u_char __weak_symbol mfs_root_end; __GLOBL(mfs_root); __GLOBL(mfs_root_end); #define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root)) #endif #endif static g_init_t g_md_init; static g_fini_t g_md_fini; static g_start_t g_md_start; static g_access_t g_md_access; static void g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp); -static struct cdev *status_dev = 0; +static struct cdev *status_dev = NULL; static struct sx md_sx; static struct unrhdr *md_uh; static d_ioctl_t mdctlioctl; static struct cdevsw mdctl_cdevsw = { .d_version = D_VERSION, .d_ioctl = mdctlioctl, .d_name = MD_NAME, }; struct g_class g_md_class = { .name = "MD", .version = G_VERSION, .init = g_md_init, .fini = g_md_fini, .start = g_md_start, .access = g_md_access, .dumpconf = g_md_dumpconf, }; DECLARE_GEOM_CLASS(g_md_class, g_md); static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list); #define NINDIR (PAGE_SIZE / sizeof(uintptr_t)) #define NMASK (NINDIR-1) static int nshift; static int md_vnode_pbuf_freecnt; struct indir { uintptr_t *array; u_int total; u_int used; u_int shift; }; struct md_s { int unit; LIST_ENTRY(md_s) list; struct bio_queue_head bio_queue; struct mtx queue_mtx; struct mtx stat_mtx; struct cdev *dev; enum md_types type; off_t mediasize; unsigned sectorsize; unsigned opencount; unsigned fwheads; unsigned fwsectors; unsigned flags; char name[20]; struct proc *procp; struct g_geom *gp; struct g_provider *pp; int (*start)(struct md_s *sc, struct bio *bp); struct devstat *devstat; /* MD_MALLOC related fields */ struct indir *indir; uma_zone_t uma; /* MD_PRELOAD related fields */ u_char *pl_ptr; size_t pl_len; /* MD_VNODE related fields */ struct vnode *vnode; char file[PATH_MAX]; struct ucred *cred; /* MD_SWAP related fields */ vm_object_t object; }; static struct indir * new_indir(u_int shift) { struct indir *ip; ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO); if (ip == NULL) return (NULL); ip->array = malloc(sizeof(uintptr_t) * NINDIR, M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO); if (ip->array == NULL) { free(ip, M_MD); return (NULL); } ip->total = NINDIR; ip->shift = shift; return (ip); } static void del_indir(struct indir *ip) { free(ip->array, M_MDSECT); free(ip, M_MD); } static void destroy_indir(struct md_s *sc, struct indir *ip) { int i; for (i = 0; i < NINDIR; i++) { if (!ip->array[i]) continue; if (ip->shift) destroy_indir(sc, (struct indir*)(ip->array[i])); else if (ip->array[i] > 255) uma_zfree(sc->uma, (void *)(ip->array[i])); } del_indir(ip); } /* * This function does the math and allocates the top level "indir" structure * for a device of "size" sectors. */ static struct indir * dimension(off_t size) { off_t rcnt; struct indir *ip; int layer; rcnt = size; layer = 0; while (rcnt > NINDIR) { rcnt /= NINDIR; layer++; } /* * XXX: the top layer is probably not fully populated, so we allocate * too much space for ip->array in here. 
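 *
 * Worked example (figures are illustrative only): on a 64-bit machine
 * with 4KB pages, NINDIR = PAGE_SIZE / sizeof(uintptr_t) = 512 and
 * nshift = 9.  A 1GB device with 512-byte sectors holds 2^21 sectors;
 * the loop below divides 2^21 -> 4096 -> 8, so layer = 2 and the top
 * node gets shift = 18.  Only 8 of its 512 array slots can ever be
 * used, which is exactly the over-allocation the XXX above refers to.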
*/ ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO); ip->array = malloc(sizeof(uintptr_t) * NINDIR, M_MDSECT, M_WAITOK | M_ZERO); ip->total = NINDIR; ip->shift = layer * nshift; return (ip); } /* * Read a given sector */ static uintptr_t s_read(struct indir *ip, off_t offset) { struct indir *cip; int idx; uintptr_t up; if (md_debug > 1) printf("s_read(%jd)\n", (intmax_t)offset); up = 0; for (cip = ip; cip != NULL;) { if (cip->shift) { idx = (offset >> cip->shift) & NMASK; up = cip->array[idx]; cip = (struct indir *)up; continue; } idx = offset & NMASK; return (cip->array[idx]); } return (0); } /* * Write a given sector, prune the tree if the value is 0 */ static int s_write(struct indir *ip, off_t offset, uintptr_t ptr) { struct indir *cip, *lip[10]; int idx, li; uintptr_t up; if (md_debug > 1) printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr); up = 0; li = 0; cip = ip; for (;;) { lip[li++] = cip; if (cip->shift) { idx = (offset >> cip->shift) & NMASK; up = cip->array[idx]; if (up != 0) { cip = (struct indir *)up; continue; } /* Allocate branch */ cip->array[idx] = (uintptr_t)new_indir(cip->shift - nshift); if (cip->array[idx] == 0) return (ENOSPC); cip->used++; up = cip->array[idx]; cip = (struct indir *)up; continue; } /* leafnode */ idx = offset & NMASK; up = cip->array[idx]; if (up != 0) cip->used--; cip->array[idx] = ptr; if (ptr != 0) cip->used++; break; } if (cip->used != 0 || li == 1) return (0); li--; while (cip->used == 0 && cip != ip) { li--; idx = (offset >> lip[li]->shift) & NMASK; up = lip[li]->array[idx]; KASSERT(up == (uintptr_t)cip, ("md screwed up")); del_indir(cip); lip[li]->array[idx] = 0; lip[li]->used--; cip = lip[li]; } return (0); } static int g_md_access(struct g_provider *pp, int r, int w, int e) { struct md_s *sc; sc = pp->geom->softc; if (sc == NULL) { if (r <= 0 && w <= 0 && e <= 0) return (0); return (ENXIO); } r += pp->acr; w += pp->acw; e += pp->ace; if ((sc->flags & MD_READONLY) != 0 && w > 0) return (EROFS); if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { sc->opencount = 1; } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { sc->opencount = 0; } return (0); } static void g_md_start(struct bio *bp) { struct md_s *sc; sc = bp->bio_to->geom->softc; if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) { mtx_lock(&sc->stat_mtx); devstat_start_transaction_bio(sc->devstat, bp); mtx_unlock(&sc->stat_mtx); } mtx_lock(&sc->queue_mtx); bioq_disksort(&sc->bio_queue, bp); mtx_unlock(&sc->queue_mtx); wakeup(sc); } #define MD_MALLOC_MOVE_ZERO 1 #define MD_MALLOC_MOVE_FILL 2 #define MD_MALLOC_MOVE_READ 3 #define MD_MALLOC_MOVE_WRITE 4 #define MD_MALLOC_MOVE_CMP 5 static int md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize, void *ptr, u_char fill, int op) { struct sf_buf *sf; vm_page_t m, *mp1; char *p, first; off_t *uc; unsigned n; int error, i, ma_offs1, sz, first_read; m = NULL; error = 0; sf = NULL; /* if (op == MD_MALLOC_MOVE_CMP) { gcc */ first = 0; first_read = 0; uc = ptr; mp1 = *mp; ma_offs1 = *ma_offs; /* } */ sched_pin(); for (n = sectorsize; n != 0; n -= sz) { sz = imin(PAGE_SIZE - *ma_offs, n); if (m != **mp) { if (sf != NULL) sf_buf_free(sf); m = **mp; sf = sf_buf_alloc(m, SFB_CPUPRIVATE | (md_malloc_wait ? 
0 : SFB_NOWAIT)); if (sf == NULL) { error = ENOMEM; break; } } p = (char *)sf_buf_kva(sf) + *ma_offs; switch (op) { case MD_MALLOC_MOVE_ZERO: bzero(p, sz); break; case MD_MALLOC_MOVE_FILL: memset(p, fill, sz); break; case MD_MALLOC_MOVE_READ: bcopy(ptr, p, sz); cpu_flush_dcache(p, sz); break; case MD_MALLOC_MOVE_WRITE: bcopy(p, ptr, sz); break; case MD_MALLOC_MOVE_CMP: for (i = 0; i < sz; i++, p++) { if (!first_read) { *uc = (u_char)*p; first = *p; first_read = 1; } else if (*p != first) { error = EDOOFUS; break; } } break; default: KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op)); break; } if (error != 0) break; *ma_offs += sz; *ma_offs %= PAGE_SIZE; if (*ma_offs == 0) (*mp)++; ptr = (char *)ptr + sz; } if (sf != NULL) sf_buf_free(sf); sched_unpin(); if (op == MD_MALLOC_MOVE_CMP && error != 0) { *mp = mp1; *ma_offs = ma_offs1; } return (error); } static int md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs, unsigned len, void *ptr, u_char fill, int op) { bus_dma_segment_t *vlist; uint8_t *p, *end, first; off_t *uc; int ma_offs, seg_len; vlist = *pvlist; ma_offs = *pma_offs; uc = ptr; for (; len != 0; len -= seg_len) { seg_len = imin(vlist->ds_len - ma_offs, len); p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs; switch (op) { case MD_MALLOC_MOVE_ZERO: bzero(p, seg_len); break; case MD_MALLOC_MOVE_FILL: memset(p, fill, seg_len); break; case MD_MALLOC_MOVE_READ: bcopy(ptr, p, seg_len); cpu_flush_dcache(p, seg_len); break; case MD_MALLOC_MOVE_WRITE: bcopy(p, ptr, seg_len); break; case MD_MALLOC_MOVE_CMP: end = p + seg_len; first = *uc = *p; /* Confirm all following bytes match the first */ while (++p < end) { if (*p != first) return (EDOOFUS); } break; default: KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op)); break; } ma_offs += seg_len; if (ma_offs == vlist->ds_len) { ma_offs = 0; vlist++; } ptr = (uint8_t *)ptr + seg_len; } *pvlist = vlist; *pma_offs = ma_offs; return (0); } static int mdstart_malloc(struct md_s *sc, struct bio *bp) { u_char *dst; vm_page_t *m; bus_dma_segment_t *vlist; int i, error, error1, ma_offs, notmapped; off_t secno, nsec, uc; uintptr_t sp, osp; switch (bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_DELETE: break; default: return (EOPNOTSUPP); } notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0; vlist = (bp->bio_flags & BIO_VLIST) != 0 ? 
(bus_dma_segment_t *)bp->bio_data : NULL; if (notmapped) { m = bp->bio_ma; ma_offs = bp->bio_ma_offset; dst = NULL; KASSERT(vlist == NULL, ("vlists cannot be unmapped")); } else if (vlist != NULL) { ma_offs = bp->bio_ma_offset; dst = NULL; } else { dst = bp->bio_data; } nsec = bp->bio_length / sc->sectorsize; secno = bp->bio_offset / sc->sectorsize; error = 0; while (nsec--) { osp = s_read(sc->indir, secno); if (bp->bio_cmd == BIO_DELETE) { if (osp != 0) error = s_write(sc->indir, secno, 0); } else if (bp->bio_cmd == BIO_READ) { if (osp == 0) { if (notmapped) { error = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize, NULL, 0, MD_MALLOC_MOVE_ZERO); } else if (vlist != NULL) { error = md_malloc_move_vlist(&vlist, &ma_offs, sc->sectorsize, NULL, 0, MD_MALLOC_MOVE_ZERO); } else bzero(dst, sc->sectorsize); } else if (osp <= 255) { if (notmapped) { error = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize, NULL, osp, MD_MALLOC_MOVE_FILL); } else if (vlist != NULL) { error = md_malloc_move_vlist(&vlist, &ma_offs, sc->sectorsize, NULL, osp, MD_MALLOC_MOVE_FILL); } else memset(dst, osp, sc->sectorsize); } else { if (notmapped) { error = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize, (void *)osp, 0, MD_MALLOC_MOVE_READ); } else if (vlist != NULL) { error = md_malloc_move_vlist(&vlist, &ma_offs, sc->sectorsize, (void *)osp, 0, MD_MALLOC_MOVE_READ); } else { bcopy((void *)osp, dst, sc->sectorsize); cpu_flush_dcache(dst, sc->sectorsize); } } osp = 0; } else if (bp->bio_cmd == BIO_WRITE) { if (sc->flags & MD_COMPRESS) { if (notmapped) { error1 = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize, &uc, 0, MD_MALLOC_MOVE_CMP); i = error1 == 0 ? sc->sectorsize : 0; } else if (vlist != NULL) { error1 = md_malloc_move_vlist(&vlist, &ma_offs, sc->sectorsize, &uc, 0, MD_MALLOC_MOVE_CMP); i = error1 == 0 ? sc->sectorsize : 0; } else { uc = dst[0]; for (i = 1; i < sc->sectorsize; i++) { if (dst[i] != uc) break; } } } else { i = 0; uc = 0; } if (i == sc->sectorsize) { if (osp != uc) error = s_write(sc->indir, secno, uc); } else { if (osp <= 255) { sp = (uintptr_t)uma_zalloc(sc->uma, md_malloc_wait ? 
M_WAITOK : M_NOWAIT); if (sp == 0) { error = ENOSPC; break; } if (notmapped) { error = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize, (void *)sp, 0, MD_MALLOC_MOVE_WRITE); } else if (vlist != NULL) { error = md_malloc_move_vlist( &vlist, &ma_offs, sc->sectorsize, (void *)sp, 0, MD_MALLOC_MOVE_WRITE); } else { bcopy(dst, (void *)sp, sc->sectorsize); } error = s_write(sc->indir, secno, sp); } else { if (notmapped) { error = md_malloc_move_ma(&m, &ma_offs, sc->sectorsize, (void *)osp, 0, MD_MALLOC_MOVE_WRITE); } else if (vlist != NULL) { error = md_malloc_move_vlist( &vlist, &ma_offs, sc->sectorsize, (void *)osp, 0, MD_MALLOC_MOVE_WRITE); } else { bcopy(dst, (void *)osp, sc->sectorsize); } osp = 0; } } } else { error = EOPNOTSUPP; } if (osp > 255) uma_zfree(sc->uma, (void*)osp); if (error != 0) break; secno++; if (!notmapped && vlist == NULL) dst += sc->sectorsize; } bp->bio_resid = 0; return (error); } static void mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len) { off_t seg_len; while (offset >= vlist->ds_len) { offset -= vlist->ds_len; vlist++; } while (len != 0) { seg_len = omin(len, vlist->ds_len - offset); bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset), seg_len); offset = 0; src = (uint8_t *)src + seg_len; len -= seg_len; vlist++; } } static void mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len) { off_t seg_len; while (offset >= vlist->ds_len) { offset -= vlist->ds_len; vlist++; } while (len != 0) { seg_len = omin(len, vlist->ds_len - offset); bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst, seg_len); offset = 0; dst = (uint8_t *)dst + seg_len; len -= seg_len; vlist++; } } static int mdstart_preload(struct md_s *sc, struct bio *bp) { uint8_t *p; p = sc->pl_ptr + bp->bio_offset; switch (bp->bio_cmd) { case BIO_READ: if ((bp->bio_flags & BIO_VLIST) != 0) { mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data, bp->bio_ma_offset, bp->bio_length); } else { bcopy(p, bp->bio_data, bp->bio_length); } cpu_flush_dcache(bp->bio_data, bp->bio_length); break; case BIO_WRITE: if ((bp->bio_flags & BIO_VLIST) != 0) { mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data, bp->bio_ma_offset, p, bp->bio_length); } else { bcopy(bp->bio_data, p, bp->bio_length); } break; } bp->bio_resid = 0; return (0); } static int mdstart_vnode(struct md_s *sc, struct bio *bp) { int error; struct uio auio; struct iovec aiov; struct iovec *piov; struct mount *mp; struct vnode *vp; struct buf *pb; bus_dma_segment_t *vlist; struct thread *td; off_t iolen, len, zerosize; int ma_offs, npages; switch (bp->bio_cmd) { case BIO_READ: auio.uio_rw = UIO_READ; break; case BIO_WRITE: case BIO_DELETE: auio.uio_rw = UIO_WRITE; break; case BIO_FLUSH: break; default: return (EOPNOTSUPP); } td = curthread; vp = sc->vnode; pb = NULL; piov = NULL; ma_offs = bp->bio_ma_offset; len = bp->bio_length; /* * VNODE I/O * * If an error occurs, we set BIO_ERROR but we do not set * B_INVAL because (for a write anyway), the buffer is * still valid. */ if (bp->bio_cmd == BIO_FLUSH) { (void) vn_start_write(vp, &mp, V_WAIT); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(vp, MNT_WAIT, td); VOP_UNLOCK(vp, 0); vn_finished_write(mp); return (error); } auio.uio_offset = (vm_ooffset_t)bp->bio_offset; auio.uio_resid = bp->bio_length; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; if (bp->bio_cmd == BIO_DELETE) { /* * Emulate BIO_DELETE by writing zeros. 
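 *
 * Sketch of the arithmetic (the zero-region size is an assumed figure
 * for illustration): with a 64KB zero_region and 512-byte sectors,
 * zerosize stays 64KB since that is already a multiple of 512, so a
 * 1MB delete is described by howmany(1MB, 64KB) = 16 iovecs, each one
 * aliasing the same shared zero_region.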
*/ zerosize = ZERO_REGION_SIZE - (ZERO_REGION_SIZE % sc->sectorsize); auio.uio_iovcnt = howmany(bp->bio_length, zerosize); piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK); auio.uio_iov = piov; while (len > 0) { piov->iov_base = __DECONST(void *, zero_region); piov->iov_len = len; if (len > zerosize) piov->iov_len = zerosize; len -= piov->iov_len; piov++; } piov = auio.uio_iov; } else if ((bp->bio_flags & BIO_VLIST) != 0) { piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK); auio.uio_iov = piov; vlist = (bus_dma_segment_t *)bp->bio_data; while (len > 0) { piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr + ma_offs); piov->iov_len = vlist->ds_len - ma_offs; if (piov->iov_len > len) piov->iov_len = len; len -= piov->iov_len; ma_offs = 0; vlist++; piov++; } auio.uio_iovcnt = piov - auio.uio_iov; piov = auio.uio_iov; } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { pb = getpbuf(&md_vnode_pbuf_freecnt); bp->bio_resid = len; unmapped_step: npages = atop(min(MAXPHYS, round_page(len + (ma_offs & PAGE_MASK)))); iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len); KASSERT(iolen > 0, ("zero iolen")); pmap_qenter((vm_offset_t)pb->b_data, &bp->bio_ma[atop(ma_offs)], npages); aiov.iov_base = (void *)((vm_offset_t)pb->b_data + (ma_offs & PAGE_MASK)); aiov.iov_len = iolen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_resid = iolen; } else { aiov.iov_base = bp->bio_data; aiov.iov_len = bp->bio_length; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; } /* * When reading set IO_DIRECT to try to avoid double-caching * the data. When writing IO_DIRECT is not optimal. */ if (auio.uio_rw == UIO_READ) { vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred); VOP_UNLOCK(vp, 0); } else { (void) vn_start_write(vp, &mp, V_WAIT); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred); VOP_UNLOCK(vp, 0); vn_finished_write(mp); } if (pb != NULL) { pmap_qremove((vm_offset_t)pb->b_data, npages); if (error == 0) { len -= iolen; bp->bio_resid -= iolen; ma_offs += iolen; if (len > 0) goto unmapped_step; } relpbuf(pb, &md_vnode_pbuf_freecnt); } free(piov, M_MD); if (pb == NULL) bp->bio_resid = auio.uio_resid; return (error); } static int mdstart_swap(struct md_s *sc, struct bio *bp) { vm_page_t m; u_char *p; vm_pindex_t i, lastp; bus_dma_segment_t *vlist; int rv, ma_offs, offs, len, lastend; switch (bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_DELETE: break; default: return (EOPNOTSUPP); } p = bp->bio_data; ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ? bp->bio_ma_offset : 0; vlist = (bp->bio_flags & BIO_VLIST) != 0 ? (bus_dma_segment_t *)bp->bio_data : NULL; /* * offs is the offset at which to start operating on the * next (ie, first) page. lastp is the last page on * which we're going to operate. lastend is the ending * position within that last page (ie, PAGE_SIZE if * we're operating on complete aligned pages). */ offs = bp->bio_offset % PAGE_SIZE; lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE; lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1; rv = VM_PAGER_OK; VM_OBJECT_WLOCK(sc->object); vm_object_pip_add(sc->object, 1); for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) { len = ((i == lastp) ? 
lastend : PAGE_SIZE) - offs; m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM); if (bp->bio_cmd == BIO_READ) { if (m->valid == VM_PAGE_BITS_ALL) rv = VM_PAGER_OK; else rv = vm_pager_get_pages(sc->object, &m, 1, NULL, NULL); if (rv == VM_PAGER_ERROR) { vm_page_xunbusy(m); break; } else if (rv == VM_PAGER_FAIL) { /* * Pager does not have the page. Zero * the allocated page, and mark it as * valid. Do not set dirty, the page * can be recreated if thrown out. */ pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; } if ((bp->bio_flags & BIO_UNMAPPED) != 0) { pmap_copy_pages(&m, offs, bp->bio_ma, ma_offs, len); } else if ((bp->bio_flags & BIO_VLIST) != 0) { physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs, vlist, ma_offs, len); cpu_flush_dcache(p, len); } else { physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len); cpu_flush_dcache(p, len); } } else if (bp->bio_cmd == BIO_WRITE) { if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL) rv = vm_pager_get_pages(sc->object, &m, 1, NULL, NULL); else rv = VM_PAGER_OK; if (rv == VM_PAGER_ERROR) { vm_page_xunbusy(m); break; } if ((bp->bio_flags & BIO_UNMAPPED) != 0) { pmap_copy_pages(bp->bio_ma, ma_offs, &m, offs, len); } else if ((bp->bio_flags & BIO_VLIST) != 0) { physcopyin_vlist(vlist, ma_offs, VM_PAGE_TO_PHYS(m) + offs, len); } else { physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len); } m->valid = VM_PAGE_BITS_ALL; } else if (bp->bio_cmd == BIO_DELETE) { if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL) rv = vm_pager_get_pages(sc->object, &m, 1, NULL, NULL); else rv = VM_PAGER_OK; if (rv == VM_PAGER_ERROR) { vm_page_xunbusy(m); break; } if (len != PAGE_SIZE) { pmap_zero_page_area(m, offs, len); vm_page_clear_dirty(m, offs, len); m->valid = VM_PAGE_BITS_ALL; } else vm_pager_page_unswapped(m); } vm_page_xunbusy(m); vm_page_lock(m); if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE) vm_page_free(m); else vm_page_activate(m); vm_page_unlock(m); if (bp->bio_cmd == BIO_WRITE) { vm_page_dirty(m); vm_pager_page_unswapped(m); } /* Actions on further pages start at offset 0 */ p += PAGE_SIZE - offs; offs = 0; ma_offs += len; } vm_object_pip_wakeup(sc->object); VM_OBJECT_WUNLOCK(sc->object); return (rv != VM_PAGER_ERROR ? 
0 : ENOSPC); } static int mdstart_null(struct md_s *sc, struct bio *bp) { switch (bp->bio_cmd) { case BIO_READ: bzero(bp->bio_data, bp->bio_length); cpu_flush_dcache(bp->bio_data, bp->bio_length); break; case BIO_WRITE: break; } bp->bio_resid = 0; return (0); } static void md_kthread(void *arg) { struct md_s *sc; struct bio *bp; int error; sc = arg; thread_lock(curthread); sched_prio(curthread, PRIBIO); thread_unlock(curthread); if (sc->type == MD_VNODE) curthread->td_pflags |= TDP_NORUNNINGBUF; for (;;) { mtx_lock(&sc->queue_mtx); if (sc->flags & MD_SHUTDOWN) { sc->flags |= MD_EXITING; mtx_unlock(&sc->queue_mtx); kproc_exit(0); } bp = bioq_takefirst(&sc->bio_queue); if (!bp) { msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0); continue; } mtx_unlock(&sc->queue_mtx); if (bp->bio_cmd == BIO_GETATTR) { if ((sc->fwsectors && sc->fwheads && (g_handleattr_int(bp, "GEOM::fwsectors", sc->fwsectors) || g_handleattr_int(bp, "GEOM::fwheads", sc->fwheads))) || g_handleattr_int(bp, "GEOM::candelete", 1)) error = -1; else error = EOPNOTSUPP; } else { error = sc->start(sc, bp); } if (error != -1) { bp->bio_completed = bp->bio_length; if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) devstat_end_transaction_bio(sc->devstat, bp); g_io_deliver(bp, error); } } } static struct md_s * mdfind(int unit) { struct md_s *sc; LIST_FOREACH(sc, &md_softc_list, list) { if (sc->unit == unit) break; } return (sc); } static struct md_s * mdnew(int unit, int *errp, enum md_types type) { struct md_s *sc; int error; *errp = 0; if (unit == -1) unit = alloc_unr(md_uh); else unit = alloc_unr_specific(md_uh, unit); if (unit == -1) { *errp = EBUSY; return (NULL); } sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO); sc->type = type; bioq_init(&sc->bio_queue); mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF); mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF); sc->unit = unit; sprintf(sc->name, "md%d", unit); LIST_INSERT_HEAD(&md_softc_list, sc, list); error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name); if (error == 0) return (sc); LIST_REMOVE(sc, list); mtx_destroy(&sc->stat_mtx); mtx_destroy(&sc->queue_mtx); free_unr(md_uh, sc->unit); free(sc, M_MD); *errp = error; return (NULL); } static void mdinit(struct md_s *sc) { struct g_geom *gp; struct g_provider *pp; g_topology_lock(); gp = g_new_geomf(&g_md_class, "md%d", sc->unit); gp->softc = sc; pp = g_new_providerf(gp, "md%d", sc->unit); pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; pp->mediasize = sc->mediasize; pp->sectorsize = sc->sectorsize; switch (sc->type) { case MD_MALLOC: case MD_VNODE: case MD_SWAP: pp->flags |= G_PF_ACCEPT_UNMAPPED; break; case MD_PRELOAD: case MD_NULL: break; } sc->gp = gp; sc->pp = pp; g_error_provider(pp, 0); g_topology_unlock(); sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); } static int mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio) { uintptr_t sp; int error; off_t u; error = 0; if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) return (EINVAL); if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize)) return (EINVAL); /* Compression doesn't make sense if we have reserved space */ if (mdio->md_options & MD_RESERVE) mdio->md_options &= ~MD_COMPRESS; if (mdio->md_fwsectors != 0) sc->fwsectors = mdio->md_fwsectors; if (mdio->md_fwheads != 0) sc->fwheads = mdio->md_fwheads; sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE); sc->indir = dimension(sc->mediasize / 
sc->sectorsize); sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL, 0x1ff, 0); if (mdio->md_options & MD_RESERVE) { off_t nsectors; nsectors = sc->mediasize / sc->sectorsize; for (u = 0; u < nsectors; u++) { sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO); if (sp != 0) error = s_write(sc->indir, u, sp); else error = ENOMEM; if (error != 0) break; } } return (error); } static int mdsetcred(struct md_s *sc, struct ucred *cred) { char *tmpbuf; int error = 0; /* * Set credits in our softc */ if (sc->cred) crfree(sc->cred); sc->cred = crhold(cred); /* * Horrible kludge to establish credentials for NFS XXX. */ if (sc->vnode) { struct uio auio; struct iovec aiov; tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK); bzero(&auio, sizeof(auio)); aiov.iov_base = tmpbuf; aiov.iov_len = sc->sectorsize; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = 0; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_resid = aiov.iov_len; vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); error = VOP_READ(sc->vnode, &auio, 0, sc->cred); VOP_UNLOCK(sc->vnode, 0); free(tmpbuf, M_TEMP); } return (error); } static int mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) { struct vattr vattr; struct nameidata nd; char *fname; int error, flags; /* * Kernel-originated requests must have the filename appended * to the mdio structure to protect against malicious software. */ fname = mdio->md_file; if ((void *)fname != (void *)(mdio + 1)) { error = copyinstr(fname, sc->file, sizeof(sc->file), NULL); if (error != 0) return (error); } else strlcpy(sc->file, fname, sizeof(sc->file)); /* * If the user specified that this is a read only device, don't * set the FWRITE mask before trying to open the backing store. */ flags = FREAD | ((mdio->md_options & MD_READONLY) ? 0 : FWRITE); NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td); error = vn_open(&nd, &flags, 0, NULL); if (error != 0) return (error); NDFREE(&nd, NDF_ONLY_PNBUF); if (nd.ni_vp->v_type != VREG) { error = EINVAL; goto bad; } error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred); if (error != 0) goto bad; if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); if (nd.ni_vp->v_iflag & VI_DOOMED) { /* Forced unmount. 
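That is, the vnode was doomed (VI_DOOMED) while we
				 * slept waiting for the lock upgrade, so it can
				 * no longer back the md device.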
*/ error = EBADF; goto bad; } } nd.ni_vp->v_vflag |= VV_MD; VOP_UNLOCK(nd.ni_vp, 0); if (mdio->md_fwsectors != 0) sc->fwsectors = mdio->md_fwsectors; if (mdio->md_fwheads != 0) sc->fwheads = mdio->md_fwheads; sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC); if (!(flags & FWRITE)) sc->flags |= MD_READONLY; sc->vnode = nd.ni_vp; error = mdsetcred(sc, td->td_ucred); if (error != 0) { sc->vnode = NULL; vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY); nd.ni_vp->v_vflag &= ~VV_MD; goto bad; } return (0); bad: VOP_UNLOCK(nd.ni_vp, 0); (void)vn_close(nd.ni_vp, flags, td->td_ucred, td); return (error); } static int mddestroy(struct md_s *sc, struct thread *td) { if (sc->gp) { sc->gp->softc = NULL; g_topology_lock(); g_wither_geom(sc->gp, ENXIO); g_topology_unlock(); sc->gp = NULL; sc->pp = NULL; } if (sc->devstat) { devstat_remove_entry(sc->devstat); sc->devstat = NULL; } mtx_lock(&sc->queue_mtx); sc->flags |= MD_SHUTDOWN; wakeup(sc); while (!(sc->flags & MD_EXITING)) msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10); mtx_unlock(&sc->queue_mtx); mtx_destroy(&sc->stat_mtx); mtx_destroy(&sc->queue_mtx); if (sc->vnode != NULL) { vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY); sc->vnode->v_vflag &= ~VV_MD; VOP_UNLOCK(sc->vnode, 0); (void)vn_close(sc->vnode, sc->flags & MD_READONLY ? FREAD : (FREAD|FWRITE), sc->cred, td); } if (sc->cred != NULL) crfree(sc->cred); if (sc->object != NULL) vm_object_deallocate(sc->object); if (sc->indir) destroy_indir(sc, sc->indir); if (sc->uma) uma_zdestroy(sc->uma); LIST_REMOVE(sc, list); free_unr(md_uh, sc->unit); free(sc, M_MD); return (0); } static int mdresize(struct md_s *sc, struct md_ioctl *mdio) { int error, res; vm_pindex_t oldpages, newpages; switch (sc->type) { case MD_VNODE: case MD_NULL: break; case MD_SWAP: if (mdio->md_mediasize <= 0 || (mdio->md_mediasize % PAGE_SIZE) != 0) return (EDOM); oldpages = OFF_TO_IDX(round_page(sc->mediasize)); newpages = OFF_TO_IDX(round_page(mdio->md_mediasize)); if (newpages < oldpages) { VM_OBJECT_WLOCK(sc->object); vm_object_page_remove(sc->object, newpages, 0, 0); swap_pager_freespace(sc->object, newpages, oldpages - newpages); swap_release_by_cred(IDX_TO_OFF(oldpages - newpages), sc->cred); sc->object->charge = IDX_TO_OFF(newpages); sc->object->size = newpages; VM_OBJECT_WUNLOCK(sc->object); } else if (newpages > oldpages) { res = swap_reserve_by_cred(IDX_TO_OFF(newpages - oldpages), sc->cred); if (!res) return (ENOMEM); if ((mdio->md_options & MD_RESERVE) || (sc->flags & MD_RESERVE)) { error = swap_pager_reserve(sc->object, oldpages, newpages - oldpages); if (error < 0) { swap_release_by_cred( IDX_TO_OFF(newpages - oldpages), sc->cred); return (EDOM); } } VM_OBJECT_WLOCK(sc->object); sc->object->charge = IDX_TO_OFF(newpages); sc->object->size = newpages; VM_OBJECT_WUNLOCK(sc->object); } break; default: return (EOPNOTSUPP); } sc->mediasize = mdio->md_mediasize; g_topology_lock(); g_resize_provider(sc->pp, sc->mediasize); g_topology_unlock(); return (0); } static int mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) { vm_ooffset_t npage; int error; /* * Range check. Disallow negative sizes and sizes not being * multiple of page size. */ if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) return (EDOM); /* * Allocate an OBJT_SWAP object. * * Note the truncation. 
*/ npage = mdio->md_mediasize / PAGE_SIZE; if (mdio->md_fwsectors != 0) sc->fwsectors = mdio->md_fwsectors; if (mdio->md_fwheads != 0) sc->fwheads = mdio->md_fwheads; sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage, VM_PROT_DEFAULT, 0, td->td_ucred); if (sc->object == NULL) return (ENOMEM); sc->flags = mdio->md_options & (MD_FORCE | MD_RESERVE); if (mdio->md_options & MD_RESERVE) { if (swap_pager_reserve(sc->object, 0, npage) < 0) { error = EDOM; goto finish; } } error = mdsetcred(sc, td->td_ucred); finish: if (error != 0) { vm_object_deallocate(sc->object); sc->object = NULL; } return (error); } static int mdcreate_null(struct md_s *sc, struct md_ioctl *mdio, struct thread *td) { /* * Range check. Disallow negative sizes and sizes not being * multiple of page size. */ if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0) return (EDOM); return (0); } static int xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { struct md_ioctl *mdio; struct md_s *sc; int error, i; unsigned sectsize; if (md_debug) printf("mdctlioctl(%s %lx %p %x %p)\n", devtoname(dev), cmd, addr, flags, td); mdio = (struct md_ioctl *)addr; if (mdio->md_version != MDIOVERSION) return (EINVAL); /* * We assert the version number in the individual ioctl * handlers instead of out here because (a) it is possible we * may add another ioctl in the future which doesn't read an * mdio, and (b) the correct return value for an unknown ioctl * is ENOIOCTL, not EINVAL. */ error = 0; switch (cmd) { case MDIOCATTACH: switch (mdio->md_type) { case MD_MALLOC: case MD_PRELOAD: case MD_VNODE: case MD_SWAP: case MD_NULL: break; default: return (EINVAL); } if (mdio->md_sectorsize == 0) sectsize = DEV_BSIZE; else sectsize = mdio->md_sectorsize; if (sectsize > MAXPHYS || mdio->md_mediasize < sectsize) return (EINVAL); if (mdio->md_options & MD_AUTOUNIT) sc = mdnew(-1, &error, mdio->md_type); else { if (mdio->md_unit > INT_MAX) return (EINVAL); sc = mdnew(mdio->md_unit, &error, mdio->md_type); } if (sc == NULL) return (error); if (mdio->md_options & MD_AUTOUNIT) mdio->md_unit = sc->unit; sc->mediasize = mdio->md_mediasize; sc->sectorsize = sectsize; error = EDOOFUS; switch (sc->type) { case MD_MALLOC: sc->start = mdstart_malloc; error = mdcreate_malloc(sc, mdio); break; case MD_PRELOAD: /* * We disallow attaching preloaded memory disks via * ioctl. Preloaded memory disks are automatically * attached in g_md_init(). 
*/ error = EOPNOTSUPP; break; case MD_VNODE: sc->start = mdstart_vnode; error = mdcreate_vnode(sc, mdio, td); break; case MD_SWAP: sc->start = mdstart_swap; error = mdcreate_swap(sc, mdio, td); break; case MD_NULL: sc->start = mdstart_null; error = mdcreate_null(sc, mdio, td); break; } if (error != 0) { mddestroy(sc, td); return (error); } /* Prune off any residual fractional sector */ i = sc->mediasize % sc->sectorsize; sc->mediasize -= i; mdinit(sc); return (0); case MDIOCDETACH: if (mdio->md_mediasize != 0 || (mdio->md_options & ~MD_FORCE) != 0) return (EINVAL); sc = mdfind(mdio->md_unit); if (sc == NULL) return (ENOENT); if (sc->opencount != 0 && !(sc->flags & MD_FORCE) && !(mdio->md_options & MD_FORCE)) return (EBUSY); return (mddestroy(sc, td)); case MDIOCRESIZE: if ((mdio->md_options & ~(MD_FORCE | MD_RESERVE)) != 0) return (EINVAL); sc = mdfind(mdio->md_unit); if (sc == NULL) return (ENOENT); if (mdio->md_mediasize < sc->sectorsize) return (EINVAL); if (mdio->md_mediasize < sc->mediasize && !(sc->flags & MD_FORCE) && !(mdio->md_options & MD_FORCE)) return (EBUSY); return (mdresize(sc, mdio)); case MDIOCQUERY: sc = mdfind(mdio->md_unit); if (sc == NULL) return (ENOENT); mdio->md_type = sc->type; mdio->md_options = sc->flags; mdio->md_mediasize = sc->mediasize; mdio->md_sectorsize = sc->sectorsize; if (sc->type == MD_VNODE || (sc->type == MD_PRELOAD && mdio->md_file != NULL)) error = copyout(sc->file, mdio->md_file, strlen(sc->file) + 1); return (error); case MDIOCLIST: i = 1; LIST_FOREACH(sc, &md_softc_list, list) { if (i == MDNPAD - 1) mdio->md_pad[i] = -1; else mdio->md_pad[i++] = sc->unit; } mdio->md_pad[0] = i - 1; return (0); default: return (ENOIOCTL); }; } static int mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { int error; sx_xlock(&md_sx); error = xmdctlioctl(dev, cmd, addr, flags, td); sx_xunlock(&md_sx); return (error); } static void md_preloaded(u_char *image, size_t length, const char *name) { struct md_s *sc; int error; sc = mdnew(-1, &error, MD_PRELOAD); if (sc == NULL) return; sc->mediasize = length; sc->sectorsize = DEV_BSIZE; sc->pl_ptr = image; sc->pl_len = length; sc->start = mdstart_preload; if (name != NULL) strlcpy(sc->file, name, sizeof(sc->file)); #if defined(MD_ROOT) && !defined(ROOTDEVNAME) if (sc->unit == 0) rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0"; #endif mdinit(sc); if (name != NULL) { printf("%s%d: Preloaded image <%s> %zd bytes at %p\n", MD_NAME, sc->unit, name, length, image); } else { printf("%s%d: Embedded image %zd bytes at %p\n", MD_NAME, sc->unit, length, image); } } static void g_md_init(struct g_class *mp __unused) { caddr_t mod; u_char *ptr, *name, *type; unsigned len; int i; /* figure out log2(NINDIR) */ for (i = NINDIR, nshift = -1; i; nshift++) i >>= 1; mod = NULL; sx_init(&md_sx, "MD config lock"); g_topology_unlock(); md_uh = new_unrhdr(0, INT_MAX, NULL); #ifdef MD_ROOT if (mfs_root_size != 0) { sx_xlock(&md_sx); md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size, NULL); sx_xunlock(&md_sx); } #endif /* XXX: are preload_* static or do they need Giant ? 
*/ while ((mod = preload_search_next_name(mod)) != NULL) { name = (char *)preload_search_info(mod, MODINFO_NAME); if (name == NULL) continue; type = (char *)preload_search_info(mod, MODINFO_TYPE); if (type == NULL) continue; if (strcmp(type, "md_image") && strcmp(type, "mfs_root")) continue; ptr = preload_fetch_addr(mod); len = preload_fetch_size(mod); if (ptr != NULL && len != 0) { sx_xlock(&md_sx); md_preloaded(ptr, len, name); sx_xunlock(&md_sx); } } md_vnode_pbuf_freecnt = nswbuf / 10; status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL, 0600, MDCTL_NAME); g_topology_lock(); } static void g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp) { struct md_s *mp; char *type; mp = gp->softc; if (mp == NULL) return; switch (mp->type) { case MD_MALLOC: type = "malloc"; break; case MD_PRELOAD: type = "preload"; break; case MD_VNODE: type = "vnode"; break; case MD_SWAP: type = "swap"; break; case MD_NULL: type = "null"; break; default: type = "unknown"; break; } if (pp != NULL) { if (indent == NULL) { sbuf_printf(sb, " u %d", mp->unit); sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize); sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads); sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors); sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize); sbuf_printf(sb, " t %s", type); if ((mp->type == MD_VNODE && mp->vnode != NULL) || (mp->type == MD_PRELOAD && mp->file[0] != '\0')) sbuf_printf(sb, " file %s", mp->file); } else { sbuf_printf(sb, "%s%d\n", indent, mp->unit); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t) mp->sectorsize); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t) mp->fwheads); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t) mp->fwsectors); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t) mp->mediasize); sbuf_printf(sb, "%s%s\n", indent, (mp->flags & MD_COMPRESS) == 0 ? "off": "on"); sbuf_printf(sb, "%s%s\n", indent, (mp->flags & MD_READONLY) == 0 ? "read-write": "read-only"); sbuf_printf(sb, "%s%s\n", indent, type); if ((mp->type == MD_VNODE && mp->vnode != NULL) || (mp->type == MD_PRELOAD && mp->file[0] != '\0')) { sbuf_printf(sb, "%s", indent); g_conf_printf_escaped(sb, "%s", mp->file); sbuf_printf(sb, "\n"); } } } } static void g_md_fini(struct g_class *mp __unused) { sx_destroy(&md_sx); if (status_dev != NULL) destroy_dev(status_dev); delete_unrhdr(md_uh); } Index: head/sys/dev/ncr/ncr.c =================================================================== --- head/sys/dev/ncr/ncr.c (revision 313981) +++ head/sys/dev/ncr/ncr.c (revision 313982) @@ -1,7115 +1,7115 @@ /************************************************************************** ** ** ** Device driver for the NCR 53C8XX PCI-SCSI-Controller Family. ** **------------------------------------------------------------------------- ** ** Written for 386bsd and FreeBSD by ** Wolfgang Stanglmeier ** Stefan Esser ** **------------------------------------------------------------------------- */ /*- ** Copyright (c) 1994 Wolfgang Stanglmeier. All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. 
Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** *************************************************************************** */ #include __FBSDID("$FreeBSD$"); #define NCR_GETCC_WITHMSG #if defined (__FreeBSD__) && defined(_KERNEL) #include "opt_ncr.h" #endif /*========================================================== ** ** Configuration and Debugging ** ** May be overwritten in ** **========================================================== */ /* ** SCSI address of this device. ** The boot routines should have set it. ** If not, use this. */ #ifndef SCSI_NCR_MYADDR #define SCSI_NCR_MYADDR (7) #endif /* SCSI_NCR_MYADDR */ /* ** The default synchronous period factor ** (0=asynchronous) ** If maximum synchronous frequency is defined, use it instead. */ #ifndef SCSI_NCR_MAX_SYNC #ifndef SCSI_NCR_DFLT_SYNC #define SCSI_NCR_DFLT_SYNC (12) #endif /* SCSI_NCR_DFLT_SYNC */ #else #if SCSI_NCR_MAX_SYNC == 0 #define SCSI_NCR_DFLT_SYNC 0 #else #define SCSI_NCR_DFLT_SYNC (250000 / SCSI_NCR_MAX_SYNC) #endif #endif /* ** The minimal asynchronous pre-scaler period (ns) ** Shall be 40. */ #ifndef SCSI_NCR_MIN_ASYNC #define SCSI_NCR_MIN_ASYNC (40) #endif /* SCSI_NCR_MIN_ASYNC */ /* ** The maximal bus with (in log2 byte) ** (0=8 bit, 1=16 bit) */ #ifndef SCSI_NCR_MAX_WIDE #define SCSI_NCR_MAX_WIDE (1) #endif /* SCSI_NCR_MAX_WIDE */ /*========================================================== ** ** Configuration and Debugging ** **========================================================== */ /* ** Number of targets supported by the driver. ** n permits target numbers 0..n-1. ** Default is 7, meaning targets #0..#6. ** #7 .. is myself. */ #define MAX_TARGET (16) /* ** Number of logic units supported by the driver. ** n enables logic unit numbers 0..n-1. ** The common SCSI devices require only ** one lun, so take 1 as the default. */ #ifndef MAX_LUN #define MAX_LUN (8) #endif /* MAX_LUN */ /* ** The maximum number of jobs scheduled for starting. ** There should be one slot per target, and one slot ** for each tag of each target in use. */ #define MAX_START (256) /* ** The maximum number of segments a transfer is split into. */ #define MAX_SCATTER (33) /* ** The maximum transfer length (should be >= 64k). ** MUST NOT be greater than (MAX_SCATTER-1) * PAGE_SIZE. 
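**
** Worked out (assuming 4KB pages, purely for illustration): with
** MAX_SCATTER = 33 the ceiling is 32 * 4096 = 128KB, comfortably
** above the 64KB floor, and MAX_SIZE below is defined as exactly
** that ceiling.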
*/ #define MAX_SIZE ((MAX_SCATTER-1) * (long) PAGE_SIZE) /* ** other */ #define NCR_SNOOP_TIMEOUT (1000000) /*========================================================== ** ** Include files ** **========================================================== */ #include #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include /*========================================================== ** ** Debugging tags ** **========================================================== */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_FREEZE (0x0800) #define DEBUG_RESTART (0x1000) /* ** Enable/Disable debug messages. ** Can be changed at runtime too. */ #ifdef SCSI_NCR_DEBUG #define DEBUG_FLAGS ncr_debug #else /* SCSI_NCR_DEBUG */ #define SCSI_NCR_DEBUG 0 #define DEBUG_FLAGS 0 #endif /* SCSI_NCR_DEBUG */ /*========================================================== ** ** assert () ** **========================================================== ** ** modified copy from 386bsd:/usr/include/sys/assert.h ** **---------------------------------------------------------- */ #ifdef DIAGNOSTIC #define assert(expression) { \ KASSERT(expression, ("%s", #expression)); \ } #else #define assert(expression) { \ if (!(expression)) { \ (void)printf("assertion \"%s\" failed: " \ "file \"%s\", line %d\n", \ #expression, __FILE__, __LINE__); \ } \ } #endif /*========================================================== ** ** Access to the controller chip. ** **========================================================== */ #define INB(r) bus_read_1(np->reg_res, offsetof(struct ncr_reg, r)) #define INW(r) bus_read_2(np->reg_res, offsetof(struct ncr_reg, r)) #define INL(r) bus_read_4(np->reg_res, offsetof(struct ncr_reg, r)) #define OUTB(r, val) bus_write_1(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTW(r, val) bus_write_2(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL(r, val) bus_write_4(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL_OFF(o, val) bus_write_4(np->reg_res, o, val) #define INB_OFF(o) bus_read_1(np->reg_res, o) #define INW_OFF(o) bus_read_2(np->reg_res, o) #define INL_OFF(o) bus_read_4(np->reg_res, o) #define READSCRIPT_OFF(base, off) \ (base ? *((volatile u_int32_t *)((volatile char *)base + (off))) : \ bus_read_4(np->sram_res, off)) #define WRITESCRIPT_OFF(base, off, val) \ do { \ if (base) \ *((volatile u_int32_t *) \ ((volatile char *)base + (off))) = (val); \ else \ bus_write_4(np->sram_res, off, val); \ } while (0) #define READSCRIPT(r) \ READSCRIPT_OFF(np->script, offsetof(struct script, r)) #define WRITESCRIPT(r, val) \ WRITESCRIPT_OFF(np->script, offsetof(struct script, r), val) /* ** Set bit field ON, OFF */ #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /*========================================================== ** ** Command control block states. 
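**
** (Encoding note on the defines below: HS_DONEMASK is 0xfc, so a
** status tests as done exactly when it is >= HS_COMPLETE (4); the
** four in-flight states IDLE, BUSY, NEGOTIATE and DISCONNECT occupy
** values 0..3 and are masked out.)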
** **========================================================== */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_COMPLETE (4) #define HS_SEL_TIMEOUT (5) /* Selection timeout */ #define HS_RESET (6) /* SCSI reset */ #define HS_ABORTED (7) /* Transfer aborted */ #define HS_TIMEOUT (8) /* Software timeout */ #define HS_FAIL (9) /* SCSI or PCI bus errors */ #define HS_UNEXPECTED (10) /* Unexpected disconnect */ #define HS_STALL (11) /* QUEUE FULL or BUSY */ #define HS_DONEMASK (0xfc) /*========================================================== ** ** Software Interrupt Codes ** **========================================================== */ #define SIR_SENSE_RESTART (1) #define SIR_SENSE_FAILED (2) #define SIR_STALL_RESTART (3) #define SIR_STALL_QUEUE (4) #define SIR_NEGO_SYNC (5) #define SIR_NEGO_WIDE (6) #define SIR_NEGO_FAILED (7) #define SIR_NEGO_PROTO (8) #define SIR_REJECT_RECEIVED (9) #define SIR_REJECT_SENT (10) #define SIR_IGN_RESIDUE (11) #define SIR_MISSING_SAVE (12) #define SIR_MAX (12) /*========================================================== ** ** Extended error codes. ** xerr_status field of struct nccb. ** **========================================================== */ #define XE_OK (0) #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (2) /* illegal phase (4/5) */ /*========================================================== ** ** Negotiation status. ** nego_status field of struct nccb. ** **========================================================== */ #define NS_SYNC (1) #define NS_WIDE (2) /*========================================================== ** ** XXX These are no longer used. Remove once the ** script is updated. ** "Special features" of targets. ** quirks field of struct tcb. ** actualquirks field of struct nccb. ** **========================================================== */ #define QUIRK_AUTOSAVE (0x01) #define QUIRK_NOMSG (0x02) #define QUIRK_NOSYNC (0x10) #define QUIRK_NOWIDE16 (0x20) #define QUIRK_NOTAGS (0x40) #define QUIRK_UPDATE (0x80) /*========================================================== ** ** Misc. ** **========================================================== */ #define CCB_MAGIC (0xf2691ad2) #define MAX_TAGS (32) /* hard limit */ /*========================================================== ** ** OS dependencies. ** **========================================================== */ #define PRINT_ADDR(ccb) xpt_print_path((ccb)->ccb_h.path) /*========================================================== ** ** Declaration of structs. ** **========================================================== */ struct tcb; struct lcb; struct nccb; struct ncb; struct script; typedef struct ncb * ncb_p; typedef struct tcb * tcb_p; typedef struct lcb * lcb_p; typedef struct nccb * nccb_p; struct link { ncrcmd l_cmd; ncrcmd l_paddr; }; struct usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETORDER 13 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UF_TRACE (0x01) /*--------------------------------------- ** ** Timestamps for profiling ** **--------------------------------------- */ /* Type of the kernel variable `ticks'. XXX should be declared with the var. 
*/ typedef int ticks_t; struct tstamp { ticks_t start; ticks_t end; ticks_t select; ticks_t command; ticks_t data; ticks_t status; ticks_t disconnect; }; /* ** profiling data (per device) */ struct profile { u_long num_trans; u_long num_bytes; u_long num_disc; u_long num_break; u_long num_int; u_long num_fly; u_long ms_setup; u_long ms_data; u_long ms_disc; u_long ms_post; }; /*========================================================== ** ** Declaration of structs: target control block ** **========================================================== */ #define NCR_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define NCR_TRANS_ACTIVE 0x03 /* Assume this is the active target */ #define NCR_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define NCR_TRANS_USER 0x08 /* Modify user negotiation settings */ struct ncr_transinfo { u_int8_t width; u_int8_t period; u_int8_t offset; }; struct ncr_target_tinfo { /* Hardware version of our sync settings */ u_int8_t disc_tag; #define NCR_CUR_DISCENB 0x01 #define NCR_CUR_TAGENB 0x02 #define NCR_USR_DISCENB 0x04 #define NCR_USR_TAGENB 0x08 u_int8_t sval; struct ncr_transinfo current; struct ncr_transinfo goal; struct ncr_transinfo user; /* Hardware version of our wide settings */ u_int8_t wval; }; struct tcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the encoded target number ** with bit 7 set. ** if it's not this target, jump to the next. ** ** JUMP IF (SFBR != #target#) ** @(next tcb) */ struct link jump_tcb; /* ** load the actual values for the sxfer and the scntl3 ** register (sync/wide mode). ** ** SCR_COPY (1); ** @(sval field of this tcb) ** @(sxfer register) ** SCR_COPY (1); ** @(wval field of this tcb) ** @(scntl3 register) */ ncrcmd getscr[6]; /* ** if next message is "identify" ** then load the message to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_lun; /* ** now look for the right lun. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_lcb; /* ** pointer to interrupted getcc nccb */ nccb_p hold_cp; /* ** pointer to nccb used for negotiating. ** Avoid to start a nego for all queued commands ** when tagged command queuing is enabled. */ nccb_p nego_cp; /* ** statistical data */ u_long transfers; u_long bytes; /* ** user settable limits for sync transfer ** and tagged commands. */ struct ncr_target_tinfo tinfo; /* ** the lcb's of this tcb */ lcb_p lp[MAX_LUN]; }; /*========================================================== ** ** Declaration of structs: lun control block ** **========================================================== */ struct lcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the "Identify" message. ** if it's not this lun, jump to the next. ** ** JUMP IF (SFBR != #lun#) ** @(next lcb of this target) */ struct link jump_lcb; /* ** if next message is "simple tag", ** then load the tag to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_tag; /* ** now look for the right nccb. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_nccb; /* ** start of the nccb chain */ nccb_p next_nccb; /* ** Control of tagged queueing */ u_char reqnccbs; u_char reqlink; u_char actlink; u_char usetags; u_char lasttag; }; /*========================================================== ** ** Declaration of structs: COMMAND control block ** **========================================================== ** ** This substructure is copied from the nccb to a ** global address after selection (or reselection) ** and copied back before disconnect. 
** ** These fields are accessible to the script processor. ** **---------------------------------------------------------- */ struct head { /* ** Execution of a nccb starts at this point. ** It's a jump to the "SELECT" label ** of the script. ** ** After successful selection the script ** processor overwrites it with a jump to ** the IDLE label of the script. */ struct link launch; /* ** Saved data pointer. ** Points to the position in the script ** responsible for the actual transfer ** of data. ** It's written after reception of a ** "SAVE_DATA_POINTER" message. ** The goalpointer points after ** the last transfer command. */ u_int32_t savep; u_int32_t lastp; u_int32_t goalp; /* ** The virtual address of the nccb ** containing this header. */ nccb_p cp; /* ** space for some timestamps to gather ** profiling data about devices and this driver. */ struct tstamp stamp; /* ** status fields. */ u_char status[8]; }; /* ** The status bytes are used by the host and the script processor. ** ** The first four byte are copied to the scratchb register ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, ** and copied back just after disconnecting. ** Inside the script the XX_REG are used. ** ** The last four bytes are used inside the script by "COPY" commands. ** Because source and destination must have the same alignment ** in a longword, the fields HAVE to be at the chosen offsets. ** xerr_st (4) 0 (0x34) scratcha ** sync_st (5) 1 (0x05) sxfer ** wide_st (7) 3 (0x03) scntl3 */ /* ** First four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define PS_REG scr3 /* ** First four bytes (host) */ #define actualquirks phys.header.status[0] #define host_status phys.header.status[1] #define s_status phys.header.status[2] #define parity_status phys.header.status[3] /* ** Last four bytes (script) */ #define xerr_st header.status[4] /* MUST be ==0 mod 4 */ #define sync_st header.status[5] /* MUST be ==1 mod 4 */ #define nego_st header.status[6] #define wide_st header.status[7] /* MUST be ==3 mod 4 */ /* ** Last four bytes (host) */ #define xerr_status phys.xerr_st #define sync_status phys.sync_st #define nego_status phys.nego_st #define wide_status phys.wide_st /*========================================================== ** ** Declaration of structs: Data structure block ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changable data and ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct dsb { /* ** Header. ** Has to be the first entry, ** because it's jumped to by the ** script processor */ struct head header; /* ** Table data for Script */ struct scr_tblsel select; struct scr_tblmove smsg ; struct scr_tblmove smsg2 ; struct scr_tblmove cmd ; struct scr_tblmove scmd ; struct scr_tblmove sense ; struct scr_tblmove data [MAX_SCATTER]; }; /*========================================================== ** ** Declaration of structs: Command control block. ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. 
** This substructure contains the header with ** the script-processor-changable data and then ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct nccb { /* ** This filler ensures that the global header is ** cache line size aligned. */ ncrcmd filler[4]; /* ** during reselection the ncr jumps to this point. ** If a "SIMPLE_TAG" message was received, ** then SFBR is set to the tag. ** else SFBR is set to 0 ** If looking for another tag, jump to the next nccb. ** ** JUMP IF (SFBR != #TAG#) ** @(next nccb of this lun) */ struct link jump_nccb; /* ** After execution of this call, the return address ** (in the TEMP register) points to the following ** data structure block. ** So copy it to the DSA register, and start ** processing of this data structure. ** ** CALL ** */ struct link call_tmp; /* ** This is the data structure which is ** to be executed by the script processor. */ struct dsb phys; /* ** If a data transfer phase is terminated too early ** (after reception of a message (i.e. DISCONNECT)), ** we have to prepare a mini script to transfer ** the rest of the data. */ ncrcmd patch[8]; /* ** The general SCSI driver provides a ** pointer to a control block. */ union ccb *ccb; /* ** We prepare a message to be sent after selection, ** and a second one to be sent after getcc selection. ** Contents are IDENTIFY and SIMPLE_TAG. ** While negotiating sync or wide transfer, ** a SDTM or WDTM message is appended. */ u_char scsi_smsg [8]; u_char scsi_smsg2[8]; /* ** Lock this nccb. ** Flag is used while looking for a free nccb. */ u_long magic; /* ** Physical address of this instance of nccb */ u_long p_nccb; /* ** Completion time out for this job. ** It's set to time of start + allowed number of seconds. */ time_t tlimit; /* ** All nccbs of one hostadapter are chained. */ nccb_p link_nccb; /* ** All nccbs of one target/lun are chained. */ nccb_p next_nccb; /* ** Sense command */ u_char sensecmd[6]; /* ** Tag for this transfer. ** It's patched into jump_nccb. ** If it's not zero, a SIMPLE_TAG ** message is included in smsg. */ u_char tag; }; #define CCB_PHYS(cp,lbl) (cp->p_nccb + offsetof(struct nccb, lbl)) /*========================================================== ** ** Declaration of structs: NCR device descriptor ** **========================================================== */ struct ncb { /* ** The global header. ** Accessible to both the host and the ** script-processor. ** We assume it is cache line size aligned. */ struct head header; device_t dev; /*----------------------------------------------- ** Scripts .. **----------------------------------------------- ** ** During reselection the ncr jumps to this point. ** The SFBR register is loaded with the encoded target id. ** ** Jump to the first target. ** ** JUMP ** @(next tcb) */ struct link jump_tcb; /*----------------------------------------------- ** Configuration .. **----------------------------------------------- ** ** virtual and physical addresses ** of the 53c810 chip. */ int reg_rid; struct resource *reg_res; int sram_rid; struct resource *sram_res; struct resource *irq_res; void *irq_handle; /* ** Scripts instance virtual address. */ struct script *script; struct scripth *scripth; /* ** Scripts instance physical address. */ u_long p_script; u_long p_scripth; /* ** The SCSI address of the host adapter. 
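**
** (It defaults to SCSI_NCR_MYADDR, i.e. 7, when the boot firmware
** did not supply one; see the configuration block near the top of
** this file.)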
*/ u_char myaddr; /* ** timing parameters */ u_char minsync; /* Minimum sync period factor */ u_char maxsync; /* Maximum sync period factor */ u_char maxoffs; /* Max scsi offset */ u_char clock_divn; /* Number of clock divisors */ u_long clock_khz; /* SCSI clock frequency in KHz */ u_long features; /* Chip features map */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char maxburst; /* log base 2 of dwords burst */ /* ** BIOS supplied PCI bus options */ u_char rv_scntl3; u_char rv_dcntl; u_char rv_dmode; u_char rv_ctest3; u_char rv_ctest4; u_char rv_ctest5; u_char rv_gpcntl; u_char rv_stest2; /*----------------------------------------------- ** CAM SIM information for this instance **----------------------------------------------- */ struct cam_sim *sim; struct cam_path *path; /*----------------------------------------------- ** Job control **----------------------------------------------- ** ** Commands from user */ struct usrcmd user; /* ** Target data */ struct tcb target[MAX_TARGET]; /* ** Start queue. */ u_int32_t squeue [MAX_START]; u_short squeueput; /* ** Timeout handler */ time_t heartbeat; u_short ticks; u_short latetime; time_t lasttime; struct callout timer; /*----------------------------------------------- ** Debug and profiling **----------------------------------------------- ** ** register dump */ struct ncr_reg regdump; time_t regtime; /* ** Profiling data */ struct profile profile; u_long disc_phys; u_long disc_ref; /* ** Head of list of all nccbs for this controller. */ nccb_p link_nccb; /* ** message buffers. ** Should be longword aligned, ** because they're written with a ** COPY script command. */ u_char msgout[8]; u_char msgin [8]; u_int32_t lastmsg; /* ** Buffer for STATUS_IN phase. */ u_char scratch; /* ** controller chip dependent maximal transfer width. */ u_char maxwide; struct mtx lock; #ifdef NCR_IOMAPPED /* ** address of the ncr control registers in io space */ pci_port_t port; #endif }; #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) /*========================================================== ** ** ** Script for NCR-Processor. ** ** Use ncr_script_fill() to create the variable parts. ** Use ncr_script_copy_and_bind() to make a copy and ** bind to physical addresses. ** ** **========================================================== ** ** We have to know the offsets of all labels before ** we reach them (for forward jumps). ** Therefore we declare a struct here. ** If you make changes inside the script, ** DONT FORGET TO CHANGE THE LENGTHS HERE! ** **---------------------------------------------------------- */ /* ** Script fragments which are loaded into the on-board RAM ** of 825A, 875 and 895 chips. 
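** (The array sizes below are the instruction word counts of the corresponding fragments in script0; a stale length pads the fragment with zero words, which ncr_script_copy_and_bind() flags as "ERROR0 IN SCRIPT".)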
*/ struct script { ncrcmd start [ 7]; ncrcmd start0 [ 2]; ncrcmd start1 [ 3]; ncrcmd startpos [ 1]; ncrcmd trysel [ 8]; ncrcmd skip [ 8]; ncrcmd skip2 [ 3]; ncrcmd idle [ 2]; ncrcmd select [ 18]; ncrcmd prepare [ 4]; ncrcmd loadpos [ 14]; ncrcmd prepare2 [ 24]; ncrcmd setmsg [ 5]; ncrcmd clrack [ 2]; ncrcmd dispatch [ 33]; ncrcmd no_data [ 17]; ncrcmd checkatn [ 10]; ncrcmd command [ 15]; ncrcmd status [ 27]; ncrcmd msg_in [ 26]; ncrcmd msg_bad [ 6]; ncrcmd complete [ 13]; ncrcmd cleanup [ 12]; ncrcmd cleanup0 [ 9]; ncrcmd signal [ 12]; ncrcmd save_dp [ 5]; ncrcmd restore_dp [ 5]; ncrcmd disconnect [ 12]; ncrcmd disconnect0 [ 5]; ncrcmd disconnect1 [ 23]; ncrcmd msg_out [ 9]; ncrcmd msg_out_done [ 7]; ncrcmd badgetcc [ 6]; ncrcmd reselect [ 8]; ncrcmd reselect1 [ 8]; ncrcmd reselect2 [ 8]; ncrcmd resel_tmp [ 5]; ncrcmd resel_lun [ 18]; ncrcmd resel_tag [ 24]; ncrcmd data_in [MAX_SCATTER * 4 + 7]; ncrcmd data_out [MAX_SCATTER * 4 + 7]; }; /* ** Script fragments which stay in main memory for all chips. */ struct scripth { ncrcmd tryloop [MAX_START*5+2]; ncrcmd msg_parity [ 6]; ncrcmd msg_reject [ 8]; ncrcmd msg_ign_residue [ 32]; ncrcmd msg_extended [ 18]; ncrcmd msg_ext_2 [ 18]; ncrcmd msg_wdtr [ 27]; ncrcmd msg_ext_3 [ 18]; ncrcmd msg_sdtr [ 27]; ncrcmd msg_out_abort [ 10]; ncrcmd getcc [ 4]; ncrcmd getcc1 [ 5]; #ifdef NCR_GETCC_WITHMSG ncrcmd getcc2 [ 29]; #else ncrcmd getcc2 [ 14]; #endif ncrcmd getcc3 [ 6]; ncrcmd aborttag [ 4]; ncrcmd abort [ 22]; ncrcmd snooptest [ 9]; ncrcmd snoopend [ 2]; }; /*========================================================== ** ** ** Function headers. ** ** **========================================================== */ #ifdef _KERNEL static nccb_p ncr_alloc_nccb(ncb_p np, u_long target, u_long lun); static void ncr_complete(ncb_p np, nccb_p cp); static int ncr_delta(int * from, int * to); static void ncr_exception(ncb_p np); static void ncr_free_nccb(ncb_p np, nccb_p cp); static void ncr_freeze_devq(ncb_p np, struct cam_path *path); static void ncr_selectclock(ncb_p np, u_char scntl3); static void ncr_getclock(ncb_p np, u_char multiplier); static nccb_p ncr_get_nccb(ncb_p np, u_long t,u_long l); #if 0 static u_int32_t ncr_info(int unit); #endif static void ncr_init(ncb_p np, char * msg, u_long code); static void ncr_intr(void *vnp); static void ncr_intr_locked(ncb_p np); static void ncr_int_ma(ncb_p np, u_char dstat); static void ncr_int_sir(ncb_p np); static void ncr_int_sto(ncb_p np); #if 0 static void ncr_min_phys(struct buf *bp); #endif static void ncr_poll(struct cam_sim *sim); static void ncb_profile(ncb_p np, nccb_p cp); static void ncr_script_copy_and_bind(ncb_p np, ncrcmd *src, ncrcmd *dst, int len); static void ncr_script_fill(struct script * scr, struct scripth *scrh); static int ncr_scatter(struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen); static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p); static void ncr_setsync(ncb_p np, nccb_p cp,u_char scntl3,u_char sxfer, u_char period); static void ncr_setwide(ncb_p np, nccb_p cp, u_char wide, u_char ack); static int ncr_show_msg(u_char * msg); static int ncr_snooptest(ncb_p np); static void ncr_action(struct cam_sim *sim, union ccb *ccb); static void ncr_timeout(void *arg); static void ncr_wakeup(ncb_p np, u_long code); static int ncr_probe(device_t dev); static int ncr_attach(device_t dev); #endif /* _KERNEL */ /*========================================================== ** ** ** Global static data. 
** ** **========================================================== */ #ifdef _KERNEL static int ncr_debug = SCSI_NCR_DEBUG; SYSCTL_INT(_debug, OID_AUTO, ncr_debug, CTLFLAG_RW, &ncr_debug, 0, ""); static int ncr_cache; /* to be aligned _NOT_ static */ /*========================================================== ** ** ** Global static data: auto configure ** ** **========================================================== */ #define NCR_810_ID (0x00011000ul) #define NCR_815_ID (0x00041000ul) #define NCR_820_ID (0x00021000ul) #define NCR_825_ID (0x00031000ul) #define NCR_860_ID (0x00061000ul) #define NCR_875_ID (0x000f1000ul) #define NCR_875_ID2 (0x008f1000ul) #define NCR_885_ID (0x000d1000ul) #define NCR_895_ID (0x000c1000ul) #define NCR_896_ID (0x000b1000ul) #define NCR_895A_ID (0x00121000ul) #define NCR_1510D_ID (0x000a1000ul) /*========================================================== ** ** ** Scripts for NCR-Processor. ** ** Use ncr_script_bind for binding to physical addresses. ** ** **========================================================== ** ** NADDR generates a reference to a field of the controller data. ** PADDR generates a reference to another part of the script. ** RADDR generates a reference to a script processor register. ** FADDR generates a reference to a script processor register ** with offset. ** **---------------------------------------------------------- */ #define RELOC_SOFTC 0x40000000 #define RELOC_LABEL 0x50000000 #define RELOC_REGISTER 0x60000000 #define RELOC_KVAR 0x70000000 #define RELOC_LABELH 0x80000000 #define RELOC_MASK 0xf0000000 #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) #define RADDR(label) (RELOC_REGISTER | REG(label)) #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) #define KVAR(which) (RELOC_KVAR | (which)) #define KVAR_SECOND (0) #define KVAR_TICKS (1) #define KVAR_NCR_CACHE (2) #define SCRIPT_KVAR_FIRST (0) #define SCRIPT_KVAR_LAST (3) /* * Kernel variables referenced in the scripts. * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. */ static volatile void *script_kvars[] = { &time_second, &ticks, &ncr_cache }; static struct script script0 = { /*--------------------------< START >-----------------------*/ { /* ** Claim to be still alive ... */ SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)), KVAR (KVAR_SECOND), NADDR (heartbeat), /* ** Make data structure address invalid. ** clear SIGP. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_FROM_REG (ctest2), 0, }/*-------------------------< START0 >----------------------*/,{ /* ** Hook for interrupted GetConditionCode. ** Will be patched to ... IFTRUE by ** the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_SENSE_RESTART, }/*-------------------------< START1 >----------------------*/,{ /* ** Hook for stalled start queue. ** Will be patched to IFTRUE by the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_STALL_RESTART, /* ** Then jump to a certain point in tryloop. ** Due to the lack of indirect addressing the code ** is self modifying here. */ SCR_JUMP, }/*-------------------------< STARTPOS >--------------------*/,{ PADDRH(tryloop), }/*-------------------------< TRYSEL >----------------------*/,{ /* ** Now: ** DSA: Address of a Data Structure ** or Address of the IDLE-Label. ** ** TEMP: Address of a script, which tries to ** start the NEXT entry. ** ** Save the TEMP register into the SCRATCHA register. 
** Then copy the DSA to TEMP and RETURN. ** This is kind of an indirect jump. ** (The script processor has NO stack, so the ** CALL is actually a jump and link, and the ** RETURN is an indirect jump.) ** ** If the slot was empty, DSA contains the address ** of the IDLE part of this script. The processor ** jumps to IDLE and waits for a reselect. ** It will wake up and try the same slot again ** after the SIGP bit becomes set by the host. ** ** If the slot was not empty, DSA contains ** the address of the phys-part of a nccb. ** The processor jumps to this address. ** phys starts with head, ** head starts with launch, ** so actually the processor jumps to ** the launch part. ** If the entry is scheduled for execution, ** then launch contains a jump to SELECT. ** If it's not scheduled, it contains a jump to IDLE. */ SCR_COPY (4), RADDR (temp), RADDR (scratcha), SCR_COPY (4), RADDR (dsa), RADDR (temp), SCR_RETURN, 0 }/*-------------------------< SKIP >------------------------*/,{ /* ** This entry has been canceled. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), /* ** patch the launch field. ** should look like an idle process. */ SCR_COPY_F (4), RADDR (dsa), PADDR (skip2), SCR_COPY (8), PADDR (idle), }/*-------------------------< SKIP2 >-----------------------*/,{ 0, SCR_JUMP, PADDR(start), }/*-------------------------< IDLE >------------------------*/,{ /* ** Nothing to do? ** Wait for reselect. */ SCR_JUMP, PADDR(reselect), }/*-------------------------< SELECT >----------------------*/,{ /* ** DSA contains the address of a scheduled ** data structure. ** ** SCRATCHA contains the address of the script, ** which starts the next entry. ** ** Set Initiator mode. ** ** (Target mode is left as an exercise for the reader) */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, 0xff), 0, /* ** And try to select this target. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR (reselect), /* ** Now there are 4 possibilities: ** ** (1) The ncr loses arbitration. ** This is ok, because it will try again, ** when the bus becomes idle. ** (But beware of the timeout function!) ** ** (2) The ncr is reselected. ** Then the script processor takes the jump ** to the RESELECT label. ** ** (3) The ncr completes the selection. ** Then it will execute the next statement. ** ** (4) There is a selection timeout. ** Then the ncr should interrupt the host and stop. ** Unfortunately, it seems to continue execution ** of the script. But it will fail with an ** IID-interrupt on the next WHEN. */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** Send the IDENTIFY and SIMPLE_TAG messages ** (and the MSG_EXT_SDTR message) */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg), #ifdef undef /* XXX better fail than try to deal with this ... */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)), -16, #endif SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** Selection complete. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), }/*-------------------------< PREPARE >----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos), /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ...
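** (Once patched, the COPY at the LOADPOS label effectively reads: COPY sizeof (struct head) from <value of DSA> to NADDR (header), i.e. the header of the current nccb is pulled into the ncb where the script can reach it.)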
*/ }/*-------------------------< LOADPOS >---------------------*/,{ 0, NADDR (header), /* ** Mark this nccb as not scheduled. */ SCR_COPY (8), PADDR (idle), NADDR (header.launch), /* ** Set a time stamp for this selection */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.select), /* ** load the savep (saved pointer) into ** the TEMP register (actual pointer) */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< PREPARE2 >---------------------*/,{ /* ** Load the synchronous mode register */ SCR_COPY (1), NADDR (sync_st), RADDR (sxfer), /* ** Load the wide mode and timing register */ SCR_COPY (1), NADDR (wide_st), RADDR (scntl3), /* ** Initialize the msgout buffer with a NOOP message. */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_COPY (1), RADDR (scratcha), NADDR (msgin), /* ** Message in phase ? */ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** Extended or reject message ? */ SCR_FROM_REG (sbdl), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), /* ** normal processing */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< SETMSG >----------------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, }/*-------------------------< CLRACK >----------------------*/,{ /* ** Terminate possible pending message phase. */ SCR_CLR (SCR_ACK), 0, }/*-----------------------< DISPATCH >----------------------*/,{ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** remove bogus output signals */ SCR_REG_REG (socl, SCR_AND, CACK|CATN), 0, SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)), 0, SCR_RETURN ^ IFTRUE (IF (SCR_DATA_IN)), 0, SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), PADDR (msg_out), SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), PADDR (command), SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), PADDR (status), /* ** Discard one illegal phase byte, if required. */ SCR_LOAD_REG (scratcha, XE_BAD_PHASE), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_IN, NADDR (scratch), SCR_JUMP, PADDR (dispatch), }/*-------------------------< NO_DATA >--------------------*/,{ /* ** The target wants to transfer too much data ** or in the wrong direction. ** Remember that in extended error. */ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), /* ** Discard one data byte, if required. */ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), /* ** .. and repeat as required. */ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< CHECKATN >--------------------*/,{ /* ** If AAP (bit 1 of scntl0 register) is set ** and a parity error is detected, ** the script processor asserts ATN. ** ** The target should switch to a MSG_OUT phase ** to get the message. */ SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)), PADDR (dispatch), /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 1), 0, /* ** Prepare a MSG_INITIATOR_DET_ERR message ** (initiator detected error). 
** The target should retry the transfer. */ SCR_LOAD_REG (scratcha, MSG_INITIATOR_DET_ERR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMMAND >--------------------*/,{ /* ** If this is not a GETCC transfer ... */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), 28, /* ** ... set a timestamp ... */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.command), /* ** ... and send the command */ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, cmd), SCR_JUMP, PADDR (dispatch), /* ** Send the GETCC command */ /*>>>*/ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, scmd), SCR_JUMP, PADDR (dispatch), }/*-------------------------< STATUS >--------------------*/,{ /* ** set the timestamp. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.status), /* ** If this is a GETCC transfer, */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (SCSI_STATUS_CHECK_COND)), 40, /* ** get the status */ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), /* ** Save status to scsi_status. ** Mark as complete. ** And wait for disconnect. */ SCR_TO_REG (SS_REG), 0, SCR_REG_REG (SS_REG, SCR_OR, SCSI_STATUS_SENSE), 0, SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), /* ** If it was not a GETCC transfer, ** save the status to scsi_status. */ /*>>>*/ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), SCR_TO_REG (SS_REG), 0, /* ** if it was not a check condition ... */ SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDR (checkatn), /* ** ... mark as complete. */ SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), }/*-------------------------< MSG_IN >--------------------*/,{ /* ** Get the first byte of the message ** and save it to SCRATCHA. ** ** The script processor doesn't negate the ** ACK signal after this transfer. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[0]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Parity was ok, handle this message. */ SCR_JUMP ^ IFTRUE (DATA (MSG_CMDCOMPLETE)), PADDR (complete), SCR_JUMP ^ IFTRUE (DATA (MSG_SAVEDATAPOINTER)), PADDR (save_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_RESTOREPOINTERS)), PADDR (restore_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_DISCONNECT)), PADDR (disconnect), SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDRH (msg_extended), SCR_JUMP ^ IFTRUE (DATA (MSG_NOOP)), PADDR (clrack), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), SCR_JUMP ^ IFTRUE (DATA (MSG_IGN_WIDE_RESIDUE)), PADDRH (msg_ign_residue), /* ** Rest of the messages left as ** an exercise ... ** ** Unimplemented messages: ** fall through to MSG_BAD. */ }/*-------------------------< MSG_BAD >------------------*/,{ /* ** unimplemented message - reject it. */ SCR_INT, SIR_REJECT_SENT, SCR_LOAD_REG (scratcha, MSG_MESSAGE_REJECT), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMPLETE >-----------------*/,{ /* ** Complete message. ** ** If it's not the get condition code, ** copy TEMP register to LASTP in header. */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (MASK (SCSI_STATUS_SENSE, SCSI_STATUS_SENSE)), 12, SCR_COPY (4), RADDR (temp), NADDR (header.lastp), /*>>>*/ /* ** When we terminate the cycle by clearing ACK, ** the target may disconnect immediately. ** ** We don't want to be told of an ** "unexpected disconnect", ** so we disable this feature. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, /* ** Terminate cycle ...
*/ SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** ... and wait for the disconnect. */ SCR_WAIT_DISC, 0, }/*-------------------------< CLEANUP >-------------------*/,{ /* ** dsa: Pointer to nccb ** or xxxxxxFF (no nccb) ** ** HS_REG: Host-Status (<>0!) */ SCR_FROM_REG (dsa), 0, SCR_JUMP ^ IFTRUE (DATA (0xff)), PADDR (signal), /* ** dsa is valid. ** save the status registers */ SCR_COPY (4), RADDR (scr0), NADDR (header.status), /* ** and copy back the header to the nccb. */ SCR_COPY_F (4), RADDR (dsa), PADDR (cleanup0), SCR_COPY (sizeof (struct head)), NADDR (header), }/*-------------------------< CLEANUP0 >--------------------*/,{ 0, /* ** If command resulted in "check condition" ** status and is not yet completed, ** try to get the condition code. */ SCR_FROM_REG (HS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), 16, SCR_FROM_REG (SS_REG), 0, SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDRH(getcc2), }/*-------------------------< SIGNAL >----------------------*/,{ /* ** if status = queue full, ** reinsert in startqueue and stall queue. */ /*>>>*/ SCR_FROM_REG (SS_REG), 0, SCR_INT ^ IFTRUE (DATA (SCSI_STATUS_QUEUE_FULL)), SIR_STALL_QUEUE, /* ** And make the DSA register invalid. */ SCR_LOAD_REG (dsa, 0xff), /* invalid */ 0, /* ** if job completed ... */ SCR_FROM_REG (HS_REG), 0, /* ** ... signal completion to the host */ SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)), 0, /* ** On to new misdeeds! */ SCR_JUMP, PADDR(start), }/*-------------------------< SAVE_DP >------------------*/,{ /* ** SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), SCR_JUMP, PADDR (clrack), }/*-------------------------< RESTORE_DP >---------------*/,{ /* ** RESTORE_DP message: ** Copy SAVEP in header to TEMP register. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_JUMP, PADDR (clrack), }/*-------------------------< DISCONNECT >---------------*/,{ /* ** If QUIRK_AUTOSAVE is set, ** do a "save pointer" operation. */ SCR_FROM_REG (QU_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)), 12, /* ** like SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /*>>>*/ /* ** Check if temp==savep or temp==goalp: ** if not, log a missing save pointer message. ** In fact, it's a comparison mod 256. ** ** Hmmm, I hadn't thought that I would be urged to ** write this kind of ugly self-modifying code. ** ** It's unbelievable, but the ncr53c8xx isn't able ** to subtract one register from another. */ SCR_FROM_REG (temp), 0, /* ** You are not expected to understand this .. ** ** CAUTION: only little endian architectures supported! XXX */ SCR_COPY_F (1), NADDR (header.savep), PADDR (disconnect0), }/*-------------------------< DISCONNECT0 >--------------*/,{ /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (1)), 20, /* ** neither this */ SCR_COPY_F (1), NADDR (header.goalp), PADDR (disconnect1), }/*-------------------------< DISCONNECT1 >--------------*/,{ SCR_INT ^ IFFALSE (DATA (1)), SIR_MISSING_SAVE, /*>>>*/ /* ** DISCONNECTing ... ** ** disable the "unexpected disconnect" feature, ** and remove the ACK signal. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** Wait for the disconnect. */ SCR_WAIT_DISC, 0, /* ** Profiling: ** Set a time stamp, ** and count the disconnects.
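** (The script processor cannot add to memory directly, so the counter is bounced through a register: copy disc_phys to TEMP, add 1, copy TEMP back to disc_phys.)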
*/ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.disconnect), SCR_COPY (4), NADDR (disc_phys), RADDR (temp), SCR_REG_REG (temp, SCR_ADD, 0x01), 0, SCR_COPY (4), RADDR (temp), NADDR (disc_phys), /* ** Status is: DISCONNECTED. */ SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< MSG_OUT >-------------------*/,{ /* ** The target requests a message. */ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** If it was not an ABORT message ... */ SCR_JUMP ^ IFTRUE (DATA (MSG_ABORT)), PADDRH (msg_out_abort), /* ** ... wait for the next phase ** if it's a message out, send it again, ... */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDR (msg_out), }/*-------------------------< MSG_OUT_DONE >--------------*/,{ /* ** ... else clear the message ... */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (4), RADDR (scratcha), NADDR (msgout), /* ** ... and process the next phase */ SCR_JUMP, PADDR (dispatch), }/*------------------------< BADGETCC >---------------------*/,{ /* ** If SIGP was set, clear it and try again. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDRH (getcc2), SCR_INT, SIR_SENSE_FAILED, }/*-------------------------< RESELECT >--------------------*/,{ /* ** This NOP will be patched with LED OFF ** SCR_REG_REG (gpreg, SCR_OR, 0x01) */ SCR_NO_OP, 0, /* ** make the DSA invalid. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_CLR (SCR_TRG), 0, /* ** Sleep waiting for a reselection. ** If SIGP is set, special treatment. ** ** Ready for anything .. */ SCR_WAIT_RESEL, PADDR(reselect2), }/*-------------------------< RESELECT1 >--------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** ... but good for nothing? ** ** load the target id into the SFBR ** and jump to the control block. ** ** Look at the declarations of ** - struct ncb ** - struct tcb ** - struct lcb ** - struct nccb ** to understand what's going on. */ SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 0, SCR_TO_REG (sdid), 0, SCR_JUMP, NADDR (jump_tcb), }/*-------------------------< RESELECT2 >-------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** If it's not connected :( ** -> interrupted by SIGP bit. ** Jump to start. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDR (start), SCR_JUMP, PADDR (reselect), }/*-------------------------< RESEL_TMP >-------------------*/,{ /* ** The return address in TEMP ** is in fact the data structure address, ** so copy it to the DSA register. */ SCR_COPY (4), RADDR (temp), RADDR (dsa), SCR_JUMP, PADDR (prepare), }/*-------------------------< RESEL_LUN >-------------------*/,{ /* ** come back to this point ** to get an IDENTIFY message ** Wait for a msg_in phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 48, /* ** message phase ** It's not a Sony, it's a trick: ** read the data without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (MSG_IDENTIFYFLAG, 0x98)), 32, /* ** It WAS an Identify message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Mask out the lun. */ SCR_REG_REG (sfbr, SCR_AND, 0x07), 0, SCR_RETURN, 0, /* ** No message phase or no IDENTIFY message: ** return 0.
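** (By convention in this script, SFBR carries the "return value" of such subroutines; callers test it afterwards with DATA()/MASK() conditions.)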
*/ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_RETURN, 0, }/*-------------------------< RESEL_TAG >-------------------*/,{ /* ** come back to this point ** to get a SIMPLE_TAG message ** Wait for a MSG_IN phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 64, /* ** message phase ** It's a trick - read the data ** without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (MSG_SIMPLE_Q_TAG)), 48, /* ** It WAS a SIMPLE_TAG message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Wait for the second byte (the tag) */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 24, /* ** Get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK|SCR_CARRY), 0, SCR_RETURN, 0, /* ** No message phase or no SIMPLE_TAG message ** or no second byte: return 0. */ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_SET (SCR_CARRY), 0, SCR_RETURN, 0, }/*-------------------------< DATA_IN >--------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. ** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_IN, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i<MAX_SCATTER >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), ** || PADDR (checkatn), ** || SCR_MOVE_TBL ^ SCR_DATA_IN, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (checkatn), ** SCR_JUMP, ** PADDR (no_data), */ 0 }/*-------------------------< DATA_OUT >-------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. ** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_OUT, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i<MAX_SCATTER >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_OUT, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (dispatch), ** SCR_JUMP, ** PADDR (no_data), ** **--------------------------------------------------------- */ (u_long)0 }/*--------------------------------------------------------*/ }; static struct scripth scripth0 = { /*-------------------------< TRYLOOP >---------------------*/{ /* ** Load an entry of the start queue into dsa ** and try to start it by jumping to TRYSEL. ** ** Because the size depends on the ** #define MAX_START parameter, it is filled ** in at runtime. ** **----------------------------------------------------------- ** ** ##===========< i=0; i<MAX_START >=========== ** || SCR_COPY (4), ** || NADDR (squeue[i]), ** || RADDR (dsa), ** || SCR_CALL, ** || PADDR (trysel), ** ##========================================== ** ** SCR_JUMP, ** PADDRH(tryloop), ** **----------------------------------------------------------- */ 0 }/*-------------------------< MSG_PARITY >---------------*/,{ /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 0x01), 0, /* ** send a "message parity error" message. */ SCR_LOAD_REG (scratcha, MSG_PARITY_ERROR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< MSG_MESSAGE_REJECT >---------------*/,{ /* ** If a negotiation was in progress, ** negotiation failed.
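** (HS_REG still holds HS_NEGOTIATE while an answer is pending, so the test below distinguishes a failed negotiation from an ordinary reject.)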
*/ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** else make host log this message */ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), SIR_REJECT_RECEIVED, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get residue size. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Size is 0 .. ignore message. */ SCR_JUMP ^ IFTRUE (DATA (0)), PADDR (clrack), /* ** Size is not 1 .. have to interrupt. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (1)), 40, /* ** Check for residue byte in swide register */ SCR_FROM_REG (scntl2), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), 16, /* ** There IS data in the swide register. ** Discard it. */ SCR_REG_REG (scntl2, SCR_OR, WSR), 0, SCR_JUMP, PADDR (clrack), /* ** Load again the size to the sfbr register. */ /*>>>*/ SCR_FROM_REG (scratcha), 0, /*>>>*/ SCR_INT, SIR_IGN_RESIDUE, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_EXTENDED >-------------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get length. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* */ SCR_JUMP ^ IFTRUE (DATA (3)), PADDRH (msg_ext_3), SCR_JUMP ^ IFFALSE (DATA (2)), PADDR (msg_bad), }/*-------------------------< MSG_EXT_2 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_WDTR)), PADDRH (msg_wdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_WDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get data bus width */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_WIDE, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_WDTR */ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_EXT_3 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. 
*/ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_SDTR)), PADDRH (msg_sdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_SDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get period and offset */ SCR_MOVE_ABS (2) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_SYNC, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_SDTR */ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_OUT_ABORT >-------------*/,{ /* ** After ABORT message, ** ** expect an immediate disconnect, ... */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, /* ** ... and set the status to "ABORTED" */ SCR_LOAD_REG (HS_REG, HS_ABORTED), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< GETCC >-----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can modify it. ** ** We patch the address part of a COPY command ** with the address of the dsa register ... */ SCR_COPY_F (4), RADDR (dsa), PADDRH (getcc1), /* ** ... then we do the actual copy. */ SCR_COPY (sizeof (struct head)), }/*-------------------------< GETCC1 >----------------------*/,{ 0, NADDR (header), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< GETCC2 >----------------------*/,{ /* ** Get the condition code from a target. ** ** DSA points to a data structure. ** Set TEMP to the script location ** that receives the condition code. ** ** Because there is no script command ** to load a longword into a register, ** we use a CALL command. */ /*<<<*/ SCR_CALLR, 24, /* ** Get the condition code. */ SCR_MOVE_TBL ^ SCR_DATA_IN, offsetof (struct dsb, sense), /* ** No data phase may follow! */ SCR_CALL, PADDR (checkatn), SCR_JUMP, PADDR (no_data), /*>>>*/ /* ** The CALL jumps to this point. ** Prepare for a RESTORE_POINTER message. ** Save the TEMP register into the saved pointer. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /* ** Load scratcha, because in case of a selection timeout, ** the host will expect a new value for startpos in ** the scratcha register. */ SCR_COPY (4), PADDR (startpos), RADDR (scratcha), #ifdef NCR_GETCC_WITHMSG /* ** If QUIRK_NOMSG is set, select without ATN ** and don't send a message. */ SCR_FROM_REG (QU_REG), 0, SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)), PADDRH(getcc3), /* ** Then try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Send the IDENTIFY message. ** In case of short transfer, remove ATN. */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg2), SCR_CLR (SCR_ATN), 0, /* ** save the first byte of the message.
*/ SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (prepare2), #endif }/*-------------------------< GETCC3 >----------------------*/,{ /* ** Try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. ** ** Silly target won't accept a message. ** Select without ATN. */ SCR_SEL_TBL ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Force error if selection timeout */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** don't negotiate. */ SCR_JUMP, PADDR (prepare2), }/*-------------------------< ABORTTAG >-------------------*/,{ /* ** Abort a bad reselection. ** Set the message to ABORT vs. ABORT_TAG */ SCR_LOAD_REG (scratcha, MSG_ABORT_TAG), 0, SCR_JUMPR ^ IFFALSE (CARRYSET), 8, }/*-------------------------< ABORT >----------------------*/,{ SCR_LOAD_REG (scratcha, MSG_ABORT), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, /* ** and send it. ** we expect an immediate disconnect */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, SCR_JUMP, PADDR (start), }/*-------------------------< SNOOPTEST >-------------------*/,{ /* ** Read the variable. */ SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (scratcha), /* ** Write the variable. */ SCR_COPY (4), RADDR (temp), KVAR (KVAR_NCR_CACHE), /* ** Read back the variable. */ SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (temp), }/*-------------------------< SNOOPEND >-------------------*/,{ /* ** And stop. */ SCR_INT, 99, }/*--------------------------------------------------------*/ }; /*========================================================== ** ** ** Fill in #define dependent parts of the script ** ** **========================================================== */ static void ncr_script_fill (struct script * scr, struct scripth * scrh) { int i; ncrcmd *p; p = scrh->tryloop; for (i=0; i<MAX_START; i++) { *p++ =SCR_COPY (4); *p++ =NADDR (squeue[i]); *p++ =RADDR (dsa); *p++ =SCR_CALL; *p++ =PADDR (trysel); }; *p++ =SCR_JUMP; *p++ =PADDRH(tryloop); assert ((char *)p == (char *)scrh->tryloop + sizeof (scrh->tryloop)); p = scr->data_in; *p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (no_data); *p++ =SCR_COPY (sizeof (ticks)); *p++ =(ncrcmd) KVAR (KVAR_TICKS); *p++ =NADDR (header.stamp.data); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[ 0]); for (i=1; i<MAX_SCATTER; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (checkatn); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[i]); }; *p++ =SCR_CALL; *p++ =PADDR (checkatn); *p++ =SCR_JUMP; *p++ =PADDR (no_data); assert ((char *)p == (char *)scr->data_in + sizeof (scr->data_in)); p = scr->data_out; *p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (no_data); *p++ =SCR_COPY (sizeof (ticks)); *p++ =(ncrcmd) KVAR (KVAR_TICKS); *p++ =NADDR (header.stamp.data); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[ 0]); for (i=1; i<MAX_SCATTER; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (dispatch); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[i]); }; *p++ =SCR_CALL; *p++ =PADDR (dispatch); *p++ =SCR_JUMP; *p++ =PADDR (no_data); assert ((char *)p == (char *)scr->data_out + sizeof (scr->data_out)); } /*========================================================== ** ** ** Copy and rebind a script. ** ** **========================================================== */ static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len) { ncrcmd opcode, new, old, tmp1, tmp2; ncrcmd *start, *end; int relocs, offset; start = src; end = src + len/4; offset = 0; while (src < end) { opcode = *src++; WRITESCRIPT_OFF(dst, offset, opcode); offset += 4; /* ** If we forget to change the length ** in struct script, a field will be ** padded with 0. This is an illegal ** command.
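** (Note this check applies to opcode words; a zero operand word can be a legitimate literal, e.g. an absolute address of 0, which is why the relocation loop below special-cases old == 0.)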
*/ if (opcode == 0) { device_printf(np->dev, "ERROR0 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%p: <%x>\n", (src-1), (unsigned)opcode); /* ** We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xc: /* ** COPY has TWO arguments. */ relocs = 2; tmp1 = src[0]; if ((tmp1 & RELOC_MASK) == RELOC_KVAR) tmp1 = 0; tmp2 = src[1]; if ((tmp2 & RELOC_MASK) == RELOC_KVAR) tmp2 = 0; if ((tmp1 ^ tmp2) & 3) { device_printf(np->dev, "ERROR1 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } /* ** If PREFETCH feature not enabled, remove ** the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features&FE_PFEN)) WRITESCRIPT_OFF(dst, offset - 4, (opcode & ~SCR_NO_FLUSH)); break; case 0x0: /* ** MOVE (absolute address) */ relocs = 1; break; case 0x8: /* ** JUMP / CALL ** don't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } if (relocs) { while (relocs--) { old = *src++; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + rman_get_start(np->reg_res); break; case RELOC_LABEL: new = (old & ~RELOC_MASK) + np->p_script; break; case RELOC_LABELH: new = (old & ~RELOC_MASK) + np->p_scripth; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + vtophys(np); break; case RELOC_KVAR: if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) || ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST)) panic("ncr KVAR out of range"); new = vtophys(script_kvars[old & ~RELOC_MASK]); break; case 0: /* Don't relocate a 0 address. */ if (old == 0) { new = old; break; } /* FALLTHROUGH */ default: panic("ncr_script_copy_and_bind: weird relocation %x @ %d\n", old, (int)(src - start)); break; } WRITESCRIPT_OFF(dst, offset, new); offset += 4; } } else { WRITESCRIPT_OFF(dst, offset, *src++); offset += 4; } } } /*========================================================== ** ** ** Auto configuration. ** ** **========================================================== */ #if 0 /*---------------------------------------------------------- ** ** Reduce the transfer length to the max value ** we can transfer safely. ** ** Reading a block greater than MAX_SIZE from the ** raw (character) device exercises a memory leak ** in the vm subsystem. This is common to ALL devices. ** We have submitted a description of this bug to ** the FreeBSD bug list. ** It should be fixed in the current release. ** **---------------------------------------------------------- */ void ncr_min_phys (struct buf *bp) { if ((unsigned long)bp->b_bcount > MAX_SIZE) bp->b_bcount = MAX_SIZE; } #endif #if 0 /*---------------------------------------------------------- ** ** Maximal number of outstanding requests per target. ** **---------------------------------------------------------- */ u_int32_t ncr_info (int unit) { return (1); /* may be changed later */ } #endif /*---------------------------------------------------------- ** ** NCR chip devices table and chip look up function. ** Feature bits are defined in ncrreg.h. Is it the ** right place?
** **---------------------------------------------------------- */ typedef struct { unsigned long device_id; unsigned short minrevid; char *name; unsigned char maxburst; unsigned char maxoffs; unsigned char clock_divn; unsigned int features; } ncr_chip; static ncr_chip ncr_chip_table[] = { {NCR_810_ID, 0x00, "ncr 53c810 fast10 scsi", 4, 8, 4, FE_ERL} , {NCR_810_ID, 0x10, "ncr 53c810a fast10 scsi", 4, 8, 4, FE_ERL|FE_LDSTR|FE_PFEN|FE_BOF} , {NCR_815_ID, 0x00, "ncr 53c815 fast10 scsi", 4, 8, 4, FE_ERL|FE_BOF} , {NCR_820_ID, 0x00, "ncr 53c820 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL} , {NCR_825_ID, 0x00, "ncr 53c825 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL|FE_BOF} , {NCR_825_ID, 0x10, "ncr 53c825a fast10 wide scsi", 7, 8, 4, FE_WIDE|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_860_ID, 0x00, "ncr 53c860 fast20 scsi", 4, 8, 5, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_LDSTR|FE_PFEN} , {NCR_875_ID, 0x00, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID, 0x02, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID2, 0x00, "ncr 53c875j fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_885_ID, 0x00, "ncr 53c885 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895_ID, 0x00, "ncr 53c895 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_896_ID, 0x00, "ncr 53c896 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895A_ID, 0x00, "ncr 53c895a fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_1510D_ID, 0x00, "ncr 53c1510d fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} }; static int ncr_chip_lookup(u_long device_id, u_char revision_id) { int i, found; found = -1; for (i = 0; i < nitems(ncr_chip_table); i++) { if (device_id == ncr_chip_table[i].device_id && ncr_chip_table[i].minrevid <= revision_id) { if (found < 0 || ncr_chip_table[found].minrevid < ncr_chip_table[i].minrevid) { found = i; } } } return found; } /*---------------------------------------------------------- ** ** Probe the hostadapter. ** **---------------------------------------------------------- */ static int ncr_probe (device_t dev) { int i; i = ncr_chip_lookup(pci_get_devid(dev), pci_get_revid(dev)); if (i >= 0) { device_set_desc(dev, ncr_chip_table[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /*========================================================== ** ** NCR chip clock divisor table. ** Divisors are multiplied by 10,000,000 in order to make ** calculations more simple. ** **========================================================== */ #define _5M 5000000 static u_long div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /*=============================================================== ** ** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128 ** transfers. 32,64,128 are only supported by 875 and 895 chips. ** We use log base 2 (burst length) as internal code, with ** value 0 meaning "burst disabled". ** **=============================================================== */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. 
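* (Worked example, for illustration: dmode = 0x80, ctest4 bit 7 clear and ctest5 bit 2 set decode to 2 + 4 + 1 = 7, i.e. 1 << 7 = 128-dword bursts; with ctest4 bit 7 set the code is 0 and bursting is disabled.)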
*/ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static void ncr_init_burst(ncb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /*========================================================== ** ** ** Auto configuration: attach and init a host adapter. ** ** **========================================================== */ static int ncr_attach (device_t dev) { ncb_p np = (struct ncb*) device_get_softc(dev); u_char rev = 0; u_long period; int i, rid; u_int8_t usrsync; u_int8_t usrwide; struct cam_devq *devq; /* ** allocate and initialize structures. */ np->dev = dev; mtx_init(&np->lock, "ncr", NULL, MTX_DEF); callout_init_mtx(&np->timer, &np->lock, 0); /* ** Try to map the controller chip to ** virtual and physical memory. */ np->reg_rid = PCIR_BAR(1); np->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->reg_rid, RF_ACTIVE); if (!np->reg_res) { device_printf(dev, "could not map memory\n"); return ENXIO; } /* ** Now the INB INW INL OUTB OUTW OUTL macros ** can be used safely. */ #ifdef NCR_IOMAPPED /* ** Try to map the controller chip into iospace. */ if (!pci_map_port (config_id, 0x10, &np->port)) return; #endif /* ** Save some controller register default values */ np->rv_scntl3 = INB(nc_scntl3) & 0x77; np->rv_dmode = INB(nc_dmode) & 0xce; np->rv_dcntl = INB(nc_dcntl) & 0xa9; np->rv_ctest3 = INB(nc_ctest3) & 0x01; np->rv_ctest4 = INB(nc_ctest4) & 0x88; np->rv_ctest5 = INB(nc_ctest5) & 0x24; np->rv_gpcntl = INB(nc_gpcntl); np->rv_stest2 = INB(nc_stest2) & 0x20; if (bootverbose >= 2) { printf ("\tBIOS values: SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } np->rv_dcntl |= NOCOM; /* ** Do chip dependent initialization. */ rev = pci_get_revid(dev); /* ** Get chip features from chips table. */ i = ncr_chip_lookup(pci_get_devid(dev), rev); if (i >= 0) { np->maxburst = ncr_chip_table[i].maxburst; np->maxoffs = ncr_chip_table[i].maxoffs; np->clock_divn = ncr_chip_table[i].clock_divn; np->features = ncr_chip_table[i].features; } else { /* Shouldn't happen if probe() is ok */ np->maxburst = 4; np->maxoffs = 8; np->clock_divn = 4; np->features = FE_ERL; } np->maxwide = np->features & FE_WIDE ? 1 : 0; np->clock_khz = np->features & FE_CLK80 ? 80000 : 40000; if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* ** Get the frequency of the chip's clock. ** Find the right value for scntl3. */ if (np->features & (FE_ULTRA|FE_ULTRA2)) ncr_getclock(np, np->multiplier); #ifdef NCR_TEKRAM_EEPROM if (bootverbose) { device_printf(dev, "Tekram EEPROM read %s\n", read_tekram_eeprom (np, NULL) ? "succeeded" : "failed"); } #endif /* NCR_TEKRAM_EEPROM */ /* * If scntl3 != 0, we assume BIOS is present. */ if (np->rv_scntl3) np->features |= FE_BIOS; /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds.
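* (Worked example: with clock_khz = 80000, period = 4 * 10,000,000 / 80,000 = 500 tenths of a nanosecond, i.e. 50 ns, which the ladder below maps to minsync = 12, Fast-20.)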
*/ period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & FE_ULTRA2)) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * Now, some features available with Symbios compatible boards. * LED support through GPIO0 and DIFF support. */ #ifdef SCSI_NCR_SYMBIOS_COMPAT if (!(np->rv_gpcntl & 0x01)) np->features |= FE_LED0; #if 0 /* Not safe enough without NVRAM support or user settable option */ if (!(INB(nc_gpreg) & 0x08)) np->features |= FE_DIFF; #endif #endif /* SCSI_NCR_SYMBIOS_COMPAT */ /* * Prepare initial IO registers settings. * Trust BIOS only if we believe we have one and if we want to. */ #ifdef SCSI_NCR_TRUST_BIOS if (!(np->features & FE_BIOS)) { #else if (1) { #endif np->rv_dmode = 0; np->rv_dcntl = NOCOM; np->rv_ctest3 = 0; np->rv_ctest4 = MPEE; np->rv_ctest5 = 0; np->rv_stest2 = 0; if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_PFEN) np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ if (np->features & FE_DIFF) np->rv_stest2 |= 0x20; /* Differential mode */ ncr_init_burst(np, np->maxburst); /* Max dwords burst length */ } else { np->maxburst = burst_code(np->rv_dmode, np->rv_ctest4, np->rv_ctest5); } /* ** Get on-chip SRAM address, if supported */ if ((np->features & FE_RAM) && sizeof(struct script) <= 4096) { np->sram_rid = PCIR_BAR(2); np->sram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->sram_rid, RF_ACTIVE); } /* ** Allocate structure for script relocation. */ if (np->sram_res != NULL) { np->script = NULL; np->p_script = rman_get_start(np->sram_res); } else if (sizeof (struct script) > PAGE_SIZE) { np->script = (struct script*) contigmalloc (round_page(sizeof (struct script)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->script = (struct script *) malloc (sizeof (struct script), M_DEVBUF, M_WAITOK); } if (sizeof (struct scripth) > PAGE_SIZE) { np->scripth = (struct scripth*) contigmalloc (round_page(sizeof (struct scripth)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->scripth = (struct scripth *) malloc (sizeof (struct scripth), M_DEVBUF, M_WAITOK); } #ifdef SCSI_NCR_PCI_CONFIG_FIXUP /* ** If cache line size is enabled, check PCI config space and ** try to fix it up if necessary. 
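** (Concretely: a PCIR_CACHELNSZ of 0 is bumped to 8 dwords and PCIM_CMD_MWRICEN is set in the command register, so memory write-and-invalidate cycles can actually be used.)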
*/ #ifdef PCIR_CACHELNSZ /* To be sure that new PCI stuff is present */ { u_char cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); u_short command = pci_read_config(dev, PCIR_COMMAND, 2); if (!cachelnsz) { cachelnsz = 8; device_printf(dev, "setting PCI cache line size register to %d.\n", (int)cachelnsz); pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); } if (!(command & PCIM_CMD_MWRICEN)) { command |= PCIM_CMD_MWRICEN; device_printf(dev, "setting PCI command write and invalidate.\n"); pci_write_config(dev, PCIR_COMMAND, command, 2); } } #endif /* PCIR_CACHELNSZ */ #endif /* SCSI_NCR_PCI_CONFIG_FIXUP */ /* Initialize per-target user settings */ usrsync = 0; if (SCSI_NCR_DFLT_SYNC) { usrsync = SCSI_NCR_DFLT_SYNC; if (usrsync > np->maxsync) usrsync = np->maxsync; if (usrsync < np->minsync) usrsync = np->minsync; } usrwide = (SCSI_NCR_MAX_WIDE); if (usrwide > np->maxwide) usrwide=np->maxwide; for (i=0;i<MAX_TARGET;i++) { tcb_p tp = &np->target[i]; tp->tinfo.user.period = usrsync; tp->tinfo.user.offset = usrsync != 0 ? np->maxoffs : 0; tp->tinfo.user.width = usrwide; tp->tinfo.disc_tag = NCR_CUR_DISCENB | NCR_CUR_TAGENB | NCR_USR_DISCENB | NCR_USR_TAGENB; } /* ** Bells and whistles ;-) */ if (bootverbose) device_printf(dev, "minsync=%d, maxsync=%d, maxoffs=%d, %d dwords burst, %s dma fifo\n", np->minsync, np->maxsync, np->maxoffs, burst_length(np->maxburst), (np->rv_ctest5 & DFS) ? "large" : "normal"); /* ** Print some complementary information that can be helpful. */ if (bootverbose) device_printf(dev, "%s, %s IRQ driver%s\n", np->rv_stest2 & 0x20 ? "differential" : "single-ended", np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->sram_res ? ", using on-chip SRAM" : ""); /* ** Patch scripts to physical addresses */ ncr_script_fill (&script0, &scripth0); if (np->script) np->p_script = vtophys(np->script); np->p_scripth = vtophys(np->scripth); ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script, sizeof(struct script)); ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth, sizeof(struct scripth)); /* ** Patch the script for LED support. */ if (np->features & FE_LED0) { WRITESCRIPT(reselect[0], SCR_REG_REG(gpreg, SCR_OR, 0x01)); WRITESCRIPT(reselect1[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe)); WRITESCRIPT(reselect2[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe)); } /* ** init data structure */ np->jump_tcb.l_cmd = SCR_JUMP; np->jump_tcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); /* ** Get SCSI addr of host adapter (set by bios?). */ np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR; #ifdef NCR_DUMP_REG /* ** Log the initial register contents */ { int reg; for (reg=0; reg<256; reg+=4) { if (reg%16==0) printf ("reg[%2x]", reg); printf (" %08x", (int)pci_conf_read (config_id, reg)); if (reg%16==12) printf ("\n"); } } #endif /* NCR_DUMP_REG */ /* ** Reset chip. */ OUTB (nc_istat, SRST); DELAY (1000); OUTB (nc_istat, 0 ); /* ** Now check the cache handling of the pci chipset. */ if (ncr_snooptest (np)) { printf ("CACHE INCORRECTLY CONFIGURED.\n"); return EINVAL; } /* ** Install the interrupt handler. */ rid = 0; np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (np->irq_res == NULL) { device_printf(dev, "interruptless mode: reduced performance.\n"); } else { bus_setup_intr(dev, np->irq_res, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, ncr_intr, np, &np->irq_handle); } /* ** Create the device queue.
We only allow MAX_START-1 concurrent ** transactions so we can be sure to have one element free in our ** start queue to reset to the idle loop. */ devq = cam_simq_alloc(MAX_START - 1); if (devq == NULL) return ENOMEM; /* ** Now tell the generic SCSI layer ** about our bus. */ np->sim = cam_sim_alloc(ncr_action, ncr_poll, "ncr", np, device_get_unit(dev), &np->lock, 1, MAX_TAGS, devq); if (np->sim == NULL) { cam_simq_free(devq); return ENOMEM; } mtx_lock(&np->lock); if (xpt_bus_register(np->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(np->sim, /*free_devq*/ TRUE); mtx_unlock(&np->lock); return ENOMEM; } if (xpt_create_path(&np->path, /*periph*/NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/TRUE); mtx_unlock(&np->lock); return ENOMEM; } /* ** start the timeout daemon */ ncr_timeout (np); np->lasttime=0; mtx_unlock(&np->lock); return 0; } /*========================================================== ** ** ** Process pending device interrupts. ** ** **========================================================== */ static void ncr_intr(vnp) void *vnp; { ncb_p np = vnp; mtx_lock(&np->lock); ncr_intr_locked(np); mtx_unlock(&np->lock); } static void ncr_intr_locked(ncb_p np) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Repeat until no outstanding ints */ do { ncr_exception (np); } while (INB(nc_istat) & (INTF|SIP|DIP)); np->ticks = 100; } if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n"); } /*========================================================== ** ** ** Start execution of a SCSI command. ** This is called from the generic SCSI driver. ** ** **========================================================== */ static void ncr_action (struct cam_sim *sim, union ccb *ccb) { ncb_p np; np = (ncb_p) cam_sim_softc(sim); mtx_assert(&np->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { nccb_p cp; lcb_p lp; tcb_p tp; struct ccb_scsiio *csio; u_int8_t *msgptr; u_int msglen; u_int msglen2; int segments; u_int8_t nego; u_int8_t idmsg; int qidx; tp = &np->target[ccb->ccb_h.target_id]; csio = &ccb->csio; /* * Make sure we support this request. We can't do * PHYS pointers. */ if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * This is the last chance to check whether this CCB * needs to be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; /*--------------------------------------------------- ** ** Assign an nccb / bind ccb ** **---------------------------------------------------- */ cp = ncr_get_nccb (np, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (cp == NULL) { /* XXX JGibbs - Freeze SIMQ */ ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } cp->ccb = ccb; /*--------------------------------------------------- ** ** timestamp ** **---------------------------------------------------- */ /* ** XXX JGibbs - Isn't this expensive ** enough to be conditionalized??
*/ bzero (&cp->phys.header.stamp, sizeof (struct tstamp)); cp->phys.header.stamp.start = ticks; nego = 0; if (tp->nego_cp == NULL) { if (tp->tinfo.current.width != tp->tinfo.goal.width) { tp->nego_cp = cp; nego = NS_WIDE; } else if ((tp->tinfo.current.period != tp->tinfo.goal.period) || (tp->tinfo.current.offset != tp->tinfo.goal.offset)) { tp->nego_cp = cp; nego = NS_SYNC; } } /*--------------------------------------------------- ** ** choose a new tag ... ** **---------------------------------------------------- */ lp = tp->lp[ccb->ccb_h.target_lun]; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && (ccb->csio.tag_action != CAM_TAG_ACTION_NONE) && (nego == 0)) { /* ** assign a tag to this nccb */ while (!cp->tag) { nccb_p cp2 = lp->next_nccb; lp->lasttag = lp->lasttag % 255 + 1; while (cp2 && cp2->tag != lp->lasttag) cp2 = cp2->next_nccb; if (cp2) continue; cp->tag=lp->lasttag; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(ccb); printf ("using tag #%d.\n", cp->tag); } } } else { cp->tag=0; } /*---------------------------------------------------- ** ** Build the identify / tag / sdtr message ** **---------------------------------------------------- */ idmsg = MSG_IDENTIFYFLAG | ccb->ccb_h.target_lun; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) idmsg |= MSG_IDENTIFY_DISCFLAG; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; if (cp->tag) { msgptr[msglen++] = ccb->csio.tag_action; msgptr[msglen++] = cp->tag; } switch (nego) { case NS_SYNC: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_SDTR_LEN; msgptr[msglen++] = MSG_EXT_SDTR; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("sync msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-5]); printf (".\n"); }; break; case NS_WIDE: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_WDTR_LEN; msgptr[msglen++] = MSG_EXT_WDTR; msgptr[msglen++] = tp->tinfo.goal.width; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("wide msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-4]); printf (".\n"); }; break; } /*---------------------------------------------------- ** ** Build the identify message for getcc. 
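**
**	Worked example for the messages built above (values assumed,
**	using the standard SCSI-2 message codes): for lun 2 with
**	disconnection enabled,
**
**		idmsg = MSG_IDENTIFYFLAG | MSG_IDENTIFY_DISCFLAG | 2
**		      = 0x80 | 0x40 | 0x02 = 0xc2
**
**	and a tagged command sends { 0xc2, 0x20 (simple queue tag), tag }.
**	The getcc message below is the bare identify byte.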
** **---------------------------------------------------- */ cp->scsi_smsg2 [0] = idmsg; msglen2 = 1; /*---------------------------------------------------- ** ** Build the data descriptors ** **---------------------------------------------------- */ /* XXX JGibbs - Handle other types of I/O */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { segments = ncr_scatter(&cp->phys, (vm_offset_t)csio->data_ptr, (vm_size_t)csio->dxfer_len); if (segments < 0) { ccb->ccb_h.status = CAM_REQ_TOO_BIG; ncr_free_nccb(np, cp); xpt_done(ccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_in); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } else { /* CAM_DIR_OUT */ cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_out); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } } else { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, no_data); cp->phys.header.goalp = cp->phys.header.savep; } cp->phys.header.lastp = cp->phys.header.savep; /*---------------------------------------------------- ** ** fill in nccb ** **---------------------------------------------------- ** ** ** physical -> virtual backlink ** Generic SCSI command */ cp->phys.header.cp = cp; /* ** Startqueue */ cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, select); cp->phys.header.launch.l_cmd = SCR_JUMP; /* ** select */ cp->phys.select.sel_id = ccb->ccb_h.target_id; cp->phys.select.sel_scntl3 = tp->tinfo.wval; cp->phys.select.sel_sxfer = tp->tinfo.sval; /* ** message */ cp->phys.smsg.addr = CCB_PHYS (cp, scsi_smsg); cp->phys.smsg.size = msglen; cp->phys.smsg2.addr = CCB_PHYS (cp, scsi_smsg2); cp->phys.smsg2.size = msglen2; /* ** command */ cp->phys.cmd.addr = vtophys (scsiio_cdb_ptr(csio)); cp->phys.cmd.size = csio->cdb_len; /* ** sense command */ cp->phys.scmd.addr = CCB_PHYS (cp, sensecmd); cp->phys.scmd.size = 6; /* ** patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = ccb->ccb_h.target_lun << 5; cp->sensecmd[4] = csio->sense_len; /* ** sense data */ cp->phys.sense.addr = vtophys (&csio->sense_data); cp->phys.sense.size = csio->sense_len; /* ** status */ cp->actualquirks = QUIRK_NOMSG; cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY; cp->s_status = SCSI_STATUS_ILLEGAL; cp->parity_status = 0; cp->xerr_status = XE_OK; cp->sync_status = tp->tinfo.sval; cp->nego_status = nego; cp->wide_status = tp->tinfo.wval; /*---------------------------------------------------- ** ** Critical region: start this job. ** **---------------------------------------------------- */ /* ** reselect pattern and activate this job. */ cp->jump_nccb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (cp->tag))); cp->tlimit = time_second + ccb->ccb_h.timeout / 1000 + 2; cp->magic = CCB_MAGIC; /* ** insert into start queue. */ qidx = np->squeueput + 1; if (qidx >= MAX_START) qidx = 0; np->squeue [qidx ] = NCB_SCRIPT_PHYS (np, idle); np->squeue [np->squeueput] = CCB_PHYS (cp, phys); np->squeueput = qidx; if(DEBUG_FLAGS & DEBUG_QUEUE) device_printf(np->dev, "queuepos=%d tryoffset=%d.\n", np->squeueput, (unsigned)(READSCRIPT(startpos[0]) - (NCB_SCRIPTH_PHYS (np, tryloop)))); /* ** Script processor may be waiting for reselect. ** Wake it up. 
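**
**	Sketch of the ring insert above (MAX_START of 8 assumed for
**	illustration): with squeueput == 6, the job's physical nccb is
**	written to slot 6, slot 7 is reset to the idle script, and
**	squeueput becomes 7; the trailing idle entry is what keeps the
**	script spinning safely in tryloop.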
*/ OUTB (nc_istat, SIGP); break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; tcb_p tp; u_int update_type; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) update_type |= NCR_TRANS_GOAL; if (cts->type == CTS_TYPE_USER_SETTINGS) update_type |= NCR_TRANS_USER; tp = &np->target[ccb->ccb_h.target_id]; /* Tag and disc enables */ if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_CUR_DISCENB; } if (update_type & NCR_TRANS_USER) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_USR_DISCENB; } } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_CUR_TAGENB; } if (update_type & NCR_TRANS_USER) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_USR_TAGENB; } } /* Filter bus width and sync negotiation settings */ if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width > np->maxwide) spi->bus_width = np->maxwide; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { if (spi->sync_period != 0 && (spi->sync_period < np->minsync)) spi->sync_period = np->minsync; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) spi->sync_period = 0; if (spi->sync_offset > np->maxoffs) spi->sync_offset = np->maxoffs; } } if ((update_type & NCR_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.user.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.user.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.user.width = spi->bus_width; } if ((update_type & NCR_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.goal.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.goal.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.goal.width = spi->bus_width; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ncr_transinfo *tinfo; tcb_p tp = &np->target[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tinfo = &tp->tinfo.current; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_CUR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } else { tinfo = 
&tp->tinfo.user; if (tp->tinfo.disc_tag & NCR_USR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_USR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* XXX JGibbs - I'm sure the NCR uses a different strategy, * but it should be able to deal with Adaptec * geometry too. */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { OUTB (nc_scntl1, CRST); ccb->ccb_h.status = CAM_REQ_CMP; DELAY(10000); /* Wait until our interrupt handler sees it */ xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 15 : 7; cpi->max_lun = MAX_LUN - 1; cpi->initiator_id = np->myaddr; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /*========================================================== ** ** ** Complete execution of a SCSI command. ** Signal completion to the generic SCSI driver. ** ** **========================================================== */ static void ncr_complete (ncb_p np, nccb_p cp) { union ccb *ccb; tcb_p tp; /* ** Sanity check */ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->ccb) return; cp->magic = 1; cp->tlimit= 0; /* ** No Reselect anymore. */ cp->jump_nccb.l_cmd = (SCR_JUMP); /* ** No starting. */ cp->phys.header.launch.l_paddr= NCB_SCRIPT_PHYS (np, idle); /* ** timestamp */ ncb_profile (np, cp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("CCB=%x STAT=%x/%x\n", (int)(intptr_t)cp & 0xfff, cp->host_status,cp->s_status); ccb = cp->ccb; cp->ccb = NULL; tp = &np->target[ccb->ccb_h.target_id]; /* ** We do not queue more than 1 nccb per target ** with negotiation at any time. If this nccb was ** used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; /* ** Check for parity errors. */ /* XXX JGibbs - What about reporting them??? */ if (cp->parity_status) { PRINT_ADDR(ccb); printf ("%d parity error(s), fallback.\n", cp->parity_status); /* ** fallback to asynch transfer. */ tp->tinfo.goal.period = 0; tp->tinfo.goal.offset = 0; } /* ** Check for extended errors. 
*/ if (cp->xerr_status != XE_OK) { PRINT_ADDR(ccb); switch (cp->xerr_status) { case XE_EXTRA_DATA: printf ("extraneous data discarded.\n"); break; case XE_BAD_PHASE: printf ("illegal scsi phase (4/5).\n"); break; default: printf ("extended error %d.\n", cp->xerr_status); break; } if (cp->host_status==HS_COMPLETE) cp->host_status = HS_FAIL; } /* ** Check the status. */ if (cp->host_status == HS_COMPLETE) { if (cp->s_status == SCSI_STATUS_OK) { /* ** All went well. */ /* XXX JGibbs - Properly calculate residual */ tp->bytes += ccb->csio.dxfer_len; tp->transfers ++; ccb->ccb_h.status = CAM_REQ_CMP; } else if ((cp->s_status & SCSI_STATUS_SENSE) != 0) { /* * XXX Could be TERMIO too. Should record * original status. */ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; cp->s_status &= ~SCSI_STATUS_SENSE; if (cp->s_status == SCSI_STATUS_OK) { ccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; } else { ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; } } else { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = cp->s_status; } } else if (cp->host_status == HS_SEL_TIMEOUT) { /* ** Device failed selection */ ccb->ccb_h.status = CAM_SEL_TIMEOUT; } else if (cp->host_status == HS_TIMEOUT) { /* ** No response */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; } else if (cp->host_status == HS_STALL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; } else { /* ** Other protocol messes */ PRINT_ADDR(ccb); printf ("COMMAND FAILED (%x %x) @%p.\n", cp->host_status, cp->s_status, cp); ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* ** Free this nccb */ ncr_free_nccb (np, cp); /* ** signal completion to generic driver. */ xpt_done (ccb); } /*========================================================== ** ** ** Signal all (or one) control block done. ** ** **========================================================== */ static void ncr_wakeup (ncb_p np, u_long code) { /* ** Starting at the default nccb and following ** the links, complete all jobs with a ** host_status greater than "disconnect". ** ** If the "code" parameter is not zero, ** complete all jobs that are not IDLE. */ nccb_p cp = np->link_nccb; while (cp) { switch (cp->host_status) { case HS_IDLE: break; case HS_DISCONNECT: if(DEBUG_FLAGS & DEBUG_TINY) printf ("D"); /* FALLTHROUGH */ case HS_BUSY: case HS_NEGOTIATE: if (!code) break; cp->host_status = code; /* FALLTHROUGH */ default: ncr_complete (np, cp); break; } cp = cp -> link_nccb; } } static void ncr_freeze_devq (ncb_p np, struct cam_path *path) { nccb_p cp; int i; int count; int firstskip; /* ** Starting at the first nccb and following ** the links, complete all jobs that match ** the passed in path and are in the start queue. 
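**
**	Sketch of the two passes below (queue contents assumed):
**	matching entries are first overwritten with the "skip" script,
**	then a queue such as
**
**		[ job0, skip, job1, skip, idle ]
**
**	is compacted in place to [ job0, job1, idle ] and squeueput is
**	pulled back to the slot after the last surviving entry.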
*/ cp = np->link_nccb; count = 0; firstskip = 0; while (cp) { switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: if ((cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) && (xpt_path_comp(path, cp->ccb->ccb_h.path) >= 0)) { /* Mark for removal from the start queue */ for (i = 1; i < MAX_START; i++) { int idx; idx = np->squeueput - i; if (idx < 0) idx = MAX_START + idx; if (np->squeue[idx] == CCB_PHYS(cp, phys)) { np->squeue[idx] = NCB_SCRIPT_PHYS (np, skip); if (i > firstskip) firstskip = i; break; } } cp->host_status=HS_STALL; ncr_complete (np, cp); count++; } break; default: break; } cp = cp->link_nccb; } if (count > 0) { int j; int bidx; /* Compress the start queue */ j = 0; bidx = np->squeueput; i = np->squeueput - firstskip; if (i < 0) i = MAX_START + i; for (;;) { bidx = i - j; if (bidx < 0) bidx = MAX_START + bidx; if (np->squeue[i] == NCB_SCRIPT_PHYS (np, skip)) { j++; } else if (j != 0) { np->squeue[bidx] = np->squeue[i]; if (np->squeue[bidx] == NCB_SCRIPT_PHYS(np, idle)) break; } i = (i + 1) % MAX_START; } np->squeueput = bidx; } } /*========================================================== ** ** ** Start NCR chip. ** ** **========================================================== */ static void ncr_init(ncb_p np, char * msg, u_long code) { int i; /* ** Reset chip. */ OUTB (nc_istat, SRST); DELAY (1000); OUTB (nc_istat, 0); /* ** Message. */ if (msg) device_printf(np->dev, "restart (%s).\n", msg); /* ** Clear Start Queue */ for (i=0;i<MAX_START;i++) np->squeue [i] = NCB_SCRIPT_PHYS (np, idle); /* ** Start at first entry. */ np->squeueput = 0; WRITESCRIPT(startpos[0], NCB_SCRIPTH_PHYS (np, tryloop)); WRITESCRIPT(start0 [0], SCR_INT ^ IFFALSE (0)); /* ** Wakeup all pending jobs. */ ncr_wakeup (np, code); /* ** Init chip. */ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort ... */ OUTB (nc_scntl0, 0xca ); /* full arb., ena parity, par->ATN */ OUTB (nc_scntl1, 0x00 ); /* odd parity, and remove CRST!! */ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ OUTB (nc_scid , RRE|np->myaddr);/* host adapter SCSI address */ OUTW (nc_respid, 1ul<<np->myaddr);/* id to respond to */ OUTB (nc_istat , SIGP ); /* Signal Process */ OUTB (nc_dmode , np->rv_dmode); /* XXX modify burstlen ??? */ OUTB (nc_dcntl , np->rv_dcntl); OUTB (nc_ctest3, np->rv_ctest3); OUTB (nc_ctest5, np->rv_ctest5); OUTB (nc_ctest4, np->rv_ctest4);/* enable master parity checking */ OUTB (nc_stest2, np->rv_stest2|EXT); /* Extended Sreq/Sack filtering */ OUTB (nc_stest3, TE ); /* TolerANT enable */ OUTB (nc_stime0, 0x0b ); /* HTH = disabled, STO = 0.1 sec. */ if (bootverbose >= 2) { printf ("\tACTUAL values:SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* ** Enable GPIO0 pin for writing if LED support. */ if (np->features & FE_LED0) { OUTOFFB (nc_gpcntl, 0x01); } /* ** Fill in target structure. */ for (i=0;i<MAX_TARGET;i++) { tcb_p tp = &np->target[i]; tp->tinfo.sval = 0; tp->tinfo.wval = np->rv_scntl3; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT; } /* ** enable ints */ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST); OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID); /* ** Start script processor.
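**
**	(Worked example for the setup above, address assumed: with
**	np->myaddr == 7 the respond-id mask written to nc_respid is
**	1ul << 7 == 0x0080, so the chip answers reselections for
**	SCSI id 7 only.)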
*/ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); /* * Notify the XPT of the event */ if (code == HS_RESET) xpt_async(AC_BUS_RESET, np->path, NULL); } static void ncr_poll(struct cam_sim *sim) { ncr_intr_locked(cam_sim_softc(sim)); } /*========================================================== ** ** Get clock factor and sync divisor for a given ** synchronous factor period. ** Returns the clock factor (in sxfer) and scntl3 ** synchronous divisor field. ** **========================================================== */ static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p) { u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u_long fak; /* Sync factor in sxfer */ u_long per; /* Period in tenths of ns */ u_long kpc; /* (per * clk) */ /* ** Compute the synchronous period in tenths of nano-seconds */ if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; /* ** Look for the greatest clock divisor that allows an ** input speed faster than the period. */ kpc = per * clk; while (--div >= 0) if (kpc >= (div_10M[div] * 4)) break; /* ** Calculate the lowest clock factor that allows an output ** speed not faster than the period. */ fak = (kpc - 1) / div_10M[div] + 1; #if 0 /* You can #if 1 if you think this optimization is useful */ per = (fak * div_10M[div]) / clk; /* ** Why not try the immediately lower divisor and choose ** the one that allows the fastest output speed? ** We don't want the input speed to be much greater than ** the output speed. */ if (div >= 1 && fak < 6) { u_long fak2, per2; fak2 = (kpc - 1) / div_10M[div-1] + 1; per2 = (fak2 * div_10M[div-1]) / clk; if (per2 < per && fak2 <= 6) { fak = fak2; per = per2; --div; } } #endif if (fak < 4) fak = 4; /* Should never happen, too bad ... */ /* ** Compute and return sync parameters for the ncr */ *fakp = fak - 4; *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0); } /*========================================================== ** ** Switch sync mode for current job and its target ** **========================================================== */ static void ncr_setsync(ncb_p np, nccb_p cp, u_char scntl3, u_char sxfer, u_char period) { union ccb *ccb; struct ccb_trans_settings neg; tcb_p tp; int div; u_int target = INB (nc_sdid) & 0x0f; u_int period_10ns; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; if (!scntl3 || !(sxfer & 0x1f)) scntl3 = np->rv_scntl3; scntl3 = (scntl3 & 0xf0) | (tp->tinfo.wval & EWS) | (np->rv_scntl3 & 0x07); /* ** Deduce the value of controller sync period from scntl3. ** period is in tenths of nano-seconds. */ div = ((scntl3 >> 4) & 0x7); if ((sxfer & 0x1f) && div) period_10ns = (((sxfer>>5)+4)*div_10M[div-1])/np->clock_khz; else period_10ns = 0; tp->tinfo.goal.period = period; tp->tinfo.goal.offset = sxfer & 0x1f; tp->tinfo.current.period = period; tp->tinfo.current.offset = sxfer & 0x1f; /* ** Stop there if sync parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; if (sxfer & 0x1f) { /* ** Disable extended Sreq/Sack filtering */ if (period_10ns <= 2000) OUTOFFB (nc_stest2, EXT); } /* ** Tell the SCSI layer about the ** new transfer parameters.
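**
**	(Worked example for ncr_getsync above, assuming a 40 MHz
**	clock, clock_khz = 40000, sfac = 25 (100 ns) and the usual
**	div_10M[0] = 10000000: per = 40*25 = 1000 and kpc = 4e7; the
**	divisor scan stops at div = 0 since kpc >= div_10M[0]*4,
**	fak = 4, hence *fakp = 0 and *scntl3p = 0x10 -- divisor 1,
**	four clocks per period.)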
*/ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.sync_period = period; neg.xport_specific.spi.sync_offset = sxfer & 0x1f; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** Switch wide mode for current job and its target ** SCSI specs say: a SCSI device that accepts a WDTR ** message shall reset the synchronous agreement to ** asynchronous mode. ** **========================================================== */ static void ncr_setwide (ncb_p np, nccb_p cp, u_char wide, u_char ack) { union ccb *ccb; struct ccb_trans_settings neg; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp; u_char scntl3; u_char sxfer; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; tp->tinfo.current.width = wide; tp->tinfo.goal.width = wide; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; scntl3 = (tp->tinfo.wval & (~EWS)) | (wide ? EWS : 0); sxfer = ack ? 0 : tp->tinfo.sval; /* ** Stop there if sync/wide parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; /* Tell the SCSI layer about the new transfer params */ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.bus_width = (scntl3 & EWS) ? MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; neg.xport_specific.spi.sync_period = 0; neg.xport_specific.spi.sync_offset = 0; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** ** ncr timeout handler. ** ** **========================================================== ** ** Misused to keep the driver running when ** interrupts are not configured correctly. ** **---------------------------------------------------------- */ static void ncr_timeout (void *arg) { ncb_p np = arg; time_t thistime = time_second; ticks_t step = np->ticks; u_long count = 0; long signed t; nccb_p cp; mtx_assert(&np->lock, MA_OWNED); if (np->lasttime != thistime) { np->lasttime = thistime; /*---------------------------------------------------- ** ** handle ncr chip timeouts ** ** Assumption: ** We have a chance to arbitrate for the ** SCSI bus at least every 10 seconds. 
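**
**	Sketch of the heartbeat check below (timing illustrative):
**	t = thistime - np->heartbeat grows while the chip stays
**	silent; once it has been >= 2 s on three consecutive ticks,
**	latetime exceeds 2 and SIGP kicks the script out of
**	SEL_WAIT_RESEL in case it has died there.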
** **---------------------------------------------------- */ t = thistime - np->heartbeat; if (t<2) np->latetime=0; else np->latetime++; if (np->latetime>2) { /* ** If there are no requests, the script ** processor will sleep on SEL_WAIT_RESEL. ** But we have to check whether it died. ** Let's try to wake it up. */ OUTB (nc_istat, SIGP); } /*---------------------------------------------------- ** ** handle nccb timeouts ** **---------------------------------------------------- */ for (cp=np->link_nccb; cp; cp=cp->link_nccb) { /* ** look for timed out nccbs. */ if (!cp->host_status) continue; count++; if (cp->tlimit > thistime) continue; /* ** Disable reselect. ** Remove it from startqueue. */ cp->jump_nccb.l_cmd = (SCR_JUMP); if (cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) { device_printf(np->dev, "timeout nccb=%p (skip)\n", cp); cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, skip); } switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: /* FALLTHROUGH */ case HS_DISCONNECT: cp->host_status=HS_TIMEOUT; } cp->tag = 0; /* ** wakeup this nccb. */ ncr_complete (np, cp); } } callout_reset(&np->timer, step ? step : 1, ncr_timeout, np); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Process pending interrupts. */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{"); ncr_exception (np); if (DEBUG_FLAGS & DEBUG_TINY) printf ("}"); } } /*========================================================== ** ** log message for real hard errors ** ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." ** ** exception register: ** ds: dstat ** si: sist ** ** SCSI bus lines: ** so: control lines as driven by NCR. ** si: control lines as seen by NCR. ** sd: scsi data lines as seen by NCR. ** ** wide/fastmode: ** sxfer: (see the manual) ** scntl3: (see the manual) ** ** current script command: ** dsp: script address (relative to start of script). ** dbc: first word of script command. ** ** First 16 registers of the chip: ** r0..rf ** **========================================================== */ static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat) { u_int32_t dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (np->p_script < dsp && dsp <= np->p_script + sizeof(struct script)) { script_ofs = dsp - np->p_script; script_size = sizeof(struct script); script_base = (u_char *) np->script; script_name = "script"; } else if (np->p_scripth < dsp && dsp <= np->p_scripth + sizeof(struct scripth)) { script_ofs = dsp - np->p_scripth; script_size = sizeof(struct scripth); script_base = (u_char *) np->scripth; script_name = "scripth"; } else { script_ofs = dsp; script_size = 0; - script_base = 0; + script_base = NULL; script_name = "mem"; } device_printf(np->dev, "%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { device_printf(np->dev, "script cmd = %08x\n", (int)READSCRIPT_OFF(script_base, script_ofs)); } device_printf(np->dev, "regdump:"); for (i=0; i<16;i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); } /*========================================================== ** ** ** ncr chip exception handler.
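**
**	(Summary of the dispatch order implemented below: the
**	interrupt-on-the-fly wakeups run first, then SCSI reset,
**	selection timeout, phase mismatch, zero-length move and
**	programmed (SIR) interrupts; anything left falls through to
**	the hard-error logging and recovery code.)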
** ** **========================================================== */ static void ncr_exception (ncb_p np) { u_char istat, dstat; u_short sist; /* ** interrupt on the fly ? */ while ((istat = INB (nc_istat)) & INTF) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); OUTB (nc_istat, INTF); np->profile.num_fly++; ncr_wakeup (np, 0); } if (!(istat & (SIP|DIP))) { return; } /* ** Steinbach's Guideline for Systems Programming: ** Never test for an error condition you don't know how to handle. */ sist = (istat & SIP) ? INW (nc_sist) : 0; dstat = (istat & DIP) ? INB (nc_dstat) : 0; np->profile.num_int++; if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); if ((dstat==DFE) && (sist==PAR)) return; /*========================================================== ** ** First the normal cases. ** **========================================================== */ /*------------------------------------------- ** SCSI reset **------------------------------------------- */ if (sist & RST) { ncr_init (np, bootverbose ? "scsi reset" : NULL, HS_RESET); return; } /*------------------------------------------- ** selection timeout ** ** IID excluded from dstat mask! ** (chip bug) **------------------------------------------- */ if ((sist & STO) && !(sist & (GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR))) { ncr_int_sto (np); return; } /*------------------------------------------- ** Phase mismatch. **------------------------------------------- */ if ((sist & MA) && !(sist & (STO|GEN|HTH|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { ncr_int_ma (np, dstat); return; } /*---------------------------------------- ** move command with length 0 **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_MOVE_TBL)) { /* ** Target wants more data than available. ** The "no_data" script will do it. */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, no_data)); return; } /*------------------------------------------- ** Programmed interrupt **------------------------------------------- */ if ((dstat & SIR) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|IID)) && (INB(nc_dsps) <= SIR_MAX)) { ncr_int_sir (np); return; } /*======================================== ** log message for real hard errors **======================================== */ ncr_log_hard_error(np, sist, dstat); /*======================================== ** do the register dump **======================================== */ if (time_second - np->regtime > 10) { int i; np->regtime = time_second; for (i=0; i<sizeof(np->regdump); i++) ((volatile char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; np->regdump.nc_sist = sist; } /*---------------------------------------- ** clean up the dma fifo **---------------------------------------- */ if ( (INB(nc_sstat0) & (ILF|ORF|OLF) ) || (INB(nc_sstat1) & (FF3210) ) || (INB(nc_sstat2) & (ILF1|ORF1|OLF1)) || /* wide ..
*/ !(dstat & DFE)) { device_printf(np->dev, "have to clear fifos.\n"); OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ } /*---------------------------------------- ** handshake timeout **---------------------------------------- */ if (sist & HTH) { device_printf(np->dev, "handshake timeout\n"); OUTB (nc_scntl1, CRST); DELAY (1000); OUTB (nc_scntl1, 0x00); OUTB (nc_scr0, HS_FAIL); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** unexpected disconnect **---------------------------------------- */ if ((sist & UDC) && !(sist & (STO|GEN|HTH|MA|SGE|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_scr0, HS_UNEXPECTED); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** cannot disconnect **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_WAIT_DISC)) { /* ** Unexpected data cycle while waiting for disconnect. */ if (INB(nc_sstat2) & LDSC) { /* ** It's an early reconnect. ** Let's continue ... */ OUTB (nc_dcntl, np->rv_dcntl | STD); /* ** info message */ device_printf(np->dev, "INFO: LDSC while IID.\n"); return; } device_printf(np->dev, "target %d doesn't release the bus.\n", INB (nc_sdid)&0x0f); /* ** return without restarting the NCR. ** timeout will do the real work. */ return; } /*---------------------------------------- ** single step **---------------------------------------- */ if ((dstat & SSI) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** @RECOVER@ HTH, SGE, ABRT. ** ** We should try to recover from these interrupts. ** They may occur if there are problems with synch transfers, or ** if targets are switched on or off while the driver is running. */ if (sist & SGE) { /* clear scsi offsets */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); } /* ** Freeze controller to be able to read the messages. */ if (DEBUG_FLAGS & DEBUG_FREEZE) { int i; unsigned char val; for (i=0; i<0x60; i++) { switch (i%16) { case 0: device_printf(np->dev, "reg[%d0]: ", i / 16); break; case 4: case 8: case 12: printf (" "); break; } val = bus_read_1(np->reg_res, i); printf (" %x%x", val/16, val%16); if (i%16==15) printf (".\n"); } callout_stop(&np->timer); device_printf(np->dev, "halted!\n"); /* ** don't restart controller ... */ OUTB (nc_istat, SRST); return; } #ifdef NCR_FREEZE /* ** Freeze system to be able to read the messages. */ printf ("ncr: fatal error: system halted - press reset to reboot ..."); for (;;); #endif /* ** sorry, have to kill ALL jobs ... */ ncr_init (np, "fatal error", HS_FAIL); } /*========================================================== ** ** ncr chip exception handler for selection timeout ** **========================================================== ** ** There seems to be a bug in the 53c810. ** Although a STO-Interrupt is pending, ** it continues executing script commands. ** But it will fail and interrupt (IID) on ** the next instruction where it's looking ** for a valid phase. ** **---------------------------------------------------------- */ static void ncr_int_sto (ncb_p np) { u_long dsa, scratcha, diff; nccb_p cp; if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); /* ** look for nccb and set the status. 
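**
**	(Sketch of the start-queue repair that follows: each tryloop
**	entry is 20 bytes -- five 32-bit script words -- so the
**	scratcha value is accepted as a restart point only when its
**	offset from tryloop is a multiple of 20 within MAX_START
**	entries; anything else forces a full ncr_init.)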
*/ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (cp) { cp-> host_status = HS_SEL_TIMEOUT; ncr_complete (np, cp); } /* ** repair start queue */ scratcha = INL (nc_scratcha); diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop); /* assert ((diff <= MAX_START * 20) && !(diff % 20));*/ if ((diff <= MAX_START * 20) && !(diff % 20)) { WRITESCRIPT(startpos[0], scratcha); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); return; } ncr_init (np, "selection timeout", HS_FAIL); } /*========================================================== ** ** ** ncr chip exception handler for phase errors. ** ** **========================================================== ** ** We have to construct a new transfer descriptor, ** to transfer the rest of the current block. ** **---------------------------------------------------------- */ static void ncr_int_ma (ncb_p np, u_char dstat) { u_int32_t dbc; u_int32_t rest; u_int32_t dsa; u_int32_t dsp; u_int32_t nxtdsp; volatile void *vdsp_base; size_t vdsp_off; u_int32_t oadr, olen; u_int32_t *tblp, *newcmd; u_char cmd, sbcl, ss0, ss2, ctest5; u_short delta; nccb_p cp; dsp = INL (nc_dsp); dsa = INL (nc_dsa); dbc = INL (nc_dbc); ss0 = INB (nc_sstat0); ss2 = INB (nc_sstat2); sbcl= INB (nc_sbcl); cmd = dbc >> 24; rest= dbc & 0xffffff; ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; if (ctest5 & DFS) delta=(((ctest5<<8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; else delta=(INB (nc_dfifo) - rest) & 0x7f; /* ** The data in the dma fifo has not been transferred to ** the target -> add the amount to the rest ** and clear the data. ** Check the sstat2 register in case of wide transfer. */ if (!(dstat & DFE)) rest += delta; if (ss0 & OLF) rest++; if (ss0 & ORF) rest++; if (INB(nc_scntl3) & EWS) { if (ss2 & OLF1) rest++; if (ss2 & ORF1) rest++; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* ** locate matching cp */ cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (!cp) { device_printf(np->dev, "SCSI phase error fixup: CCB already dequeued (%p)\n", (void *)np->header.cp); return; } if (cp != np->header.cp) { device_printf(np->dev, "SCSI phase error fixup: CCB address mismatch " "(%p != %p) np->nccb = %p\n", (void *)cp, (void *)np->header.cp, (void *)np->link_nccb); /* return;*/ } /* ** find the interrupted script command, ** and the address at which to continue. */ if (dsp == vtophys (&cp->patch[2])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[0]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp == vtophys (&cp->patch[6])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[4]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { vdsp_base = np->script; vdsp_off = dsp - np->p_script - 8; nxtdsp = dsp; } else { vdsp_base = np->scripth; vdsp_off = dsp - np->p_scripth - 8; nxtdsp = dsp; } /* ** log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) { printf ("P%x%x ",cmd&7, sbcl&7); printf ("RL=%d D=%d SS0=%x ", (unsigned) rest, (unsigned) delta, ss0); } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, np->header.cp, dsp, nxtdsp, (volatile char*)vdsp_base+vdsp_off, cmd); } /* ** get old startaddress and old length. 
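**
**	Worked example of the patch built below (sizes assumed): if
**	the interrupted move covered olen = 4096 bytes at oadr and
**	rest = 1024 were still untransferred, the patch area receives
**
**		newcmd[0] = ((cmd & 0x0f) << 24) | 1024   -- move the residue
**		newcmd[1] = oadr + 4096 - 1024            -- from where it stopped
**		newcmd[2] = SCR_JUMP
**		newcmd[3] = nxtdsp                        -- rejoin the script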
*/ oadr = READSCRIPT_OFF(vdsp_base, vdsp_off + 1*4); if (cmd & 0x10) { /* Table indirect */ tblp = (u_int32_t *) ((char*) &cp->phys + oadr); olen = tblp[0]; oadr = tblp[1]; } else { tblp = (u_int32_t *) 0; olen = READSCRIPT_OFF(vdsp_base, vdsp_off) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%lx OADR=%lx\n", (unsigned) (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24), (void *) tblp, (u_long) olen, (u_long) oadr); } /* ** if old phase not dataphase, leave here. */ if (cmd != (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24)) { PRINT_ADDR(cp->ccb); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24); return; } if (cmd & 0x06) { PRINT_ADDR(cp->ccb); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, sbcl&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** choose the correct patch area. ** if savep points to one, choose the other. */ newcmd = cp->patch; if (cp->phys.header.savep == vtophys (newcmd)) newcmd+=4; /* ** fillin the commands */ newcmd[0] = ((cmd & 0x0f) << 24) | rest; newcmd[1] = oadr + olen - rest; newcmd[2] = SCR_JUMP; newcmd[3] = nxtdsp; if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp->ccb); printf ("newcmd[%d] %x %x %x %x.\n", (int)(newcmd - cp->patch), (unsigned)newcmd[0], (unsigned)newcmd[1], (unsigned)newcmd[2], (unsigned)newcmd[3]); } /* ** fake the return address (to the patch). ** and restart script processor at dispatcher. */ np->profile.num_break++; OUTL (nc_temp, vtophys (newcmd)); if ((cmd & 7) == 0) OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); else OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn)); } /*========================================================== ** ** ** ncr chip exception handler for programmed interrupts. ** ** **========================================================== */ static int ncr_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==MSG_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void ncr_int_sir (ncb_p np) { u_char scntl3; u_char chg, ofs, per, fak, wide; u_char num = INB (nc_dsps); - nccb_p cp=0; + nccb_p cp = NULL; u_long dsa; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp = &np->target[target]; int i; if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { case SIR_SENSE_RESTART: case SIR_STALL_RESTART: break; default: /* ** lookup the nccb */ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; assert (cp); if (!cp) goto out; assert (cp == np->header.cp); if (cp != np->header.cp) goto out; } switch (num) { /*-------------------------------------------------------------------- ** ** Processing of interrupted getcc selects ** **-------------------------------------------------------------------- */ case SIR_SENSE_RESTART: /*------------------------------------------ ** Script processor is idle. 
** Look for interrupted "check cond" **------------------------------------------ */ if (DEBUG_FLAGS & DEBUG_RESTART) device_printf(np->dev, "int#%d", num); cp = (nccb_p) 0; for (i=0; i<MAX_TARGET; i++) { tp = &np->target[i]; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); cp = tp->hold_cp; if (!cp) continue; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); if ((cp->host_status==HS_BUSY) && (cp->s_status==SCSI_STATUS_CHECK_COND)) break; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)"); tp->hold_cp = cp = (nccb_p) 0; } if (cp) { if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+ restart job ..\n"); OUTL (nc_dsa, CCB_PHYS (cp, phys)); OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc)); return; } /* ** no job, resume normal processing */ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n"); WRITESCRIPT(start0[0], SCR_INT ^ IFFALSE (0)); break; case SIR_SENSE_FAILED: /*------------------------------------------- ** While trying to select for ** getting the condition code, ** a target reselected us. **------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_RESTART) { PRINT_ADDR(cp->ccb); printf ("in getcc reselect by t%d.\n", INB(nc_ssid) & 0x0f); } /* ** Mark this job */ cp->host_status = HS_BUSY; cp->s_status = SCSI_STATUS_CHECK_COND; np->target[cp->ccb->ccb_h.target_id].hold_cp = cp; /* ** And patch code to restart it. */ WRITESCRIPT(start0[0], SCR_INT); break; /*----------------------------------------------------------------------------- ** ** Everything you always wanted to know about transfer mode negotiation ... ** ** We try to negotiate sync and wide transfer only after ** a successful inquire command. We look at byte 7 of the ** inquire data to determine the capabilities of the target. ** ** When we try to negotiate, we append the negotiation message ** to the identify and (maybe) simple tag message. ** The host status field is set to HS_NEGOTIATE to mark this ** situation. ** ** If the target doesn't answer this message immediately ** (as required by the standard), the SIR_NEGO_FAIL interrupt ** will be raised eventually. ** The handler removes the HS_NEGOTIATE status, and sets the ** negotiated value to the default (async / nowide). ** ** If we receive a matching answer immediately, we check it ** for validity, and set the values. ** ** If we receive a Reject message immediately, we assume the ** negotiation has failed, and fall back to standard values. ** ** If we receive a negotiation message while not in HS_NEGOTIATE ** state, it's a target-initiated negotiation. We prepare a ** (hopefully) valid answer, set our parameters, and send back ** this answer to the target. ** ** If the target doesn't fetch the answer (no message out phase), ** we assume the negotiation has failed, and fall back to default ** settings. ** ** When we set the values, we adjust them in all nccbs belonging ** to this target, in the controller's register, and in the "phys" ** field of the controller's struct ncb. ** ** Possible cases: hs sir msg_in value send goto ** We try to negotiate: ** -> target doesn't msgin NEG FAIL noop defa. - dispatch ** -> target rejected our msg NEG FAIL reject defa. - dispatch ** -> target answered (ok) NEG SYNC sdtr set - clrack ** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad ** -> target answered (ok) NEG WIDE wdtr set - clrack ** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad ** -> any other msgin NEG FAIL noop defa.
- dispatch ** ** Target tries to negotiate: ** -> incoming message --- SYNC sdtr set SDTR - ** -> incoming message --- WIDE wdtr set WDTR - ** We sent our answer: ** -> target doesn't msgout --- PROTO ? defa. - dispatch ** **----------------------------------------------------------------------------- */ case SIR_NEGO_FAILED: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't send an answer message, ** or target rejected our message. ** ** Remove negotiation request. ** **------------------------------------------------------- */ OUTB (HS_PRT, HS_BUSY); /* FALLTHROUGH */ case SIR_NEGO_PROTO: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't fetch the answer message. ** **------------------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("negotiation failed sir=%x status=%x.\n", num, cp->nego_status); } /* ** any error in negotiation: ** fall back to default mode. */ switch (cp->nego_status) { case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } np->msgin [0] = MSG_NOOP; np->msgout[0] = MSG_NOOP; cp->nego_status = 0; OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); break; case SIR_NEGO_SYNC: /* ** Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; if (ofs==0) per=255; /* ** check values against driver limits. */ if (per < np->minsync) {chg = 1; per = np->minsync;} if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} /* ** Check against controller limits. */ fak = 7; scntl3 = 0; if (ofs != 0) { ncr_getsync(np, per, &fak, &scntl3); if (fak > 7) { chg = 1; ofs = 0; } } if (ofs == 0) { fak = 7; per = 0; scntl3 = 0; } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_SYNC: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setsync (np, cp, 0, 0xe0, 0); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setsync (np,cp,scntl3,(fak<<5)|ofs, per); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } } /* ** It was a request. Set value and ** prepare an answer message */ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs, per); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 3; np->msgout[2] = MSG_EXT_SDTR; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } if (!ofs) { OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); return; } np->msgin [0] = MSG_NOOP; break; case SIR_NEGO_WIDE: /* ** Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; wide = np->msgin[3]; /* ** check values against driver limits. 
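**
**	(Illustrative clamp, values assumed: a target requesting
**	wide = 2, i.e. 32-bit, against tinfo.user.width = 1 is cut
**	back to 1 and chg is set, so the WDTR answer sent below
**	advertises 16-bit at most.)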
*/ if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide: wide=%d chg=%d.\n", wide, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_WIDE: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setwide (np, cp, 0, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setwide (np, cp, wide, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; } } /* ** It was a request, set value and ** prepare an answer message */ ncr_setwide (np, cp, wide, 1); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 2; np->msgout[2] = MSG_EXT_WDTR; np->msgout[3] = wide; np->msgin [0] = MSG_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_REJECT_RECEIVED: /*----------------------------------------------- ** ** We received a MSG_MESSAGE_REJECT message. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT received (%x:%x).\n", (unsigned)np->lastmsg, np->msgout[0]); break; case SIR_REJECT_SENT: /*----------------------------------------------- ** ** We received an unknown message ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT sent for "); (void) ncr_show_msg (np->msgin); printf (".\n"); break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_IGN_RESIDUE: /*----------------------------------------------- ** ** We received an IGNORE RESIDUE message, ** which couldn't be handled by the script. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_IGN_WIDE_RESIDUE received, but not yet implemented.\n"); break; case SIR_MISSING_SAVE: /*----------------------------------------------- ** ** We received a DISCONNECT message, ** but the data pointer wasn't saved beforehand. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_DISCONNECT received, but datapointer not saved:\n" "\tdata=%x save=%x goal=%x.\n", (unsigned) INL (nc_temp), (unsigned) np->header.savep, (unsigned) np->header.goalp); break; /*-------------------------------------------------------------------- ** ** Processing of a "SCSI_STATUS_QUEUE_FULL" status. ** ** XXX JGibbs - We should do the same thing for BUSY status. ** ** The current command has been rejected, ** because there are too many in the command queue. ** We have started too many commands for that target. ** **-------------------------------------------------------------------- */ case SIR_STALL_QUEUE: cp->xerr_status = XE_OK; cp->host_status = HS_COMPLETE; cp->s_status = SCSI_STATUS_QUEUE_FULL; ncr_freeze_devq(np, cp->ccb->ccb_h.path); ncr_complete(np, cp); /* FALLTHROUGH */ case SIR_STALL_RESTART: /*----------------------------------------------- ** ** Enable selecting again, ** if NO disconnected jobs. ** **----------------------------------------------- */ /* ** Look for a disconnected job.
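**
**	(Recovery path in short: the job bounced above completes with
**	SCSI_STATUS_QUEUE_FULL, while ncr_freeze_devq pulls the
**	not-yet-started jobs for the same path off the start queue as
**	HS_STALL, which ncr_complete turns into CAM_REQUEUE_REQ on a
**	frozen device queue, so CAM replays them later.)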
*/ cp = np->link_nccb; while (cp && cp->host_status != HS_DISCONNECT) cp = cp->link_nccb; /* ** if there is one, ... */ if (cp) { /* ** wait for reselection */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect)); return; } /* ** else remove the interrupt. */ device_printf(np->dev, "queue empty.\n"); WRITESCRIPT(start1[0], SCR_INT ^ IFFALSE (0)); break; } out: OUTB (nc_dcntl, np->rv_dcntl | STD); } /*========================================================== ** ** ** Acquire a control block ** ** **========================================================== */ static nccb_p ncr_get_nccb (ncb_p np, u_long target, u_long lun) { lcb_p lp; nccb_p cp = NULL; /* ** Lun structure available ? */ lp = np->target[target].lp[lun]; if (lp) { cp = lp->next_nccb; /* ** Look for free CCB */ while (cp && cp->magic) { cp = cp->next_nccb; } } /* ** if nothing available, create one. */ if (cp == NULL) cp = ncr_alloc_nccb(np, target, lun); if (cp != NULL) { if (cp->magic) { device_printf(np->dev, "Bogus free cp found\n"); return (NULL); } cp->magic = 1; } return (cp); } /*========================================================== ** ** ** Release one control block ** ** **========================================================== */ static void ncr_free_nccb (ncb_p np, nccb_p cp) { /* ** sanity */ assert (cp != NULL); cp -> host_status = HS_IDLE; cp -> magic = 0; } /*========================================================== ** ** ** Allocation of resources for Targets/Luns/Tags. ** ** **========================================================== */ static nccb_p ncr_alloc_nccb (ncb_p np, u_long target, u_long lun) { tcb_p tp; lcb_p lp; nccb_p cp; assert (np != NULL); if (target>=MAX_TARGET) return(NULL); if (lun >=MAX_LUN ) return(NULL); tp=&np->target[target]; if (!tp->jump_tcb.l_cmd) { /* ** initialize it. */ tp->jump_tcb.l_cmd = (SCR_JUMP^IFFALSE (DATA (0x80 + target))); tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr; tp->getscr[0] = (np->features & FE_PFEN)? SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[1] = vtophys (&tp->tinfo.sval); tp->getscr[2] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_sxfer); tp->getscr[3] = (np->features & FE_PFEN)? 
SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[4] = vtophys (&tp->tinfo.wval); tp->getscr[5] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_scntl3); assert (((offsetof(struct ncr_reg, nc_sxfer) ^ (offsetof(struct tcb ,tinfo) + offsetof(struct ncr_target_tinfo, sval))) & 3) == 0); assert (((offsetof(struct ncr_reg, nc_scntl3) ^ (offsetof(struct tcb, tinfo) + offsetof(struct ncr_target_tinfo, wval))) &3) == 0); tp->call_lun.l_cmd = (SCR_CALL); tp->call_lun.l_paddr = NCB_SCRIPT_PHYS (np, resel_lun); tp->jump_lcb.l_cmd = (SCR_JUMP); tp->jump_lcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); np->jump_tcb.l_paddr = vtophys (&tp->jump_tcb); } /* ** Logic unit control block */ lp = tp->lp[lun]; if (!lp) { /* ** Allocate a lcb */ lp = (lcb_p) malloc (sizeof (struct lcb), M_DEVBUF, M_NOWAIT | M_ZERO); if (!lp) return(NULL); /* ** Initialize it */ lp->jump_lcb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (lun))); lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr; lp->call_tag.l_cmd = (SCR_CALL); lp->call_tag.l_paddr = NCB_SCRIPT_PHYS (np, resel_tag); lp->jump_nccb.l_cmd = (SCR_JUMP); lp->jump_nccb.l_paddr = NCB_SCRIPTH_PHYS (np, aborttag); lp->actlink = 1; /* ** Chain into LUN list */ tp->jump_lcb.l_paddr = vtophys (&lp->jump_lcb); tp->lp[lun] = lp; } /* ** Allocate a nccb */ cp = (nccb_p) malloc (sizeof (struct nccb), M_DEVBUF, M_NOWAIT|M_ZERO); if (!cp) return (NULL); if (DEBUG_FLAGS & DEBUG_ALLOC) { printf ("new nccb @%p.\n", cp); } /* ** Fill in physical addresses */ cp->p_nccb = vtophys (cp); /* ** Chain into reselect list */ cp->jump_nccb.l_cmd = SCR_JUMP; cp->jump_nccb.l_paddr = lp->jump_nccb.l_paddr; lp->jump_nccb.l_paddr = CCB_PHYS (cp, jump_nccb); cp->call_tmp.l_cmd = SCR_CALL; cp->call_tmp.l_paddr = NCB_SCRIPT_PHYS (np, resel_tmp); /* ** Chain into wakeup list */ cp->link_nccb = np->link_nccb; np->link_nccb = cp; /* ** Chain into CCB list */ cp->next_nccb = lp->next_nccb; lp->next_nccb = cp; return (cp); } /*========================================================== ** ** ** Build Scatter Gather Block ** ** **========================================================== ** ** The transfer area may be scattered among ** several non adjacent physical pages. ** ** We may use MAX_SCATTER blocks. ** **---------------------------------------------------------- */ static int ncr_scatter (struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen) { u_long paddr, pnext; u_short segment = 0; u_long segsize, segaddr; u_long size, csize = 0; u_long chunk = MAX_SIZE; int free; bzero (&phys->data, sizeof (phys->data)); if (!datalen) return (0); paddr = vtophys (vaddr); /* ** insert extra break points at a distance of chunk. ** We try to reduce the number of interrupts caused ** by unexpected phase changes due to disconnects. ** A typical harddisk may disconnect before ANY block. ** If we wanted to avoid unexpected phase changes at all ** we had to use a break point every 512 bytes. ** Of course the number of scatter/gather blocks is ** limited. */ free = MAX_SCATTER - 1; if (vaddr & PAGE_MASK) free -= datalen / PAGE_SIZE; if (free>1) while ((chunk * free >= 2 * datalen) && (chunk>=1024)) chunk /= 2; if(DEBUG_FLAGS & DEBUG_SCATTER) printf("ncr?:\tscattering virtual=%p size=%d chunk=%d.\n", (void *) vaddr, (unsigned) datalen, (unsigned) chunk); /* ** Build data descriptors. 
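**
**	Before the descriptors are built, note how the chunk size used
**	above was derived: a maximum transfer size is repeatedly halved
**	while the spare segment budget still covers the transfer twice
**	over.  A stand-alone sketch of that heuristic (illustrative
**	only; CHUNK_MAX is a hypothetical stand-in for the driver's
**	MAX_SIZE, whose value is not shown in this hunk):
*/

#define CHUNK_MAX	(1UL << 24)	/* hypothetical upper bound */

static unsigned long
pick_chunk(unsigned long datalen, int free_segs)
{
	unsigned long chunk = CHUNK_MAX;

	/* halve while 'free_segs' chunks still cover 2*datalen */
	if (free_segs > 1)
		while (chunk * free_segs >= 2 * datalen && chunk >= 1024)
			chunk /= 2;
	return (chunk);
}

/*
**	With the chunk chosen, build the data descriptors: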
*/
	while (datalen && (segment < MAX_SCATTER)) {

		/*
		**	this segment is empty
		*/
		segsize = 0;
		segaddr = paddr;
		pnext   = paddr;

		if (!csize) csize = chunk;

		while ((datalen) && (paddr == pnext) && (csize)) {

			/*
			**	continue this segment
			*/
			pnext = (paddr & (~PAGE_MASK)) + PAGE_SIZE;

			/*
			**	Compute max size
			*/

			size = pnext - paddr;			/* page size  */
			if (size > datalen) size = datalen;	/* data size  */
			if (size > csize  ) size = csize  ;	/* chunksize  */

			segsize += size;
			vaddr   += size;
			csize   -= size;
			datalen -= size;
			paddr    = vtophys (vaddr);
		}

		if(DEBUG_FLAGS & DEBUG_SCATTER)
			printf ("\tseg #%d  addr=%x  size=%d  (rest=%d).\n",
				segment,
				(unsigned) segaddr,
				(unsigned) segsize,
				(unsigned) datalen);

		phys->data[segment].addr = segaddr;
		phys->data[segment].size = segsize;
		segment++;
	}

	if (datalen) {
		printf("ncr?: scatter/gather failed (residue=%d).\n",
			(unsigned) datalen);
		return (-1);
	}

	return (segment);
}

/*==========================================================
**
**
**	Test the pci bus snoop logic :-(
**
**	Has to be called with interrupts disabled.
**
**
**==========================================================
*/

#ifndef NCR_IOMAPPED
static int ncr_regtest (struct ncb* np)
{
	register volatile u_int32_t data;
	/*
	**	ncr registers may NOT be cached.
	**	write 0xffffffff to a read only register area,
	**	and try to read it back.
	*/
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	}
	return (0);
}
#endif

static int ncr_snooptest (struct ncb* np)
{
	u_int32_t ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
	int	i, err=0;
#ifndef NCR_IOMAPPED
	err |= ncr_regtest (np);
	if (err) return (err);
#endif
	/*
	**	init
	*/
	pc = NCB_SCRIPTH_PHYS (np, snooptest);
	host_wr = 1;
	ncr_wr  = 2;
	/*
	**	Set memory and register.
	*/
	ncr_cache = host_wr;
	OUTL (nc_temp, ncr_wr);
	/*
	**	Start script (exchange values)
	*/
	OUTL (nc_dsp, pc);
	/*
	**	Wait 'til done (with timeout)
	*/
	for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	/*
	**	Save termination position.
	*/
	pc = INL (nc_dsp);
	/*
	**	Read memory and register.
	*/
	host_rd = ncr_cache;
	ncr_rd  = INL (nc_scratcha);
	ncr_bk  = INL (nc_temp);
	/*
	**	check for timeout
	*/
	if (i>=NCR_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	}
	/*
	**	Check termination position.
	*/
	if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) {
		printf ("CACHE TEST FAILED: script execution failed.\n");
		printf ("start=%08lx, pc=%08lx, end=%08lx\n",
			(u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc,
			(u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8);
		return (0x40);
	}
	/*
	**	Show results.
	*/
	if (host_wr != ncr_rd) {
		printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
			(int) host_wr, (int) ncr_rd);
		err |= 1;
	}
	if (host_rd != ncr_wr) {
		printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
			(int) ncr_wr, (int) host_rd);
		err |= 2;
	}
	if (ncr_bk != ncr_wr) {
		printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
			(int) ncr_wr, (int) ncr_bk);
		err |= 4;
	}
	return (err);
}

/*==========================================================
**
**
**	Profiling the drivers and targets performance.
**
**
**==========================================================
*/

/*
**	Compute the difference in milliseconds.
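**
**	A stand-alone sketch of the tick-to-millisecond conversion that
**	ncr_delta() below is meant to perform (illustrative only; the
**	ticks_to_ms name is hypothetical, and the kernel tick rate 'hz'
**	is passed explicitly here):
*/

static int
ticks_to_ms(int from, int to, int hz)
{
	/* (to - from) ticks, at hz ticks per second, in milliseconds */
	return ((to - from) * 1000 / hz);
}

/*
**	The driver's version follows.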
**/

static	int ncr_delta (int *from, int *to)
{
	if (!from) return (-1);
	if (!to)   return (-2);
	return ((to - from) * 1000 / hz);
}

#define PROFILE  cp->phys.header.stamp
static	void ncb_profile (ncb_p np, nccb_p cp)
{
	int co, da, st, en, di, se, post, work, disc;
	u_long diff;

	PROFILE.end = ticks;

	st = ncr_delta (&PROFILE.start,&PROFILE.status);
	if (st<0) return;	/* status not reached */

	da = ncr_delta (&PROFILE.start,&PROFILE.data);
	if (da<0) return;	/* No data transfer phase */

	co = ncr_delta (&PROFILE.start,&PROFILE.command);
	if (co<0) return;	/* command not executed */

	en = ncr_delta (&PROFILE.start,&PROFILE.end),
	di = ncr_delta (&PROFILE.start,&PROFILE.disconnect),
	se = ncr_delta (&PROFILE.start,&PROFILE.select);
	post = en - st;

	/*
	**	@PROFILE@  Disconnect time invalid if multiple disconnects
	*/

	if (di>=0) disc = se-di; else disc = 0;

	work = (st - co) - disc;

	diff = (np->disc_phys - np->disc_ref) & 0xff;
	np->disc_ref += diff;

	np->profile.num_trans	+= 1;
	if (cp->ccb)
		np->profile.num_bytes	+= cp->ccb->csio.dxfer_len;
	np->profile.num_disc	+= diff;
	np->profile.ms_setup	+= co;
	np->profile.ms_data	+= work;
	np->profile.ms_disc	+= disc;
	np->profile.ms_post	+= post;
}
#undef PROFILE

/*==========================================================
**
**	Determine the ncr's clock frequency.
**	This is essential for the negotiation
**	of the synchronous transfer rate.
**
**==========================================================
**
**	Note: we have to return the correct value.
**	THERE IS NO SAFE DEFAULT VALUE.
**
**	Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
**	53C860 and 53C875 rev. 1 support fast20 transfers but
**	do not have a clock doubler and so are provided with an
**	80 MHz clock.  All other fast20 boards incorporate a doubler
**	and so should be delivered with a 40 MHz clock.
**	The future fast40 chips (895/895) use a 40 MHz base clock
**	and provide a clock quadrupler (160 MHz).  The code below
**	tries to deal as cleverly as possible with all this stuff.
**
**----------------------------------------------------------
*/

/*
 *	Select NCR SCSI clock frequency
 */
static void
ncr_selectclock(ncb_p np, u_char scntl3)
{
	if (np->multiplier < 2) {
		OUTB(nc_scntl3, scntl3);
		return;
	}

	if (bootverbose >= 2)
		device_printf(np->dev, "enabling clock multiplier\n");

	OUTB(nc_stest1, DBLEN);		/* Enable clock multiplier */
	if (np->multiplier > 2) {	/* Poll bit 5 of stest4 for quadrupler */
		int i = 20;
		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
			DELAY(20);
		if (!i)
			device_printf(np->dev,
			    "the chip cannot lock the frequency\n");
	} else				/* Wait 20 micro-seconds for doubler */
		DELAY(20);
	OUTB(nc_stest3, HSC);		/* Halt the scsi clock	  */
	OUTB(nc_scntl3, scntl3);
	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
	OUTB(nc_stest3, 0x00);		/* Restart scsi clock	  */
}

/*
 *	calculate NCR SCSI clock frequency (in KHz)
 */
static unsigned
ncrgetfreq (ncb_p np, int gen)
{
	int ms = 0;
	/*
	 * Measure GEN timer delay in order
	 * to calculate SCSI clock frequency.
	 *
	 * This code will never execute too
	 * many loop iterations (if DELAY is
	 * reasonably correct).  It could return
	 * too low a delay (too high a frequency)
	 * if the CPU is slow in executing the
	 * loop for some reason (an NMI, for
	 * example).  For this reason, if multiple
	 * measurements are performed, we trust
	 * the higher delay (the lower returned
	 * frequency).
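	 *
	 * A stand-alone sketch of the conversion applied at the end of
	 * ncrgetfreq() (illustrative only; khz_from_delay is a
	 * hypothetical name, and 4440 is the driver's own empirical
	 * constant): after a measured delay of 'ms' milliseconds for a
	 * timer period proportional to (1 << gen), the clock estimate
	 * in kHz is:
	 */

static unsigned
khz_from_delay(int gen, unsigned ms)
{
	return (ms ? ((1U << gen) * 4440U) / ms : 0);
}

	/*
	 * The measurement itself: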
	 */
	OUTB (nc_stest1, 0);	/* make sure clock doubler is OFF	    */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts		    */
	(void) INW (nc_sist);	/* clear pending scsi interrupt		    */
	OUTB (nc_dien , 0);	/* mask all dma interrupts		    */
	(void) INW (nc_sist);	/* another one, just to be sure :)	    */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3	    */
	OUTB (nc_stime1, 0);	/* disable general purpose timer	    */
	OUTB (nc_stime1, gen);	/* set to nominal delay of (1<<gen) * 125us */
	while (!(INW (nc_sist) & GEN) && ms++ < 100000)
		DELAY(1000);	/* count ms (bounded wait)		    */
	OUTB (nc_stime1, 0);	/* disable general purpose timer	    */

	if (bootverbose >= 2)
		printf ("\tDelay (GEN=%d): %u msec\n", gen, ms);
	/*
	 * adjust for prescaler, and convert into KHz
	 */
	return ms ? ((1 << gen) * 4440) / ms : 0;
}

static void ncr_getclock (ncb_p np, u_char multiplier)
{
	unsigned char scntl3;
	unsigned char stest1;

	scntl3 = INB(nc_scntl3);
	stest1 = INB(nc_stest1);

	np->multiplier = 1;
	if (multiplier > 1) {
		np->multiplier = multiplier;
		np->clock_khz = 40000 * multiplier;
	} else {
		if ((scntl3 & 7) == 0) {
			unsigned f1, f2;
			/* throw away first result */
			(void) ncrgetfreq (np, 11);
			f1 = ncrgetfreq (np, 11);
			f2 = ncrgetfreq (np, 11);

			if (bootverbose >= 2)
				printf ("\tNCR clock is %uKHz, %uKHz\n", f1, f2);
			if (f1 > f2) f1 = f2;	/* trust lower result */
			if (f1 > 45000) {
				scntl3 = 5;	/* >45MHz: assume 80MHz */
			} else {
				scntl3 = 3;	/* <45MHz: assume 40MHz */
			}
		} else if ((scntl3 & 7) == 5)
			np->clock_khz = 80000;	/* Probably a 875 rev. 1 ? */
	}
}

/*=========================================================================*/

#ifdef NCR_TEKRAM_EEPROM

struct tekram_eeprom_dev {
	u_char	devmode;
#define	TKR_PARCHK	0x01
#define	TKR_TRYSYNC	0x02
#define	TKR_ENDISC	0x04
#define	TKR_STARTUNIT	0x08
#define	TKR_USETAGS	0x10
#define	TKR_TRYWIDE	0x20
	u_char	syncparam;	/* max. sync transfer rate (table ?) */
	u_char	filler1;
	u_char	filler2;
};

struct tekram_eeprom {
	struct tekram_eeprom_dev
		dev[16];
	u_char	adaptid;
	u_char	adaptmode;
#define	TKR_ADPT_GT2DRV	0x01
#define	TKR_ADPT_GT1GB	0x02
#define	TKR_ADPT_RSTBUS	0x04
#define	TKR_ADPT_ACTNEG	0x08
#define	TKR_ADPT_NOSEEK	0x10
#define	TKR_ADPT_MORLUN	0x20
	u_char	delay;		/* unit ? ( table ??? ) */
	u_char	tags;		/* use 4 times as many ...
*/ u_char filler[60]; }; static void tekram_write_bit (ncb_p np, int bit) { u_char val = 0x10 + ((bit & 1) << 1); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); OUTB (nc_gpreg, val | 0x04); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); } static int tekram_read_bit (ncb_p np) { OUTB (nc_gpreg, 0x10); DELAY(10); OUTB (nc_gpreg, 0x14); DELAY(10); return INB (nc_gpreg) & 1; } static u_short read_tekram_eeprom_reg (ncb_p np, int reg) { int bit; u_short result = 0; int cmd = 0x80 | reg; OUTB (nc_gpreg, 0x10); tekram_write_bit (np, 1); for (bit = 7; bit >= 0; bit--) { tekram_write_bit (np, cmd >> bit); } for (bit = 0; bit < 16; bit++) { result <<= 1; result |= tekram_read_bit (np); } OUTB (nc_gpreg, 0x00); return result; } static int read_tekram_eeprom(ncb_p np, struct tekram_eeprom *buffer) { u_short *p = (u_short *) buffer; u_short sum = 0; int i; if (INB (nc_gpcntl) != 0x09) { return 0; } for (i = 0; i < 64; i++) { u_short val; if((i&0x0f) == 0) printf ("%02x:", i*2); val = read_tekram_eeprom_reg (np, i); if (p) *p++ = val; sum += val; if((i&0x01) == 0x00) printf (" "); printf ("%02x%02x", val & 0xff, (val >> 8) & 0xff); if((i&0x0f) == 0x0f) printf ("\n"); } printf ("Sum = %04x\n", sum); return sum == 0x1234; } #endif /* NCR_TEKRAM_EEPROM */ static device_method_t ncr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ncr_probe), DEVMETHOD(device_attach, ncr_attach), { 0, 0 } }; static driver_t ncr_driver = { "ncr", ncr_methods, sizeof(struct ncb), }; static devclass_t ncr_devclass; DRIVER_MODULE(ncr, pci, ncr_driver, ncr_devclass, 0, 0); MODULE_DEPEND(ncr, cam, 1, 1, 1); MODULE_DEPEND(ncr, pci, 1, 1, 1); /*=========================================================================*/ #endif /* _KERNEL */ Index: head/sys/dev/netmap/netmap_freebsd.c =================================================================== --- head/sys/dev/netmap/netmap_freebsd.c (revision 313981) +++ head/sys/dev/netmap/netmap_freebsd.c (revision 313982) @@ -1,1468 +1,1468 @@ /* * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

/* $FreeBSD$ */

#include "opt_inet.h"
#include "opt_inet6.h"

#include
#include
#include
#include
#include	/* POLLIN, POLLOUT */
#include	/* types used in module initialization */
#include	/* DEV_MODULE_ORDERED */
#include
#include	/* kern_ioctl() */
#include
#include	/* vtophys */
#include	/* vtophys */
#include
#include
#include
#include
#include
#include
#include	/* sockaddrs */
#include
#include	/* kthread_add() */
#include	/* PROC_LOCK() */
#include	/* RFNOWAIT */
#include	/* sched_bind() */
#include	/* mp_maxid */
#include
#include
#include	/* IFT_ETHER */
#include	/* ether_ifdetach */
#include	/* LLADDR */
#include	/* bus_dmamap_* */
#include	/* in6_cksum_pseudo() */
#include	/* in_pseudo(), in_cksum_hdr() */

#include
#include
#include
#include

/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

void
nm_os_selinfo_init(NM_SELINFO_T *si)
{
	struct mtx *m = &si->m;
	mtx_init(m, "nm_kn_lock", NULL, MTX_DEF);
	knlist_init_mtx(&si->si.si_note, m);
}

void
nm_os_selinfo_uninit(NM_SELINFO_T *si)
{
	/* XXX kqueue(9) needed; these will mirror knlist_init. */
	knlist_delete(&si->si.si_note, curthread, 0 /* not locked */ );
	knlist_destroy(&si->si.si_note);
	/* now we don't need the mutex anymore */
	mtx_destroy(&si->m);
}

void
nm_os_ifnet_lock(void)
{
	IFNET_RLOCK();
}

void
nm_os_ifnet_unlock(void)
{
	IFNET_RUNLOCK();
}

static int netmap_use_count = 0;

void
nm_os_get_module(void)
{
	netmap_use_count++;
}

void
nm_os_put_module(void)
{
	netmap_use_count--;
}

static void
netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_undo_zombie(ifp);
}

static void
netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_make_zombie(ifp);
}

static eventhandler_tag nm_ifnet_ah_tag;
static eventhandler_tag nm_ifnet_dh_tag;

int
nm_os_ifnet_init(void)
{
	nm_ifnet_ah_tag =
		EVENTHANDLER_REGISTER(ifnet_arrival_event,
			netmap_ifnet_arrival_handler,
			NULL, EVENTHANDLER_PRI_ANY);
	nm_ifnet_dh_tag =
		EVENTHANDLER_REGISTER(ifnet_departure_event,
			netmap_ifnet_departure_handler,
			NULL, EVENTHANDLER_PRI_ANY);
	return 0;
}

void
nm_os_ifnet_fini(void)
{
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nm_ifnet_ah_tag);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, nm_ifnet_dh_tag);
}

rawsum_t
nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t
nm_os_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}

uint16_t
nm_os_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph,
				sizeof(struct nm_iphdr), 0));
#endif
}

void
nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
					size_t datalen, uint16_t *check)
{
#ifdef INET
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
				 htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
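	 *
	 * A stand-alone sketch of the fold step used on the next line
	 * (illustrative only; csum_fold mirrors nm_os_csum_fold() above,
	 * with htons() doing the final byte swap):
	 */

#include <stdint.h>
#include <arpa/inet.h>

/* Fold a 32-bit one's-complement accumulator into the final 16-bit
 * Internet checksum, returned in network byte order. */
static uint16_t
csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (htons((uint16_t)~sum));
}

	/* The driver-side computation: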
*/ *check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0)); #else static int notsupported = 0; if (!notsupported) { notsupported = 1; D("inet4 segmentation not supported"); } #endif } void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data, size_t datalen, uint16_t *check) { #ifdef INET6 *check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0); *check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0)); #else static int notsupported = 0; if (!notsupported) { notsupported = 1; D("inet6 segmentation not supported"); } #endif } /* on FreeBSD we send up one packet at a time */ void * nm_os_send_up(struct ifnet *ifp, struct mbuf *m, struct mbuf *prev) { NA(ifp)->if_input(ifp, m); return NULL; } int nm_os_mbuf_has_offld(struct mbuf *m) { return m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_SCTP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 | CSUM_TSO); } static void freebsd_generic_rx_handler(struct ifnet *ifp, struct mbuf *m) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)NA(ifp); int stolen = generic_rx_handler(ifp, m); if (!stolen) { gna->save_if_input(ifp, m); } } /* * Intercept the rx routine in the standard device driver. * Second argument is non-zero to intercept, 0 to restore */ int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept) { struct netmap_adapter *na = &gna->up.up; struct ifnet *ifp = na->ifp; if (intercept) { if (gna->save_if_input) { D("cannot intercept again"); return EINVAL; /* already set */ } gna->save_if_input = ifp->if_input; ifp->if_input = freebsd_generic_rx_handler; } else { if (!gna->save_if_input){ D("cannot restore"); return EINVAL; /* not saved */ } ifp->if_input = gna->save_if_input; gna->save_if_input = NULL; } return 0; } /* * Intercept the packet steering routine in the tx path, * so that we can decide which queue is used for an mbuf. * Second argument is non-zero to intercept, 0 to restore. * On freebsd we just intercept if_transmit. */ int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept) { struct netmap_adapter *na = &gna->up.up; struct ifnet *ifp = netmap_generic_getifp(gna); if (intercept) { na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; } else { ifp->if_transmit = na->if_transmit; } return 0; } /* * Transmit routine used by generic_netmap_txsync(). Returns 0 on success * and non-zero on error (which may be packet drops or other errors). * addr and len identify the netmap buffer, m is the (preallocated) * mbuf to use for transmissions. * * We should add a reference to the mbuf so the m_freem() at the end * of the transmission does not consume resources. * * On FreeBSD, and on multiqueue cards, we can force the queue using * if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) * i = m->m_pkthdr.flowid % adapter->num_queues; * else * i = curcpu % adapter->num_queues; * */ int nm_os_generic_xmit_frame(struct nm_os_gen_arg *a) { int ret; u_int len = a->len; struct ifnet *ifp = a->ifp; struct mbuf *m = a->m; #if __FreeBSD_version < 1100000 /* * Old FreeBSD versions. The mbuf has a cluster attached, * we need to copy from the cluster to the netmap buffer. */ if (MBUF_REFCNT(m) != 1) { D("invalid refcnt %d for %p", MBUF_REFCNT(m), m); panic("in generic_xmit_frame"); } if (m->m_ext.ext_size < len) { RD(5, "size %d < len %d", m->m_ext.ext_size, len); len = m->m_ext.ext_size; } bcopy(a->addr, m->m_data, len); #else /* __FreeBSD_version >= 1100000 */ /* New FreeBSD versions. Link the external storage to * the netmap buffer, so that no copy is necessary. 
 */
	m->m_ext.ext_buf = m->m_data = a->addr;
	m->m_ext.ext_size = len;
#endif /* __FreeBSD_version >= 1100000 */

	m->m_len = m->m_pkthdr.len = len;

	/* mbuf refcnt is not contended, no need to use atomic
	 * (a memory barrier is enough). */
	SET_MBUF_REFCNT(m, 2);

	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	m->m_pkthdr.flowid = a->ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	ret = NA(ifp)->if_transmit(ifp, m);
	return ret ? -1 : 0;
}

#if __FreeBSD_version >= 1100005
struct netmap_adapter *
netmap_getna(if_t ifp)
{
	return (NA((struct ifnet *)ifp));
}
#endif /* __FreeBSD_version >= 1100005 */

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp
 */
int
nm_os_generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called, in tx %d rx %d", *tx, *rx);
	return 0;
}

void
nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called, in txq %d rxq %d", *txq, *rxq);
	*txq = netmap_generic_rings;
	*rxq = netmap_generic_rings;
}

void
nm_os_generic_set_features(struct netmap_generic_adapter *gna)
{
	gna->rxsg = 1; /* Supported through m_copydata. */
	gna->txqdisc = 0; /* Not supported. */
}

void
nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na)
{
	ND("called");
	mit->mit_pending = 0;
	mit->mit_ring_idx = idx;
	mit->mit_na = na;
}

void
nm_os_mitigation_start(struct nm_generic_mit *mit)
{
	ND("called");
}

void
nm_os_mitigation_restart(struct nm_generic_mit *mit)
{
	ND("called");
}

int
nm_os_mitigation_active(struct nm_generic_mit *mit)
{
	ND("called");
	return 0;
}

void
nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
{
	ND("called");
}

static int
nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	return EINVAL;
}

static void
nm_vi_start(struct ifnet *ifp)
{
	panic("nm_vi_start() must not be called");
}

/*
 * Index manager of persistent virtual interfaces.
 * It is used to decide the lowest byte of the MAC address.
 * We use the same algorithm as the bridge port index management.
 */
#define NM_VI_MAX 255
static struct {
	uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */
	uint8_t active;
	struct mtx lock;
} nm_vi_indices;

void
nm_os_vi_init_index(void)
{
	int i;
	for (i = 0; i < NM_VI_MAX; i++)
		nm_vi_indices.index[i] = i;
	nm_vi_indices.active = 0;
	mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF);
}

/* return -1 if no index available */
static int
nm_vi_get_index(void)
{
	int ret;

	mtx_lock(&nm_vi_indices.lock);
	ret = nm_vi_indices.active == NM_VI_MAX ?
		-1 : nm_vi_indices.index[nm_vi_indices.active++];
	mtx_unlock(&nm_vi_indices.lock);
	return ret;
}

static void
nm_vi_free_index(uint8_t val)
{
	int i, lim;

	mtx_lock(&nm_vi_indices.lock);
	lim = nm_vi_indices.active;
	for (i = 0; i < lim; i++) {
		if (nm_vi_indices.index[i] == val) {
			/* swap index[lim-1] and index[i] */
			int tmp = nm_vi_indices.index[lim-1];
			nm_vi_indices.index[lim-1] = val;
			nm_vi_indices.index[i] = tmp;
			nm_vi_indices.active--;
			break;
		}
	}
	if (lim == nm_vi_indices.active)
		D("funny, index %u not found", val);
	mtx_unlock(&nm_vi_indices.lock);
}
#undef NM_VI_MAX

/*
 * Implementation of a netmap-capable virtual interface that is
 * registered with the system.
 * It is based on if_tap.c and ip_fw_log.c in FreeBSD 9.
 *
 * Note: Linux sets refcount to 0 on allocation of net_device,
 * then increments it on registration to the system.
 * FreeBSD sets refcount to 1 on if_alloc(), and does not
 * increment this refcount on if_attach().
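 *
 * Earlier in this file, nm_vi_indices implements a swap-with-last
 * free-index pool; a stand-alone sketch of that bookkeeping
 * (illustrative only, locking omitted; the pool_* names are
 * hypothetical):
 */

#include <stdint.h>

#define POOL_MAX 255

static uint8_t pool_index[POOL_MAX];	/* [0..active) busy, rest free */
static int pool_active;

static void
pool_init(void)
{
	int i;

	for (i = 0; i < POOL_MAX; i++)
		pool_index[i] = (uint8_t)i;
	pool_active = 0;
}

/* take the next free value, or -1 when exhausted */
static int
pool_get(void)
{
	return (pool_active == POOL_MAX ? -1 : pool_index[pool_active++]);
}

/* release 'val': swap its slot with the last busy slot and shrink */
static void
pool_put(uint8_t val)
{
	int i;

	for (i = 0; i < pool_active; i++) {
		if (pool_index[i] == val) {
			pool_index[i] = pool_index[pool_active - 1];
			pool_index[pool_active - 1] = val;
			pool_active--;
			return;
		}
	}
}

/*
 * nm_os_vi_persist() below takes one such index per interface: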
 */
int
nm_os_vi_persist(const char *name, struct ifnet **ret)
{
	struct ifnet *ifp;
	u_short macaddr_hi;
	uint32_t macaddr_mid;
	u_char eaddr[6];
	int unit = nm_vi_get_index(); /* just to decide MAC address */

	if (unit < 0)
		return EBUSY;
	/*
	 * We use the same MAC address generation method as tap,
	 * except that the highest octets are 00:be instead of 00:bd.
	 */
	macaddr_hi = htons(0x00be); /* XXX tap + 1 */
	macaddr_mid = (uint32_t) ticks;
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
	eaddr[5] = (uint8_t)unit;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		D("if_alloc failed");
		return ENOMEM;
	}
	if_initname(ifp, name, IF_DUNIT_NONE);
	ifp->if_mtu = 65536;
	ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = (void *)nm_vi_dummy;
	ifp->if_ioctl = nm_vi_dummy;
	ifp->if_start = nm_vi_start;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_capabilities |= IFCAP_LINKSTATE;
	ifp->if_capenable |= IFCAP_LINKSTATE;

	ether_ifattach(ifp, eaddr);
	*ret = ifp;
	return 0;
}

/* unregister from the system and drop the final refcount */
void
nm_os_vi_detach(struct ifnet *ifp)
{
	nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]);
	ether_ifdetach(ifp);
	if_free(ifp);
}

/* ======================== PTNETMAP SUPPORT ========================== */

#ifdef WITH_PTNETMAP_GUEST
#include
#include
#include	/* bus_dmamap_* */
#include
#include
#include

/*
 * ptnetmap memory device (memdev) for freebsd guest,
 * used to expose host netmap memory to the guest through a PCI BAR.
 */

/*
 * ptnetmap memdev private data structure
 */
struct ptnetmap_memdev {
	device_t dev;
	struct resource *pci_io;
	struct resource *pci_mem;
	struct netmap_mem_d *nm_mem;
};

static int	ptn_memdev_probe(device_t);
static int	ptn_memdev_attach(device_t);
static int	ptn_memdev_detach(device_t);
static int	ptn_memdev_shutdown(device_t);

static device_method_t ptn_memdev_methods[] = {
	DEVMETHOD(device_probe, ptn_memdev_probe),
	DEVMETHOD(device_attach, ptn_memdev_attach),
	DEVMETHOD(device_detach, ptn_memdev_detach),
	DEVMETHOD(device_shutdown, ptn_memdev_shutdown),
	DEVMETHOD_END
};

static driver_t ptn_memdev_driver = {
	PTNETMAP_MEMDEV_NAME,
	ptn_memdev_methods,
	sizeof(struct ptnetmap_memdev),
};

/* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation
 * below. */
static devclass_t ptnetmap_devclass;
DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, ptnetmap_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 1);

/*
 * Map host netmap memory through PCI-BAR in the guest OS,
 * returning physical (nm_paddr) and virtual (nm_addr) addresses
 * of the netmap memory mapped in the guest.
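 *
 * The 64-bit memory size is exposed by the device as two 32-bit
 * registers; a stand-alone sketch of the recombination performed
 * below (illustrative only; combine_memsize is a hypothetical name):
 */

#include <stdint.h>

static uint64_t
combine_memsize(uint32_t hi, uint32_t lo)
{
	/* high half first, then merge in the low half */
	return (((uint64_t)hi << 32) | lo);
}

/*
 * The iomap routine itself: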
*/ int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr, void **nm_addr, uint64_t *mem_size) { int rid; D("ptn_memdev_driver iomap"); rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR); *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI); *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) | (*mem_size << 32); /* map memory allocator */ ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY, &rid, 0, ~0, *mem_size, RF_ACTIVE); if (ptn_dev->pci_mem == NULL) { *nm_paddr = 0; - *nm_addr = 0; + *nm_addr = NULL; return ENOMEM; } *nm_paddr = rman_get_start(ptn_dev->pci_mem); *nm_addr = rman_get_virtual(ptn_dev->pci_mem); D("=== BAR %d start %lx len %lx mem_size %lx ===", PTNETMAP_MEM_PCI_BAR, (unsigned long)(*nm_paddr), (unsigned long)rman_get_size(ptn_dev->pci_mem), (unsigned long)*mem_size); return (0); } uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg) { return bus_read_4(ptn_dev->pci_io, reg); } /* Unmap host netmap memory. */ void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev) { D("ptn_memdev_driver iounmap"); if (ptn_dev->pci_mem) { bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem); ptn_dev->pci_mem = NULL; } } /* Device identification routine, return BUS_PROBE_DEFAULT on success, * positive on failure */ static int ptn_memdev_probe(device_t dev) { char desc[256]; if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID) return (ENXIO); if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID) return (ENXIO); snprintf(desc, sizeof(desc), "%s PCI adapter", PTNETMAP_MEMDEV_NAME); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } /* Device initialization routine. */ static int ptn_memdev_attach(device_t dev) { struct ptnetmap_memdev *ptn_dev; int rid; uint16_t mem_id; D("ptn_memdev_driver attach"); ptn_dev = device_get_softc(dev); ptn_dev->dev = dev; pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (ptn_dev->pci_io == NULL) { device_printf(dev, "cannot map I/O space\n"); return (ENXIO); } mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID); /* create guest allocator */ ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id); if (ptn_dev->nm_mem == NULL) { ptn_memdev_detach(dev); return (ENOMEM); } netmap_mem_get(ptn_dev->nm_mem); D("ptn_memdev_driver probe OK - host_mem_id: %d", mem_id); return (0); } /* Device removal routine. */ static int ptn_memdev_detach(device_t dev) { struct ptnetmap_memdev *ptn_dev; D("ptn_memdev_driver detach"); ptn_dev = device_get_softc(dev); if (ptn_dev->nm_mem) { netmap_mem_put(ptn_dev->nm_mem); ptn_dev->nm_mem = NULL; } if (ptn_dev->pci_mem) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem); ptn_dev->pci_mem = NULL; } if (ptn_dev->pci_io) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io); ptn_dev->pci_io = NULL; } return (0); } static int ptn_memdev_shutdown(device_t dev) { D("ptn_memdev_driver shutdown"); return bus_generic_shutdown(dev); } #endif /* WITH_PTNETMAP_GUEST */ /* * In order to track whether pages are still mapped, we hook into * the standard cdev_pager and intercept the constructor and * destructor. 
*/ struct netmap_vm_handle_t { struct cdev *dev; struct netmap_priv_d *priv; }; static int netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { struct netmap_vm_handle_t *vmh = handle; if (netmap_verbose) D("handle %p size %jd prot %d foff %jd", handle, (intmax_t)size, prot, (intmax_t)foff); if (color) *color = 0; dev_ref(vmh->dev); return 0; } static void netmap_dev_pager_dtor(void *handle) { struct netmap_vm_handle_t *vmh = handle; struct cdev *dev = vmh->dev; struct netmap_priv_d *priv = vmh->priv; if (netmap_verbose) D("handle %p", handle); netmap_dtor(priv); free(vmh, M_DEVBUF); dev_rel(dev); } static int netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct netmap_vm_handle_t *vmh = object->handle; struct netmap_priv_d *priv = vmh->priv; struct netmap_adapter *na = priv->np_na; vm_paddr_t paddr; vm_page_t page; vm_memattr_t memattr; vm_pindex_t pidx; ND("object %p offset %jd prot %d mres %p", object, (intmax_t)offset, prot, mres); memattr = object->memattr; pidx = OFF_TO_IDX(offset); paddr = netmap_mem_ofstophys(na->nm_mem, offset); if (paddr == 0) return VM_PAGER_FAIL; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake page, update it with * the new physical address. */ page = *mres; vm_page_updatefake(page, paddr, memattr); } else { /* * Replace the passed in reqpage page with our own fake page and * free up the all of the original pages. */ #ifndef VM_OBJECT_WUNLOCK /* FreeBSD < 10.x */ #define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK #define VM_OBJECT_WLOCK VM_OBJECT_LOCK #endif /* VM_OBJECT_WUNLOCK */ VM_OBJECT_WUNLOCK(object); page = vm_page_getfake(paddr, memattr); VM_OBJECT_WLOCK(object); vm_page_lock(*mres); vm_page_free(*mres); vm_page_unlock(*mres); *mres = page; vm_page_insert(page, object, pidx); } page->valid = VM_PAGE_BITS_ALL; return (VM_PAGER_OK); } static struct cdev_pager_ops netmap_cdev_pager_ops = { .cdev_pg_ctor = netmap_dev_pager_ctor, .cdev_pg_dtor = netmap_dev_pager_dtor, .cdev_pg_fault = netmap_dev_pager_fault, }; static int netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff, vm_size_t objsize, vm_object_t *objp, int prot) { int error; struct netmap_vm_handle_t *vmh; struct netmap_priv_d *priv; vm_object_t obj; if (netmap_verbose) D("cdev %p foff %jd size %jd objp %p prot %d", cdev, (intmax_t )*foff, (intmax_t )objsize, objp, prot); vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (vmh == NULL) return ENOMEM; vmh->dev = cdev; NMG_LOCK(); error = devfs_get_cdevpriv((void**)&priv); if (error) goto err_unlock; if (priv->np_nifp == NULL) { error = EINVAL; goto err_unlock; } vmh->priv = priv; priv->np_refs++; NMG_UNLOCK(); obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &netmap_cdev_pager_ops, objsize, prot, *foff, NULL); if (obj == NULL) { D("cdev_pager_allocate failed"); error = EINVAL; goto err_deref; } *objp = obj; return 0; err_deref: NMG_LOCK(); priv->np_refs--; err_unlock: NMG_UNLOCK(); // err: free(vmh, M_DEVBUF); return error; } /* * On FreeBSD the close routine is only called on the last close on * the device (/dev/netmap) so we cannot do anything useful. * To track close() on individual file descriptors we pass netmap_dtor() to * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor * when the last fd pointing to the device is closed. 
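 *
 * A minimal kernel-side sketch of that pattern (illustrative only;
 * my_open and my_dtor are hypothetical names, and the malloc tag
 * M_DEVBUF is just the one this file already uses):
 */

static void
my_dtor(void *data)
{
	free(data, M_DEVBUF);	/* runs when the last fd reference dies */
}

static int
my_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	void *priv;

	priv = malloc(sizeof(int), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return (ENOMEM);
	/* register a per-open-file destructor for priv */
	return (devfs_set_cdevpriv(priv, my_dtor));
}

/*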
 *
 * Note that FreeBSD does not even munmap() on close() so we also have
 * to track mmap() ourselves, and postpone the call to
 * netmap_dtor() until the process has no open fds and no active
 * memory maps on /dev/netmap, as in linux.
 */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}

static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	(void)dev;
	(void)oflags;
	(void)devtype;
	(void)td;

	NMG_LOCK();
	priv = netmap_priv_new();
	if (priv == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error) {
		netmap_priv_delete(priv);
	}
out:
	NMG_UNLOCK();
	return error;
}

/******************** kthread wrapper ****************/
#include

u_int
nm_os_ncpus(void)
{
	return mp_maxid + 1;
}

struct nm_kthread_ctx {
	struct thread *user_td;	/* user-space thread (the kthread creator),
				   used to send ioctls */
	struct ptnetmap_cfgentry_bhyve	cfg;

	/* worker function and parameter */
	nm_kthread_worker_fn_t worker_fn;
	void *worker_private;

	struct nm_kthread *nmk;

	/* integer to manage multiple worker contexts
	   (e.g., RX or TX on ptnetmap) */
	long type;
};

struct nm_kthread {
	struct thread		*worker;
	struct mtx		worker_lock;
	uint64_t		scheduled;	/* pending wake_up request */
	struct nm_kthread_ctx	worker_ctx;
	int			run;		/* used to stop kthread */
	int			attach_user;	/* kthread attached to user_process */
	int			affinity;
};

void inline
nm_os_kthread_wakeup_worker(struct nm_kthread *nmk)
{
	/*
	 * There may be a race between the FE and BE, which both call
	 * this function, and the worker kthread, which reads
	 * nmk->scheduled.
	 *
	 * The counter value itself is not important; what matters is
	 * simply that it has changed since the last time the kthread
	 * saw it.
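	 *
	 * A stand-alone user-space sketch of the same idea (illustrative
	 * only, using pthreads instead of the kernel mtx/msleep pair):
	 */

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t	lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	cond = PTHREAD_COND_INITIALIZER;
static uint64_t		scheduled;	/* bumped on every kick */

static void
producer_kick(void)
{
	pthread_mutex_lock(&lock);
	scheduled++;			/* value irrelevant, change matters */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void
worker_wait(uint64_t *last_seen)
{
	pthread_mutex_lock(&lock);
	while (scheduled == *last_seen)	/* coalesced kicks are fine */
		pthread_cond_wait(&cond, &lock);
	*last_seen = scheduled;
	pthread_mutex_unlock(&lock);
}

	/*
	 * The kernel-side wakeup: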
*/ mtx_lock(&nmk->worker_lock); nmk->scheduled++; if (nmk->worker_ctx.cfg.wchan) { wakeup((void *)(uintptr_t)nmk->worker_ctx.cfg.wchan); } mtx_unlock(&nmk->worker_lock); } void inline nm_os_kthread_send_irq(struct nm_kthread *nmk) { struct nm_kthread_ctx *ctx = &nmk->worker_ctx; int err; if (ctx->user_td && ctx->cfg.ioctl_fd > 0) { err = kern_ioctl(ctx->user_td, ctx->cfg.ioctl_fd, ctx->cfg.ioctl_cmd, (caddr_t)&ctx->cfg.ioctl_data); if (err) { D("kern_ioctl error: %d ioctl parameters: fd %d com %lu data %p", err, ctx->cfg.ioctl_fd, (unsigned long)ctx->cfg.ioctl_cmd, &ctx->cfg.ioctl_data); } } } static void nm_kthread_worker(void *data) { struct nm_kthread *nmk = data; struct nm_kthread_ctx *ctx = &nmk->worker_ctx; uint64_t old_scheduled = nmk->scheduled; if (nmk->affinity >= 0) { thread_lock(curthread); sched_bind(curthread, nmk->affinity); thread_unlock(curthread); } while (nmk->run) { /* * check if the parent process dies * (when kthread is attached to user process) */ if (ctx->user_td) { PROC_LOCK(curproc); thread_suspend_check(0); PROC_UNLOCK(curproc); } else { kthread_suspend_check(); } /* * if wchan is not defined, we don't have notification * mechanism and we continually execute worker_fn() */ if (!ctx->cfg.wchan) { ctx->worker_fn(ctx->worker_private); /* worker body */ } else { /* checks if there is a pending notification */ mtx_lock(&nmk->worker_lock); if (likely(nmk->scheduled != old_scheduled)) { old_scheduled = nmk->scheduled; mtx_unlock(&nmk->worker_lock); ctx->worker_fn(ctx->worker_private); /* worker body */ continue; } else if (nmk->run) { /* wait on event with one second timeout */ msleep((void *)(uintptr_t)ctx->cfg.wchan, &nmk->worker_lock, 0, "nmk_ev", hz); nmk->scheduled++; } mtx_unlock(&nmk->worker_lock); } } kthread_exit(); } void nm_os_kthread_set_affinity(struct nm_kthread *nmk, int affinity) { nmk->affinity = affinity; } struct nm_kthread * nm_os_kthread_create(struct nm_kthread_cfg *cfg, unsigned int cfgtype, void *opaque) { struct nm_kthread *nmk = NULL; if (cfgtype != PTNETMAP_CFGTYPE_BHYVE) { D("Unsupported cfgtype %u", cfgtype); return NULL; } nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO); if (!nmk) return NULL; mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF); nmk->worker_ctx.worker_fn = cfg->worker_fn; nmk->worker_ctx.worker_private = cfg->worker_private; nmk->worker_ctx.type = cfg->type; nmk->affinity = -1; /* attach kthread to user process (ptnetmap) */ nmk->attach_user = cfg->attach_user; /* store kick/interrupt configuration */ if (opaque) { nmk->worker_ctx.cfg = *((struct ptnetmap_cfgentry_bhyve *)opaque); } return nmk; } int nm_os_kthread_start(struct nm_kthread *nmk) { struct proc *p = NULL; int error = 0; if (nmk->worker) { return EBUSY; } /* check if we want to attach kthread to user process */ if (nmk->attach_user) { nmk->worker_ctx.user_td = curthread; p = curthread->td_proc; } /* enable kthread main loop */ nmk->run = 1; /* create kthread */ if((error = kthread_add(nm_kthread_worker, nmk, p, &nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld", nmk->worker_ctx.type))) { goto err; } D("nm_kthread started td %p", nmk->worker); return 0; err: D("nm_kthread start failed err %d", error); nmk->worker = NULL; return error; } void nm_os_kthread_stop(struct nm_kthread *nmk) { if (!nmk->worker) { return; } /* tell to kthread to exit from main loop */ nmk->run = 0; /* wake up kthread if it sleeps */ kthread_resume(nmk->worker); nm_os_kthread_wakeup_worker(nmk); nmk->worker = NULL; } void nm_os_kthread_delete(struct 
nm_kthread *nmk) { if (!nmk) return; if (nmk->worker) { nm_os_kthread_stop(nmk); } memset(&nmk->worker_ctx.cfg, 0, sizeof(nmk->worker_ctx.cfg)); free(nmk, M_DEVBUF); } /******************** kqueue support ****************/ /* * nm_os_selwakeup also needs to issue a KNOTE_UNLOCKED. * We use a non-zero argument to distinguish the call from the one * in kevent_scan() which instead also needs to run netmap_poll(). * The knote uses a global mutex for the time being. We might * try to reuse the one in the si, but it is not allocated * permanently so it might be a bit tricky. * * The *kqfilter function registers one or another f_event * depending on read or write mode. * In the call to f_event() td_fpop is NULL so any child function * calling devfs_get_cdevpriv() would fail - and we need it in * netmap_poll(). As a workaround we store priv into kn->kn_hook * and pass it as first argument to netmap_poll(), which then * uses the failure to tell that we are called from f_event() * and do not need the selrecord(). */ void nm_os_selwakeup(struct nm_selinfo *si) { if (netmap_verbose) D("on knote %p", &si->si.si_note); selwakeuppri(&si->si, PI_NET); /* use a non-zero hint to tell the notification from the * call done in kqueue_scan() which uses 0 */ KNOTE_UNLOCKED(&si->si.si_note, 0x100 /* notification */); } void nm_os_selrecord(struct thread *td, struct nm_selinfo *si) { selrecord(td, &si->si); } static void netmap_knrdetach(struct knote *kn) { struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook; struct selinfo *si = &priv->np_si[NR_RX]->si; D("remove selinfo %p", si); knlist_remove(&si->si_note, kn, 0); } static void netmap_knwdetach(struct knote *kn) { struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook; struct selinfo *si = &priv->np_si[NR_TX]->si; D("remove selinfo %p", si); knlist_remove(&si->si_note, kn, 0); } /* * callback from notifies (generated externally) and our * calls to kevent(). The former we just return 1 (ready) * since we do not know better. * In the latter we call netmap_poll and return 0/1 accordingly. */ static int netmap_knrw(struct knote *kn, long hint, int events) { struct netmap_priv_d *priv; int revents; if (hint != 0) { ND(5, "call from notify"); return 1; /* assume we are ready */ } priv = kn->kn_hook; /* the notification may come from an external thread, * in which case we do not want to run the netmap_poll * This should be filtered above, but check just in case. */ if (curthread != priv->np_td) { /* should not happen */ RD(5, "curthread changed %p %p", curthread, priv->np_td); return 1; } else { revents = netmap_poll(priv, events, NULL); return (events & revents) ? 1 : 0; } } static int netmap_knread(struct knote *kn, long hint) { return netmap_knrw(kn, hint, POLLIN); } static int netmap_knwrite(struct knote *kn, long hint) { return netmap_knrw(kn, hint, POLLOUT); } static struct filterops netmap_rfiltops = { .f_isfd = 1, .f_detach = netmap_knrdetach, .f_event = netmap_knread, }; static struct filterops netmap_wfiltops = { .f_isfd = 1, .f_detach = netmap_knwdetach, .f_event = netmap_knwrite, }; /* * This is called when a thread invokes kevent() to record * a change in the configuration of the kqueue(). * The 'priv' should be the same as in the netmap device. 
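 *
 * A stand-alone user-space sketch of the call that lands here
 * (illustrative only; 'fd' is assumed to be an open /dev/netmap
 * descriptor and 'kq' a kqueue() handle):
 */

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

/* Register interest in readability of fd; returns kevent()'s result. */
static int
watch_readable(int kq, int fd)
{
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}

/*
 * The kernel-side registration: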
*/ static int netmap_kqfilter(struct cdev *dev, struct knote *kn) { struct netmap_priv_d *priv; int error; struct netmap_adapter *na; struct nm_selinfo *si; int ev = kn->kn_filter; if (ev != EVFILT_READ && ev != EVFILT_WRITE) { D("bad filter request %d", ev); return 1; } error = devfs_get_cdevpriv((void**)&priv); if (error) { D("device not yet setup"); return 1; } na = priv->np_na; if (na == NULL) { D("no netmap adapter for this file descriptor"); return 1; } /* the si is indicated in the priv */ si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX]; // XXX lock(priv) ? kn->kn_fop = (ev == EVFILT_WRITE) ? &netmap_wfiltops : &netmap_rfiltops; kn->kn_hook = priv; knlist_add(&si->si.si_note, kn, 1); // XXX unlock(priv) ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s", na, na->ifp->if_xname, curthread, priv, kn, priv->np_nifp, kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH"); return 0; } static int freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td) { struct netmap_priv_d *priv; if (devfs_get_cdevpriv((void **)&priv)) { return POLLERR; } return netmap_poll(priv, events, td); } static int freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int ffla __unused, struct thread *td) { int error; struct netmap_priv_d *priv; CURVNET_SET(TD_TO_VNET(td)); error = devfs_get_cdevpriv((void **)&priv); if (error) { /* XXX ENOENT should be impossible, since the priv * is now created in the open */ if (error == ENOENT) error = ENXIO; goto out; } error = netmap_ioctl(priv, cmd, data, td); out: CURVNET_RESTORE(); return error; } extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */ struct cdevsw netmap_cdevsw = { .d_version = D_VERSION, .d_name = "netmap", .d_open = netmap_open, .d_mmap_single = netmap_mmap_single, .d_ioctl = freebsd_netmap_ioctl, .d_poll = freebsd_netmap_poll, .d_kqfilter = netmap_kqfilter, .d_close = netmap_close, }; /*--- end of kqueue support ----*/ /* * Kernel entry point. * * Initialize/finalize the module and return. * * Return 0 on success, errno on failure. */ static int netmap_loader(__unused struct module *module, int event, __unused void *arg) { int error = 0; switch (event) { case MOD_LOAD: error = netmap_init(); break; case MOD_UNLOAD: /* * if some one is still using netmap, * then the module can not be unloaded. */ if (netmap_use_count) { D("netmap module can not be unloaded - netmap_use_count: %d", netmap_use_count); error = EBUSY; break; } netmap_fini(); break; default: error = EOPNOTSUPP; break; } return (error); } #ifdef DEV_MODULE_ORDERED /* * The netmap module contains three drivers: (i) the netmap character device * driver; (ii) the ptnetmap memdev PCI device driver, (iii) the ptnet PCI * device driver. The attach() routines of both (ii) and (iii) need the * lock of the global allocator, and such lock is initialized in netmap_init(), * which is part of (i). * Therefore, we make sure that (i) is loaded before (ii) and (iii), using * the 'order' parameter of driver declaration macros. For (i), we specify * SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED * macros for (ii) and (iii). 
*/ DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE); #else /* !DEV_MODULE_ORDERED */ DEV_MODULE(netmap, netmap_loader, NULL); #endif /* DEV_MODULE_ORDERED */ MODULE_DEPEND(netmap, pci, 1, 1, 1); MODULE_VERSION(netmap, 1); /* reduce conditional code */ // linux API, use for the knlist in FreeBSD /* use a private mutex for the knlist */ Index: head/sys/dev/netmap/netmap_mem2.c =================================================================== --- head/sys/dev/netmap/netmap_mem2.c (revision 313981) +++ head/sys/dev/netmap/netmap_mem2.c (revision 313982) @@ -1,2399 +1,2399 @@ /* * Copyright (C) 2012-2014 Matteo Landi * Copyright (C) 2012-2016 Luigi Rizzo * Copyright (C) 2012-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef linux #include "bsd_glue.h" #endif /* linux */ #ifdef __APPLE__ #include "osx_glue.h" #endif /* __APPLE__ */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* MALLOC_DEFINE */ #include #include /* vtophys */ #include /* vtophys */ #include /* sockaddrs */ #include #include #include #include #include #include /* bus_dmamap_* */ /* M_NETMAP only used in here */ MALLOC_DECLARE(M_NETMAP); MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); #endif /* __FreeBSD__ */ #ifdef _WIN32 #include #endif #include #include #include #include "netmap_mem2.h" #ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY #define NETMAP_BUF_MAX_NUM 8*4096 /* if too big takes too much time to allocate */ #else #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ #endif #define NETMAP_POOL_MAX_NAMSZ 32 enum { NETMAP_IF_POOL = 0, NETMAP_RING_POOL, NETMAP_BUF_POOL, NETMAP_POOLS_NR }; struct netmap_obj_params { u_int size; u_int num; }; struct netmap_obj_pool { char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ /* ---------------------------------------------------*/ /* these are only meaningful if the pool is finalized */ /* (see 'finalized' field in netmap_mem_d) */ u_int objtotal; /* actual total number of objects. */ u_int memtotal; /* actual total memory space */ u_int numclusters; /* actual number of clusters */ u_int objfree; /* number of free objects. 
 */
	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */

	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};

#define NMA_LOCK_T	NM_MTX_T

struct netmap_mem_ops {
	int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*);
	int (*nmd_get_info)(struct netmap_mem_d *, u_int *size,
			u_int *memflags, uint16_t *id);

	vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t);
	int (*nmd_config)(struct netmap_mem_d *);
	int (*nmd_finalize)(struct netmap_mem_d *);
	void (*nmd_deref)(struct netmap_mem_d *);
	ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr);
	void (*nmd_delete)(struct netmap_mem_d *);

	struct netmap_if * (*nmd_if_new)(struct netmap_adapter *);
	void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *);
	int (*nmd_rings_create)(struct netmap_adapter *);
	void (*nmd_rings_delete)(struct netmap_adapter *);
};

typedef uint16_t nm_memid_t;

struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
	int lasterr;		/* last error for curr config */
	int active;		/* active users */
	int refcount;
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	nm_memid_t nm_id;	/* allocator identifier */
	int nm_grp;		/* iommu group id */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;

	struct netmap_mem_ops *ops;
};

/*
 * XXX need to fix the case of t0 == void
 */
#define NMD_DEFCB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd) \
{ \
	return nmd->ops->nmd_##name(nmd); \
}

#define NMD_DEFCB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \
{ \
	return nmd->ops->nmd_##name(nmd, a1); \
}

#define NMD_DEFCB3(t0, name, t1, t2, t3) \
t0 \
netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \
{ \
	return nmd->ops->nmd_##name(nmd, a1, a2, a3); \
}

#define NMD_DEFNACB(t0, name) \
t0 \
netmap_mem_##name(struct netmap_adapter *na) \
{ \
	return na->nm_mem->ops->nmd_##name(na); \
}

#define NMD_DEFNACB1(t0, name, t1) \
t0 \
netmap_mem_##name(struct netmap_adapter *na, t1 a1) \
{ \
	return na->nm_mem->ops->nmd_##name(na, a1); \
}

NMD_DEFCB1(int, get_lut, struct netmap_lut *);
NMD_DEFCB3(int, get_info, u_int *, u_int *, uint16_t *);
NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t);
static int netmap_mem_config(struct netmap_mem_d *);
NMD_DEFCB(int, config);
NMD_DEFCB1(ssize_t, if_offset, const void *);
NMD_DEFCB(void, delete);

NMD_DEFNACB(struct netmap_if *, if_new);
NMD_DEFNACB1(void, if_delete, struct netmap_if *);
NMD_DEFNACB(int, rings_create);
NMD_DEFNACB(void, rings_delete);

static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);

#define NMA_LOCK_INIT(n)	NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) #ifdef NM_DEBUG_MEM_PUTGET #define NM_DBG_REFC(nmd, func, line) \ printf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount); #else #define NM_DBG_REFC(nmd, func, line) #endif #ifdef NM_DEBUG_MEM_PUTGET void __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) #else void netmap_mem_get(struct netmap_mem_d *nmd) #endif { NMA_LOCK(nmd); nmd->refcount++; NM_DBG_REFC(nmd, func, line); NMA_UNLOCK(nmd); } #ifdef NM_DEBUG_MEM_PUTGET void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) #else void netmap_mem_put(struct netmap_mem_d *nmd) #endif { int last; NMA_LOCK(nmd); last = (--nmd->refcount == 0); NM_DBG_REFC(nmd, func, line); NMA_UNLOCK(nmd); if (last) netmap_mem_delete(nmd); } int netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) { if (nm_mem_assign_group(nmd, na->pdev) < 0) { return ENOMEM; } else { NMA_LOCK(nmd); nmd->lasterr = nmd->ops->nmd_finalize(nmd); NMA_UNLOCK(nmd); } if (!nmd->lasterr && na->pdev) netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); return nmd->lasterr; } void netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) { NMA_LOCK(nmd); netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); if (nmd->active == 1) { u_int i; /* * Reset the allocator when it falls out of use so that any * pool resources leaked by unclean application exits are * reclaimed. */ for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p; u_int j; p = &nmd->pools[i]; p->objfree = p->objtotal; /* * Reproduce the net effect of the M_ZERO malloc() * and marking of free entries in the bitmap that * occur in finalize_obj_allocator() */ memset(p->bitmap, '\0', sizeof(uint32_t) * ((p->objtotal + 31) / 32)); /* * Set all the bits in the bitmap that have * corresponding buffers to 1 to indicate they are * free. */ for (j = 0; j < p->objtotal; j++) { if (p->lut[j].vaddr != NULL) { p->bitmap[ (j>>5) ] |= ( 1 << (j & 31) ); } } } /* * Per netmap_mem_finalize_all(), * buffers 0 and 1 are reserved */ nmd->pools[NETMAP_BUF_POOL].objfree -= 2; if (nmd->pools[NETMAP_BUF_POOL].bitmap) { /* XXX This check is a workaround that prevents a * NULL pointer crash which currently happens only * with ptnetmap guests. * Removed shared-info --> is the bug still there? */ nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3; } } nmd->ops->nmd_deref(nmd); NMA_UNLOCK(nmd); } /* accessor functions */ static int netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; return 0; } static struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 100, }, [NETMAP_RING_POOL] = { .size = 9*PAGE_SIZE, .num = 200, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = NETMAP_BUF_MAX_NUM, }, }; static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 2, }, [NETMAP_RING_POOL] = { .size = 5*PAGE_SIZE, .num = 4, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = 4098, }, }; /* * nm_mem is the memory allocator used for all physical interfaces * running in netmap mode. * Virtual (VALE) ports will have each its own allocator. */ extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ struct netmap_mem_d nm_mem = { /* Our memory allocator. 
*/ .pools = { [NETMAP_IF_POOL] = { .name = "netmap_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 10, /* don't be stingy */ .nummax = 10000, /* XXX very large */ }, [NETMAP_RING_POOL] = { .name = "netmap_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "netmap_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .nm_id = 1, .nm_grp = -1, .prev = &nm_mem, .next = &nm_mem, .ops = &netmap_mem_global_ops }; static struct netmap_mem_d *netmap_last_mem_d = &nm_mem; /* blueprint for the private memory allocators */ extern struct netmap_mem_ops netmap_mem_private_ops; /* forward */ /* XXX clang is not happy about using name as a print format */ static const struct netmap_mem_d nm_blueprint = { .pools = { [NETMAP_IF_POOL] = { .name = "%s_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 1, .nummax = 100, }, [NETMAP_RING_POOL] = { .name = "%s_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "%s_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .flags = NETMAP_MEM_PRIVATE, .ops = &netmap_mem_private_ops }; /* memory allocator related sysctls */ #define STRINGIFY(x) #x #define DECLARE_SYSCTLS(id, name) \ SYSBEGIN(mem2_ ## name); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ "Default size of private netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ "Default number of private netmap " STRINGIFY(name) "s"); \ SYSEND SYSCTL_DECL(_dev_netmap); DECLARE_SYSCTLS(NETMAP_IF_POOL, if); DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); /* call with NMA_LOCK(&nm_mem) held */ static int nm_mem_assign_id_locked(struct netmap_mem_d *nmd) { nm_memid_t id; struct netmap_mem_d *scan = netmap_last_mem_d; int error = ENOMEM; do { /* we rely on unsigned wrap around */ id = scan->nm_id + 1; if (id == 0) /* reserve 0 as error value */ id = 1; scan = scan->next; if (id != scan->nm_id) { nmd->nm_id = id; nmd->prev = scan->prev; nmd->next = scan; scan->prev->next = nmd; scan->prev = nmd; netmap_last_mem_d = nmd; error = 0; break; } } while (scan != netmap_last_mem_d); return error; } /* call with NMA_LOCK(&nm_mem) *not* held */ static int nm_mem_assign_id(struct netmap_mem_d *nmd) { int ret; NMA_LOCK(&nm_mem); ret = nm_mem_assign_id_locked(nmd); NMA_UNLOCK(&nm_mem); return ret; } static void nm_mem_release_id(struct netmap_mem_d *nmd) { NMA_LOCK(&nm_mem); nmd->prev->next = nmd->next; nmd->next->prev = nmd->prev; if (netmap_last_mem_d == nmd) netmap_last_mem_d = nmd->prev; nmd->prev = nmd->next = NULL; NMA_UNLOCK(&nm_mem); } static int 
nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
{
	int err = 0, id;

	id = nm_iommu_group_id(dev);
	if (netmap_verbose)
		D("iommu_group %d", id);

	NMA_LOCK(nmd);

	if (nmd->nm_grp < 0)
		nmd->nm_grp = id;

	if (nmd->nm_grp != id)
		nmd->lasterr = err = ENOMEM;

	NMA_UNLOCK(nmd);
	return err;
}

/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
static vm_paddr_t
netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
#ifndef _WIN32
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) +
			offset % p[i]._objsize;
#else
		pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr);
		pa.QuadPart += offset % p[i]._objsize;
#endif
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
#ifndef _WIN32
	return 0;	// XXX bad address
#else
	vm_paddr_t res;
	res.QuadPart = 0;
	return res;
#endif
}

#ifdef _WIN32

/*
 * win32_build_virtual_memory_for_userspace
 *
 * This function gathers all the objects that make up the pools and maps
 * a single contiguous virtual memory region for userspace.
 * It works this way:
 * 1 - allocate a Memory Descriptor List as wide as the sum
 *	of the memory needed for the pools
 * 2 - cycle through all the clusters of every pool, and for each one:
 *
 *	2a - get the list of its physical address descriptors
 *	2b - calculate the offset into the array of page descriptors of
 *	     the main MDL
 *	2c - copy the descriptors of the cluster into the main MDL
 *
 * 3 - return the resulting MDL, which still needs to be mapped in
 *	userland
 *
 * In this way we end up with a single MDL that describes all the memory
 * of the objects in every pool.
 */
PMDL
win32_build_user_vm_map(struct netmap_mem_d* nmd)
{
	int i, j;
	u_int memsize, memflags, ofs = 0;
	PMDL mainMdl, tempMdl;

	if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) {
		D("memory not finalised yet");
		return NULL;
	}

	mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL);
	if (mainMdl == NULL) {
		D("failed to allocate mdl");
		return NULL;
	}

	NMA_LOCK(nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		struct netmap_obj_pool *p = &nmd->pools[i];
		int clsz = p->_clustsize;
		int clobjs = p->_clustentries; /* objects per cluster */
		int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz);
		PPFN_NUMBER pSrc, pDst;

		/* each pool has a different cluster size so we need to reallocate */
		tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL);
		if (tempMdl == NULL) {
			NMA_UNLOCK(nmd);
			D("fail to allocate tempMdl");
			IoFreeMdl(mainMdl);
			return NULL;
		}
		pSrc = MmGetMdlPfnArray(tempMdl);
		/* create one entry per cluster, the lut[] has one entry per object */
		for (j = 0; j < p->numclusters; j++, ofs += clsz) {
			pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)];
			MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz);
			MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */
			RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */
			mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ?
*/ } IoFreeMdl(tempMdl); } NMA_UNLOCK(nmd); return mainMdl; } #endif /* _WIN32 */ /* * helper function for OS-specific mmap routines (currently only windows). * Given an nmd and a pool index, returns the cluster size and number of clusters. * Returns 0 if memory is finalised and the pool is valid, otherwise 1. * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change. */ int netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters) { if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR) return 1; /* invalid arguments */ // NMA_LOCK_ASSERT(nmd); if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { *clustsize = *numclusters = 0; return 1; /* not ready yet */ } *clustsize = nmd->pools[pool]._clustsize; *numclusters = nmd->pools[pool].numclusters; return 0; /* success */ } static int netmap_mem2_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags, nm_memid_t *id) { int error = 0; NMA_LOCK(nmd); error = netmap_mem_config(nmd); if (error) goto out; if (size) { if (nmd->flags & NETMAP_MEM_FINALIZED) { *size = nmd->nm_totalsize; } else { int i; *size = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = nmd->pools + i; *size += (p->_numclusters * p->_clustsize); } } } if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: NMA_UNLOCK(nmd); return error; } /* * we store objects by kernel address, need to find the offset * within the pool to export the value to userspace. * Algorithm: scan until we find the cluster, then add the * actual offset in the cluster */ static ssize_t netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) { int i, k = p->_clustentries, n = p->objtotal; ssize_t ofs = 0; for (i = 0; i < n; i += k, ofs += p->_clustsize) { const char *base = p->lut[i].vaddr; ssize_t relofs = (const char *) vaddr - base; if (relofs < 0 || relofs >= p->_clustsize) continue; ofs = ofs + relofs; ND("%s: return offset %d (cluster %d) for pointer %p", p->name, ofs, i, vaddr); return ofs; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); return 0; /* An error occurred */ } /* Helper functions which convert virtual addresses to offsets */ #define netmap_if_offset(n, v) \ netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) static ssize_t netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) { ssize_t v; NMA_LOCK(nmd); v = netmap_if_offset(nmd, addr); NMA_UNLOCK(nmd); return v; } /* * report the index, and use start position as a hint, * otherwise buffer allocation becomes terribly expensive. 
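 *
 * An illustrative sketch of a caller (not part of the driver): to take
 * one object of the pool's own size and remember where the scan stopped,
 *
 *	uint32_t pos = 0, index;
 *	void *buf = netmap_obj_malloc(p, p->_objsize, &pos, &index);
 *
 * Buffer index b lives in bitmap word b>>5, bit b&31, so feeding the
 * returned 'pos' back into the next call skips bitmap words that are
 * already known to be full.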
 */
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j = 0;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots) {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * free by index, not by address.
 * XXX should we also cleanup the content ?
 */
static int
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	uint32_t *ptr, mask;

	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return 1;
	}
	ptr = &p->bitmap[j / 32];
	mask = (1 << (j % 32));
	if (*ptr & mask) {
		D("ouch, double free on buffer %d", j);
		return 1;
	} else {
		*ptr |= mask;
		p->objfree++;
		return 0;
	}
}

/*
 * free by address. This is slow but is only used for a few
 * objects (rings, nifp)
 */
static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_mem_bufsize(n)	\
	((n)->pools[NETMAP_BUF_POOL]._objsize)

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index)

#if 0 // XXX unused
/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))
#endif

/*
 * allocate extra buffers in a linked list.
 * returns the actual number.
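 *
 * The list needs no extra metadata: the first 4 bytes of each allocated
 * buffer store the index of the previously allocated one, and index 0
 * acts as the 'null' terminator, so the whole chain lives inside the
 * buffers themselves (netmap_extra_free() below walks it the same way).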
 */
uint32_t
netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
{
	struct netmap_mem_d *nmd = na->nm_mem;
	uint32_t i, pos = 0; /* opaque, scan position in the bitmap */

	NMA_LOCK(nmd);

	*head = 0;	/* default, 'null' index ie empty list */
	for (i = 0; i < n; i++) {
		uint32_t cur = *head;	/* save current head */
		uint32_t *p = netmap_buf_malloc(nmd, &pos, head);
		if (p == NULL) {
			D("no more buffers after %d of %d", i, n);
			*head = cur; /* restore */
			break;
		}
		ND(5, "allocate buffer %d -> %d", *head, cur);
		*p = cur; /* link to previous head */
	}

	NMA_UNLOCK(nmd);

	return i;
}

static void
netmap_extra_free(struct netmap_adapter *na, uint32_t head)
{
	struct lut_entry *lut = na->na_lut.lut;
	struct netmap_mem_d *nmd = na->nm_mem;
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	uint32_t i, cur, *buf;

	ND("freeing the extra list");
	for (i = 0; head >= 2 && head < p->objtotal; i++) {
		cur = head;
		buf = lut[head].vaddr;
		head = *buf;
		*buf = 0;
		if (netmap_obj_free(p, cur))
			break;
	}
	if (head != 0)
		D("breaking with head %d", head);
	if (netmap_verbose)
		D("freed %d buffers", i);
}


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;	/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			D("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}

static void
netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i;

	for (i = 0; i < n; i++) {
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}
}

static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}


static void
netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	u_int i;

	for (i = 0; i < n; i++) {
		if (slot[i].buf_idx > 1)	/* buffers 0 and 1 are reserved */
			netmap_free_buf(nmd, slot[i].buf_idx);
	}
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;

		/*
		 * Free each cluster allocated in
		 * netmap_finalize_obj_allocator().  The cluster start
		 * addresses are stored at multiples of p->_clustentries
		 * in the lut.
		 */
		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
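 * (An illustrative instance of the computation below, assuming 4 KB
 *  pages: netmap_config_obj_allocator(p, 1000, 2048) packs two 2048-byte
 *  objects per page, giving clustentries = 2, clustsize = 4096 and
 *  500 clusters, with objtotal staying at 1000.)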
* Internally we may round up both numbers, as we allocate objects * in small clusters multiple of the page size. * We need to keep track of objtotal and clustentries, * as they are needed when freeing memory. * * XXX note -- userspace needs the buffers to be contiguous, * so we cannot afford gaps at the end of a cluster. */ /* call with NMA_LOCK held */ static int netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) { int i; u_int clustsize; /* the cluster size, multiple of page size */ u_int clustentries; /* how many objects per entry */ /* we store the current request, so we can * detect configuration changes later */ p->r_objtotal = objtotal; p->r_objsize = objsize; #define MAX_CLUSTSIZE (1<<22) // 4 MB #define LINE_ROUND NM_CACHE_ALIGN // 64 if (objsize >= MAX_CLUSTSIZE) { /* we could do it but there is no point */ D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* make sure objsize is a multiple of LINE_ROUND */ i = (objsize & (LINE_ROUND - 1)); if (i) { D("XXX aligning object by %d bytes", LINE_ROUND - i); objsize += LINE_ROUND - i; } if (objsize < p->objminsize || objsize > p->objmaxsize) { D("requested objsize %d out of range [%d, %d]", objsize, p->objminsize, p->objmaxsize); return EINVAL; } if (objtotal < p->nummin || objtotal > p->nummax) { D("requested objtotal %d out of range [%d, %d]", objtotal, p->nummin, p->nummax); return EINVAL; } /* * Compute number of objects using a brute-force approach: * given a max cluster size, * we try to fill it with objects keeping track of the * wasted space to the next page boundary. */ for (clustentries = 0, i = 1;; i++) { u_int delta, used = i * objsize; if (used > MAX_CLUSTSIZE) break; delta = used % PAGE_SIZE; if (delta == 0) { // exact solution clustentries = i; break; } } /* exact solution not found */ if (clustentries == 0) { D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* compute clustsize */ clustsize = clustentries * objsize; if (netmap_verbose) D("objsize %d clustsize %d objects %d", objsize, clustsize, clustentries); /* * The number of clusters is n = ceil(objtotal/clustentries) * objtotal' = n * clustentries */ p->_clustentries = clustentries; p->_clustsize = clustsize; p->_numclusters = (objtotal + clustentries - 1) / clustentries; /* actual values (may be larger than requested) */ p->_objsize = objsize; p->_objtotal = p->_numclusters * clustentries; return 0; } static struct lut_entry * nm_alloc_lut(u_int nobj) { size_t n = sizeof(struct lut_entry) * nobj; struct lut_entry *lut; #ifdef linux lut = vmalloc(n); #else lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO); #endif return lut; } /* call with NMA_LOCK held */ static int netmap_finalize_obj_allocator(struct netmap_obj_pool *p) { int i; /* must be signed */ size_t n; /* optimistically assume we have enough memory */ p->numclusters = p->_numclusters; p->objtotal = p->_objtotal; p->lut = nm_alloc_lut(p->objtotal); if (p->lut == NULL) { D("Unable to create lookup table for '%s'", p->name); goto clean; } /* Allocate the bitmap */ n = (p->objtotal + 31) / 32; p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO); if (p->bitmap == NULL) { D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, p->name); goto clean; } p->bitmap_slots = n; /* * Allocate clusters, init pointers and bitmap */ n = p->_clustsize; for (i = 0; i < (int)p->objtotal;) { int lim = i + p->_clustentries; char *clust; /* * XXX Note, we only need contigmalloc() for buffers attached * to native interfaces. 
In all other cases (nifp, netmap rings * and even buffers for VALE ports or emulated interfaces) we * can live with standard malloc, because the hardware will not * access the pages directly. */ clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (clust == NULL) { /* * If we get here, there is a severe memory shortage, * so halve the allocated memory to reclaim some. */ D("Unable to create cluster at %d for '%s' allocator", i, p->name); if (i < 2) /* nothing to halve */ goto out; lim = i / 2; for (i--; i >= lim; i--) { p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); if (i % p->_clustentries == 0 && p->lut[i].vaddr) contigfree(p->lut[i].vaddr, n, M_NETMAP); p->lut[i].vaddr = NULL; } out: p->objtotal = i; /* we may have stopped in the middle of a cluster */ p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; break; } /* * Set bitmap and lut state for all buffers in the current * cluster. * * [i, lim) is the set of buffer indexes that cover the * current cluster. * * 'clust' is really the address of the current buffer in * the current cluster as we index through it with a stride * of p->_objsize. */ for (; i < lim; i++, clust += p->_objsize) { p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); p->lut[i].vaddr = clust; p->lut[i].paddr = vtophys(clust); } } p->objfree = p->objtotal; p->memtotal = p->numclusters * p->_clustsize; if (p->objfree == 0) goto clean; if (netmap_verbose) D("Pre-allocated %d clusters (%d/%dKB) for '%s'", p->numclusters, p->_clustsize >> 10, p->memtotal >> 10, p->name); return 0; clean: netmap_reset_obj_allocator(p); return ENOMEM; } /* call with lock held */ static int netmap_memory_config_changed(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { if (nmd->pools[i].r_objsize != netmap_params[i].size || nmd->pools[i].r_objtotal != netmap_params[i].num) return 1; } return 0; } static void netmap_mem_reset_all(struct netmap_mem_d *nmd) { int i; if (netmap_verbose) D("resetting %p", nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } static int netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) { int i, lim = p->_objtotal; if (na->pdev == NULL) return 0; #if defined(__FreeBSD__) (void)i; (void)lim; D("unsupported on FreeBSD"); #elif defined(_WIN32) (void)i; (void)lim; D("unsupported on Windows"); //XXX_ale, really? #else /* linux */ for (i = 2; i < lim; i++) { netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr); } #endif /* linux */ return 0; } static int netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) { #if defined(__FreeBSD__) D("unsupported on FreeBSD"); #elif defined(_WIN32) D("unsupported on Windows"); //XXX_ale, really? 
#else /* linux */ int i, lim = p->_objtotal; if (na->pdev == NULL) return 0; for (i = 2; i < lim; i++) { netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr, p->lut[i].vaddr); } #endif /* linux */ return 0; } static int netmap_mem_finalize_all(struct netmap_mem_d *nmd) { int i; if (nmd->flags & NETMAP_MEM_FINALIZED) return 0; nmd->lasterr = 0; nmd->nm_totalsize = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); if (nmd->lasterr) goto error; nmd->nm_totalsize += nmd->pools[i].memtotal; } /* buffers 0 and 1 are reserved */ nmd->pools[NETMAP_BUF_POOL].objfree -= 2; nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3; nmd->flags |= NETMAP_MEM_FINALIZED; if (netmap_verbose) D("interfaces %d KB, rings %d KB, buffers %d MB", nmd->pools[NETMAP_IF_POOL].memtotal >> 10, nmd->pools[NETMAP_RING_POOL].memtotal >> 10, nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); if (netmap_verbose) D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); return 0; error: netmap_mem_reset_all(nmd); return nmd->lasterr; } static void netmap_mem_private_delete(struct netmap_mem_d *nmd) { if (nmd == NULL) return; if (netmap_verbose) D("deleting %p", nmd); if (nmd->active > 0) D("bug: deleting mem allocator with active=%d!", nmd->active); nm_mem_release_id(nmd); if (netmap_verbose) D("done deleting %p", nmd); NMA_LOCK_DESTROY(nmd); free(nmd, M_DEVBUF); } static int netmap_mem_private_config(struct netmap_mem_d *nmd) { /* nothing to do, we are configured on creation * and configuration never changes thereafter */ return 0; } static int netmap_mem_private_finalize(struct netmap_mem_d *nmd) { int err; err = netmap_mem_finalize_all(nmd); if (!err) nmd->active++; return err; } static void netmap_mem_private_deref(struct netmap_mem_d *nmd) { if (--nmd->active <= 0) netmap_mem_reset_all(nmd); } /* * allocator for private memory */ struct netmap_mem_d * netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr) { struct netmap_mem_d *d = NULL; struct netmap_obj_params p[NETMAP_POOLS_NR]; int i, err; u_int v, maxd; d = malloc(sizeof(struct netmap_mem_d), M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { err = ENOMEM; goto error; } *d = nm_blueprint; err = nm_mem_assign_id(d); if (err) goto error; /* account for the fake host rings */ txr++; rxr++; /* copy the min values */ for (i = 0; i < NETMAP_POOLS_NR; i++) { p[i] = netmap_min_priv_params[i]; } /* possibly increase them to fit user request */ v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); if (p[NETMAP_IF_POOL].size < v) p[NETMAP_IF_POOL].size = v; v = 2 + 4 * npipes; if (p[NETMAP_IF_POOL].num < v) p[NETMAP_IF_POOL].num = v; maxd = (txd > rxd) ? txd : rxd; v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; if (p[NETMAP_RING_POOL].size < v) p[NETMAP_RING_POOL].size = v; /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) * and two rx rings (again, 1 normal and 1 fake host) */ v = txr + rxr + 8 * npipes; if (p[NETMAP_RING_POOL].num < v) p[NETMAP_RING_POOL].num = v; /* for each pipe we only need the buffers for the 4 "real" rings. * On the other end, the pipe ring dimension may be different from * the parent port ring dimension. 
As a compromise, we allocate twice the * space actually needed if the pipe rings were the same size as the parent rings */ v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ if (p[NETMAP_BUF_POOL].num < v) p[NETMAP_BUF_POOL].num = v; if (netmap_verbose) D("req if %d*%d ring %d*%d buf %d*%d", p[NETMAP_IF_POOL].num, p[NETMAP_IF_POOL].size, p[NETMAP_RING_POOL].num, p[NETMAP_RING_POOL].size, p[NETMAP_BUF_POOL].num, p[NETMAP_BUF_POOL].size); for (i = 0; i < NETMAP_POOLS_NR; i++) { snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, nm_blueprint.pools[i].name, name); err = netmap_config_obj_allocator(&d->pools[i], p[i].num, p[i].size); if (err) goto error; } d->flags &= ~NETMAP_MEM_FINALIZED; NMA_LOCK_INIT(d); return d; error: netmap_mem_private_delete(d); if (perr) *perr = err; return NULL; } /* call with lock held */ static int netmap_mem_global_config(struct netmap_mem_d *nmd) { int i; if (nmd->active) /* already in use, we cannot change the configuration */ goto out; if (!netmap_memory_config_changed(nmd)) goto out; ND("reconfiguring"); if (nmd->flags & NETMAP_MEM_FINALIZED) { /* reset previous allocation */ for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], netmap_params[i].num, netmap_params[i].size); if (nmd->lasterr) goto out; } out: return nmd->lasterr; } static int netmap_mem_global_finalize(struct netmap_mem_d *nmd) { int err; /* update configuration if changed */ if (netmap_mem_global_config(nmd)) return nmd->lasterr; nmd->active++; if (nmd->flags & NETMAP_MEM_FINALIZED) { /* may happen if config is not changed */ ND("nothing to do"); goto out; } if (netmap_mem_finalize_all(nmd)) goto out; nmd->lasterr = 0; out: if (nmd->lasterr) nmd->active--; err = nmd->lasterr; return err; } static void netmap_mem_global_delete(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_destroy_obj_allocator(&nm_mem.pools[i]); } NMA_LOCK_DESTROY(&nm_mem); } int netmap_mem_init(void) { NMA_LOCK_INIT(&nm_mem); netmap_mem_get(&nm_mem); return (0); } void netmap_mem_fini(void) { netmap_mem_put(&nm_mem); } static void netmap_free_rings(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { struct netmap_kring *kring = &NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) { ND("skipping ring %s (ring %p, users %d)", kring->name, ring, kring->users); continue; } if (i != nma_get_nrings(na, t) || na->na_flags & NAF_HOST_RINGS) netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots); netmap_ring_free(na->nm_mem, ring); kring->ring = NULL; } } } /* call with NMA_LOCK held * * * Allocate netmap rings and buffers for this card * The rings are contiguous, but have variable size. * The kring array must follow the layout described * in netmap_krings_create(). 
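 *
 * Each ring takes sizeof(struct netmap_ring) plus one struct netmap_slot
 * per descriptor; ring->buf_ofs stores the distance from the ring to the
 * start of the buffer pool, so userspace can locate buffers relative to
 * the ring it has mapped.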
 */
static int
netmap_mem2_rings_create(struct netmap_adapter *na)
{
	enum txrx t;

	NMA_LOCK(na->nm_mem);
	for_rx_tx(t) {
		u_int i;

		for (i = 0; i <= nma_get_nrings(na, t); i++) {
			struct netmap_kring *kring = &NMR(na, t)[i];
			struct netmap_ring *ring = kring->ring;
			u_int len, ndesc;

			if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) {
				/* unneeded, or already created by somebody else */
				ND("skipping ring %s", kring->name);
				continue;
			}
			ndesc = kring->nkr_num_slots;
			len = sizeof(struct netmap_ring) +
				  ndesc * sizeof(struct netmap_slot);
			ring = netmap_ring_malloc(na->nm_mem, len);
			if (ring == NULL) {
				D("Cannot allocate %s_ring", nm_txrx2str(t));
				goto cleanup;
			}
			ND("txring at %p", ring);
			kring->ring = ring;
			*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
			*(int64_t *)(uintptr_t)&ring->buf_ofs =
			    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
				na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
				netmap_ring_offset(na->nm_mem, ring);

			/* copy values from kring */
			ring->head = kring->rhead;
			ring->cur = kring->rcur;
			ring->tail = kring->rtail;
			*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
				netmap_mem_bufsize(na->nm_mem);
			ND("%s h %d c %d t %d", kring->name,
				ring->head, ring->cur, ring->tail);
			ND("initializing slots for %s_ring", nm_txrx2str(t));
			if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) {
				/* this is a real ring */
				if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
					D("Cannot allocate buffers for %s_ring", nm_txrx2str(t));
					goto cleanup;
				}
			} else {
				/* this is a fake ring, set all indices to 0 */
				netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0);
			}
			/* ring info */
			*(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id;
			*(uint16_t *)(uintptr_t)&ring->dir = kring->tx;
		}
	}
	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}

static void
netmap_mem2_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	NMA_LOCK(na->nm_mem);

	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}


/* call with NMA_LOCK held */
/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
static struct netmap_if *
netmap_mem2_if_new(struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, n[NR_TXRX], ntot;
	enum txrx t;

	ntot = 0;
	for_rx_tx(t) {
		/* account for the (eventually fake) host rings */
		n[t] = nma_get_nrings(na, t) + 1;
		ntot += n[t];
	}
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t));
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < n[NR_TX]; i++) {
		if (na->tx_rings[i].ring == NULL) {
			// XXX maybe use the offset of an error ring,
			// like we do for buffers?
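			// (in effect, the zero offset serves as a sentinel
			// for "no ring was created here")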
*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 0; continue; } *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base; } for (i = 0; i < n[NR_RX]; i++) { if (na->rx_rings[i].ring == NULL) { // XXX maybe use the offset of an error ring, // like we do for buffers? *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = 0; continue; } *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base; } NMA_UNLOCK(na->nm_mem); return (nifp); } static void netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { if (nifp == NULL) /* nothing to do */ return; NMA_LOCK(na->nm_mem); if (nifp->ni_bufs_head) netmap_extra_free(na, nifp->ni_bufs_head); netmap_if_free(na->nm_mem, nifp); NMA_UNLOCK(na->nm_mem); } static void netmap_mem_global_deref(struct netmap_mem_d *nmd) { nmd->active--; if (!nmd->active) nmd->nm_grp = -1; if (netmap_verbose) D("active = %d", nmd->active); } struct netmap_mem_ops netmap_mem_global_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem_global_config, .nmd_finalize = netmap_mem_global_finalize, .nmd_deref = netmap_mem_global_deref, .nmd_delete = netmap_mem_global_delete, .nmd_if_offset = netmap_mem2_if_offset, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; struct netmap_mem_ops netmap_mem_private_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem_private_config, .nmd_finalize = netmap_mem_private_finalize, .nmd_deref = netmap_mem_private_deref, .nmd_if_offset = netmap_mem2_if_offset, .nmd_delete = netmap_mem_private_delete, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; int netmap_mem_pools_info_get(struct nmreq *nmr, struct netmap_adapter *na) { uintptr_t *pp = (uintptr_t *)&nmr->nr_arg1; struct netmap_pools_info *upi = (struct netmap_pools_info *)(*pp); struct netmap_mem_d *nmd = na->nm_mem; struct netmap_pools_info pi; unsigned int memsize; uint16_t memid; int ret; if (!nmd) { return -1; } ret = netmap_mem_get_info(nmd, &memsize, NULL, &memid); if (ret) { return ret; } pi.memsize = memsize; pi.memid = memid; pi.if_pool_offset = 0; pi.if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal; pi.if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize; pi.ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal; pi.ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal; pi.ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize; pi.buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal + nmd->pools[NETMAP_RING_POOL].memtotal; pi.buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; pi.buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; ret = copyout(&pi, upi, sizeof(pi)); if (ret) { return ret; } return 0; } #ifdef WITH_PTNETMAP_GUEST struct mem_pt_if { struct mem_pt_if *next; struct ifnet *ifp; unsigned int nifp_offset; }; /* Netmap allocator for ptnetmap guests. 
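 * Unlike the allocators above, this one does not own its memory: the
 * host prepares all pools and exports them to the guest through the PCI
 * BAR of the ptnetmap memory device, so finalize only has to map the BAR
 * and rebuild a local lookup table from the parameters (buffer size,
 * count and pool offset) read from the device registers.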
*/ struct netmap_mem_ptg { struct netmap_mem_d up; vm_paddr_t nm_paddr; /* physical address in the guest */ void *nm_addr; /* virtual address in the guest */ struct netmap_lut buf_lut; /* lookup table for BUF pool in the guest */ nm_memid_t host_mem_id; /* allocator identifier in the host */ struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */ struct mem_pt_if *pt_ifs; /* list of interfaces in passthrough */ }; /* Link a passthrough interface to a passthrough netmap allocator. */ static int netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp, unsigned int nifp_offset) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *ptif = malloc(sizeof(*ptif), M_NETMAP, M_NOWAIT | M_ZERO); if (!ptif) { return ENOMEM; } NMA_LOCK(nmd); ptif->ifp = ifp; ptif->nifp_offset = nifp_offset; if (ptnmd->pt_ifs) { ptif->next = ptnmd->pt_ifs; } ptnmd->pt_ifs = ptif; NMA_UNLOCK(nmd); D("added (ifp=%p,nifp_offset=%u)", ptif->ifp, ptif->nifp_offset); return 0; } /* Called with NMA_LOCK(nmd) held. */ static struct mem_pt_if * netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *curr; for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { if (curr->ifp == ifp) { return curr; } } return NULL; } /* Unlink a passthrough interface from a passthrough netmap allocator. */ int netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *prev = NULL; struct mem_pt_if *curr; int ret = -1; NMA_LOCK(nmd); for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { if (curr->ifp == ifp) { if (prev) { prev->next = curr->next; } else { ptnmd->pt_ifs = curr->next; } D("removed (ifp=%p,nifp_offset=%u)", curr->ifp, curr->nifp_offset); free(curr, M_NETMAP); ret = 0; break; } prev = curr; } NMA_UNLOCK(nmd); return ret; } static int netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { return EINVAL; } *lut = ptnmd->buf_lut; return 0; } static int netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, u_int *size, u_int *memflags, uint16_t *id) { int error = 0; NMA_LOCK(nmd); error = nmd->ops->nmd_config(nmd); if (error) goto out; if (size) *size = nmd->nm_totalsize; if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: NMA_UNLOCK(nmd); return error; } static vm_paddr_t netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; vm_paddr_t paddr; /* if the offset is valid, just return csb->base_addr + off */ paddr = (vm_paddr_t)(ptnmd->nm_paddr + off); ND("off %lx padr %lx", off, (unsigned long)paddr); return paddr; } static int netmap_mem_pt_guest_config(struct netmap_mem_d *nmd) { /* nothing to do, we are configured on creation * and configuration never changes thereafter */ return 0; } static int netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; uint64_t mem_size; uint32_t bufsize; uint32_t nbuffers; uint32_t poolofs; vm_paddr_t paddr; char *vaddr; int i; int error = 0; nmd->active++; if (nmd->flags & NETMAP_MEM_FINALIZED) goto out; if (ptnmd->ptn_dev == NULL) { D("ptnetmap memdev not attached"); error = ENOMEM; goto err; } /* Map memory through ptnetmap-memdev BAR. 
 */
	error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr,
				      &ptnmd->nm_addr, &mem_size);
	if (error)
		goto err;

	/* Initialize the lut using the information contained in the
	 * ptnetmap memory device. */
	bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
	nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OBJNUM);

	/* allocate the lut */
	if (ptnmd->buf_lut.lut == NULL) {
		D("allocating lut");
		ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
		if (ptnmd->buf_lut.lut == NULL) {
			D("lut allocation failed");
			error = ENOMEM;
			goto err;
		}
	}

	/* we have physically contiguous memory mapped through PCI BAR */
	poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
					 PTNET_MDEV_IO_BUF_POOL_OFS);
	vaddr = (char *)(ptnmd->nm_addr) + poolofs;
	paddr = ptnmd->nm_paddr + poolofs;

	for (i = 0; i < nbuffers; i++) {
		ptnmd->buf_lut.lut[i].vaddr = vaddr;
		ptnmd->buf_lut.lut[i].paddr = paddr;
		vaddr += bufsize;
		paddr += bufsize;
	}

	ptnmd->buf_lut.objtotal = nbuffers;
	ptnmd->buf_lut.objsize = bufsize;
	nmd->nm_totalsize = (unsigned int)mem_size;

	nmd->flags |= NETMAP_MEM_FINALIZED;
out:
	return 0;
err:
	nmd->active--;
	return error;
}

static void
netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	nmd->active--;
	if (nmd->active <= 0 &&
		(nmd->flags & NETMAP_MEM_FINALIZED)) {
	    nmd->flags &= ~NETMAP_MEM_FINALIZED;
	    /* unmap ptnetmap-memdev memory */
	    if (ptnmd->ptn_dev) {
		nm_os_pt_memdev_iounmap(ptnmd->ptn_dev);
	    }
-	    ptnmd->nm_addr = 0;
+	    ptnmd->nm_addr = NULL;
	    ptnmd->nm_paddr = 0;
	}
}

static ssize_t
netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd;

	return (const char *)(vaddr) - (char *)(ptnmd->nm_addr);
}

static void
netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	if (netmap_verbose)
		D("deleting %p", nmd);
	if (nmd->active > 0)
		D("bug: deleting mem allocator with active=%d!", nmd->active);
	nm_mem_release_id(nmd);
	if (netmap_verbose)
		D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}

static struct netmap_if *
netmap_mem_pt_guest_if_new(struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp = NULL;

	NMA_LOCK(na->nm_mem);

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		D("Error: interface %p is not in passthrough", na->ifp);
		goto out;
	}

	nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) +
				    ptif->nifp_offset);
out:
	NMA_UNLOCK(na->nm_mem);

	return nifp;
}

static void
netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	struct mem_pt_if *ptif;

	NMA_LOCK(na->nm_mem);

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		D("Error: interface %p is not in passthrough", na->ifp);
	}

	NMA_UNLOCK(na->nm_mem);
}

static int
netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)
{
	struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem;
	struct mem_pt_if *ptif;
	struct netmap_if *nifp;
	int i, error = -1;

	NMA_LOCK(na->nm_mem);

	ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
	if (ptif == NULL) {
		D("Error: interface %p is not in passthrough", na->ifp);
		goto out;
	}

	/* point each kring to the corresponding backend ring */
	nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset);
	for (i = 0; i <= na->num_tx_rings; i++) {
		struct netmap_kring *kring = na->tx_rings + i;
		if (kring->ring)
			continue;
		kring->ring = (struct
netmap_ring *) ((char *)nifp + nifp->ring_ofs[i]); } for (i = 0; i <= na->num_rx_rings; i++) { struct netmap_kring *kring = na->rx_rings + i; if (kring->ring) continue; kring->ring = (struct netmap_ring *) ((char *)nifp + nifp->ring_ofs[i + na->num_tx_rings + 1]); } error = 0; out: NMA_UNLOCK(na->nm_mem); return error; } static void netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na) { /* TODO: remove?? */ #if 0 struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; struct mem_pt_if *ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); #endif } static struct netmap_mem_ops netmap_mem_pt_guest_ops = { .nmd_get_lut = netmap_mem_pt_guest_get_lut, .nmd_get_info = netmap_mem_pt_guest_get_info, .nmd_ofstophys = netmap_mem_pt_guest_ofstophys, .nmd_config = netmap_mem_pt_guest_config, .nmd_finalize = netmap_mem_pt_guest_finalize, .nmd_deref = netmap_mem_pt_guest_deref, .nmd_if_offset = netmap_mem_pt_guest_if_offset, .nmd_delete = netmap_mem_pt_guest_delete, .nmd_if_new = netmap_mem_pt_guest_if_new, .nmd_if_delete = netmap_mem_pt_guest_if_delete, .nmd_rings_create = netmap_mem_pt_guest_rings_create, .nmd_rings_delete = netmap_mem_pt_guest_rings_delete }; /* Called with NMA_LOCK(&nm_mem) held. */ static struct netmap_mem_d * netmap_mem_pt_guest_find_memid(nm_memid_t mem_id) { struct netmap_mem_d *mem = NULL; struct netmap_mem_d *scan = netmap_last_mem_d; do { /* find ptnetmap allocator through host ID */ if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref && ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) { mem = scan; break; } scan = scan->next; } while (scan != netmap_last_mem_d); return mem; } /* Called with NMA_LOCK(&nm_mem) held. */ static struct netmap_mem_d * netmap_mem_pt_guest_create(nm_memid_t mem_id) { struct netmap_mem_ptg *ptnmd; int err = 0; ptnmd = malloc(sizeof(struct netmap_mem_ptg), M_DEVBUF, M_NOWAIT | M_ZERO); if (ptnmd == NULL) { err = ENOMEM; goto error; } ptnmd->up.ops = &netmap_mem_pt_guest_ops; ptnmd->host_mem_id = mem_id; ptnmd->pt_ifs = NULL; /* Assign new id in the guest (We have the lock) */ err = nm_mem_assign_id_locked(&ptnmd->up); if (err) goto error; ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED; ptnmd->up.flags |= NETMAP_MEM_IO; NMA_LOCK_INIT(&ptnmd->up); return &ptnmd->up; error: netmap_mem_pt_guest_delete(&ptnmd->up); return NULL; } /* * find host id in guest allocators and create guest allocator * if it is not there */ static struct netmap_mem_d * netmap_mem_pt_guest_get(nm_memid_t mem_id) { struct netmap_mem_d *nmd; NMA_LOCK(&nm_mem); nmd = netmap_mem_pt_guest_find_memid(mem_id); if (nmd == NULL) { nmd = netmap_mem_pt_guest_create(mem_id); } NMA_UNLOCK(&nm_mem); return nmd; } /* * The guest allocator can be created by ptnetmap_memdev (during the device * attach) or by ptnetmap device (ptnet), during the netmap_attach. * * The order is not important (we have different order in LINUX and FreeBSD). * The first one, creates the device, and the second one simply attaches it. 
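 *
 * In both orders the allocator is identified by the host-side memory id
 * (host_mem_id), so whichever attach path runs first creates it through
 * netmap_mem_pt_guest_get(), and the second one simply finds it again.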
*/ /* Called when ptnetmap_memdev is attaching, to attach a new allocator in * the guest */ struct netmap_mem_d * netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id) { struct netmap_mem_d *nmd; struct netmap_mem_ptg *ptnmd; nmd = netmap_mem_pt_guest_get(mem_id); /* assign this device to the guest allocator */ if (nmd) { ptnmd = (struct netmap_mem_ptg *)nmd; ptnmd->ptn_dev = ptn_dev; } return nmd; } /* Called when ptnet device is attaching */ struct netmap_mem_d * netmap_mem_pt_guest_new(struct ifnet *ifp, unsigned int nifp_offset, unsigned int memid) { struct netmap_mem_d *nmd; if (ifp == NULL) { return NULL; } nmd = netmap_mem_pt_guest_get((nm_memid_t)memid); if (nmd) { netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset); } return nmd; } #endif /* WITH_PTNETMAP_GUEST */ Index: head/sys/dev/ofw/ofw_bus_subr.c =================================================================== --- head/sys/dev/ofw/ofw_bus_subr.c (revision 313981) +++ head/sys/dev/ofw/ofw_bus_subr.c (revision 313982) @@ -1,970 +1,970 @@ /*- * Copyright (c) 2001 - 2003 by Thomas Moestl . * Copyright (c) 2005 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #define OFW_COMPAT_LEN 255 #define OFW_STATUS_LEN 16 int ofw_bus_gen_setup_devinfo(struct ofw_bus_devinfo *obd, phandle_t node) { if (obd == NULL) return (ENOMEM); /* The 'name' property is considered mandatory. 
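 * The remaining properties (compatible, device_type, model and status)
 * are optional and are simply left NULL when a node does not provide
 * them.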
 */
	if ((OF_getprop_alloc(node, "name", 1, (void **)&obd->obd_name)) == -1)
		return (EINVAL);
	OF_getprop_alloc(node, "compatible", 1, (void **)&obd->obd_compat);
	OF_getprop_alloc(node, "device_type", 1, (void **)&obd->obd_type);
	OF_getprop_alloc(node, "model", 1, (void **)&obd->obd_model);
	OF_getprop_alloc(node, "status", 1, (void **)&obd->obd_status);
	obd->obd_node = node;
	return (0);
}

void
ofw_bus_gen_destroy_devinfo(struct ofw_bus_devinfo *obd)
{

	if (obd == NULL)
		return;
	if (obd->obd_compat != NULL)
		free(obd->obd_compat, M_OFWPROP);
	if (obd->obd_model != NULL)
		free(obd->obd_model, M_OFWPROP);
	if (obd->obd_name != NULL)
		free(obd->obd_name, M_OFWPROP);
	if (obd->obd_type != NULL)
		free(obd->obd_type, M_OFWPROP);
	if (obd->obd_status != NULL)
		free(obd->obd_status, M_OFWPROP);
}

int
ofw_bus_gen_child_pnpinfo_str(device_t cbdev, device_t child, char *buf,
    size_t buflen)
{

	if (ofw_bus_get_name(child) != NULL) {
		strlcat(buf, "name=", buflen);
		strlcat(buf, ofw_bus_get_name(child), buflen);
	}

	if (ofw_bus_get_compat(child) != NULL) {
		strlcat(buf, " compat=", buflen);
		strlcat(buf, ofw_bus_get_compat(child), buflen);
	}

	return (0);
}

const char *
ofw_bus_gen_get_compat(device_t bus, device_t dev)
{
	const struct ofw_bus_devinfo *obd;

	obd = OFW_BUS_GET_DEVINFO(bus, dev);
	if (obd == NULL)
		return (NULL);
	return (obd->obd_compat);
}

const char *
ofw_bus_gen_get_model(device_t bus, device_t dev)
{
	const struct ofw_bus_devinfo *obd;

	obd = OFW_BUS_GET_DEVINFO(bus, dev);
	if (obd == NULL)
		return (NULL);
	return (obd->obd_model);
}

const char *
ofw_bus_gen_get_name(device_t bus, device_t dev)
{
	const struct ofw_bus_devinfo *obd;

	obd = OFW_BUS_GET_DEVINFO(bus, dev);
	if (obd == NULL)
		return (NULL);
	return (obd->obd_name);
}

phandle_t
ofw_bus_gen_get_node(device_t bus, device_t dev)
{
	const struct ofw_bus_devinfo *obd;

	obd = OFW_BUS_GET_DEVINFO(bus, dev);
	if (obd == NULL)
		return (0);
	return (obd->obd_node);
}

const char *
ofw_bus_gen_get_type(device_t bus, device_t dev)
{
	const struct ofw_bus_devinfo *obd;

	obd = OFW_BUS_GET_DEVINFO(bus, dev);
	if (obd == NULL)
		return (NULL);
	return (obd->obd_type);
}

const char *
ofw_bus_get_status(device_t dev)
{
	const struct ofw_bus_devinfo *obd;

	obd = OFW_BUS_GET_DEVINFO(device_get_parent(dev), dev);
	if (obd == NULL)
		return (NULL);

	return (obd->obd_status);
}

int
ofw_bus_status_okay(device_t dev)
{
	const char *status;

	status = ofw_bus_get_status(dev);
	if (status == NULL || strcmp(status, "okay") == 0 ||
	    strcmp(status, "ok") == 0)
		return (1);

	return (0);
}

int
ofw_bus_node_status_okay(phandle_t node)
{
	char status[OFW_STATUS_LEN];
	int len;

	len = OF_getproplen(node, "status");
	if (len <= 0)
		return (1);

	OF_getprop(node, "status", status, OFW_STATUS_LEN);
	if ((len == 5 && (bcmp(status, "okay", len) == 0)) ||
	    (len == 3 && (bcmp(status, "ok", len) == 0)))
		return (1);

	return (0);
}

static int
ofw_bus_node_is_compatible_int(const char *compat, int len,
    const char *onecompat)
{
	int onelen, l, ret;

	onelen = strlen(onecompat);

	ret = 0;
	while (len > 0) {
		if (strlen(compat) == onelen &&
		    strncasecmp(compat, onecompat, onelen) == 0) {
			/* Found it. */
			ret = 1;
			break;
		}

		/* Slide to the next sub-string.
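		 * The 'compatible' property is a list of NUL-terminated
		 * strings laid out back to back (e.g.
		 * "arm,pl011\0arm,primecell\0"), so strlen() measures the
		 * current entry and the +1 skips its terminator.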
*/ l = strlen(compat) + 1; compat += l; len -= l; } return (ret); } int ofw_bus_node_is_compatible(phandle_t node, const char *compatstr) { char compat[OFW_COMPAT_LEN]; int len, rv; if ((len = OF_getproplen(node, "compatible")) <= 0) return (0); bzero(compat, OFW_COMPAT_LEN); if (OF_getprop(node, "compatible", compat, OFW_COMPAT_LEN) < 0) return (0); rv = ofw_bus_node_is_compatible_int(compat, len, compatstr); return (rv); } int ofw_bus_is_compatible(device_t dev, const char *onecompat) { phandle_t node; const char *compat; int len; if ((compat = ofw_bus_get_compat(dev)) == NULL) return (0); if ((node = ofw_bus_get_node(dev)) == -1) return (0); /* Get total 'compatible' prop len */ if ((len = OF_getproplen(node, "compatible")) <= 0) return (0); return (ofw_bus_node_is_compatible_int(compat, len, onecompat)); } int ofw_bus_is_compatible_strict(device_t dev, const char *compatible) { const char *compat; size_t len; if ((compat = ofw_bus_get_compat(dev)) == NULL) return (0); len = strlen(compatible); if (strlen(compat) == len && strncasecmp(compat, compatible, len) == 0) return (1); return (0); } const struct ofw_compat_data * ofw_bus_search_compatible(device_t dev, const struct ofw_compat_data *compat) { if (compat == NULL) return NULL; for (; compat->ocd_str != NULL; ++compat) { if (ofw_bus_is_compatible(dev, compat->ocd_str)) break; } return (compat); } int ofw_bus_has_prop(device_t dev, const char *propname) { phandle_t node; if ((node = ofw_bus_get_node(dev)) == -1) return (0); return (OF_hasprop(node, propname)); } void ofw_bus_setup_iinfo(phandle_t node, struct ofw_bus_iinfo *ii, int intrsz) { pcell_t addrc; int msksz; if (OF_getencprop(node, "#address-cells", &addrc, sizeof(addrc)) == -1) addrc = 2; ii->opi_addrc = addrc * sizeof(pcell_t); ii->opi_imapsz = OF_getencprop_alloc(node, "interrupt-map", 1, (void **)&ii->opi_imap); if (ii->opi_imapsz > 0) { msksz = OF_getencprop_alloc(node, "interrupt-map-mask", 1, (void **)&ii->opi_imapmsk); /* * Failure to get the mask is ignored; a full mask is used * then. We barf on bad mask sizes, however. */ if (msksz != -1 && msksz != ii->opi_addrc + intrsz) panic("ofw_bus_setup_iinfo: bad interrupt-map-mask " "property!"); } } int ofw_bus_lookup_imap(phandle_t node, struct ofw_bus_iinfo *ii, void *reg, int regsz, void *pintr, int pintrsz, void *mintr, int mintrsz, phandle_t *iparent) { uint8_t maskbuf[regsz + pintrsz]; int rv; if (ii->opi_imapsz <= 0) return (0); KASSERT(regsz >= ii->opi_addrc, ("ofw_bus_lookup_imap: register size too small: %d < %d", regsz, ii->opi_addrc)); if (node != -1) { rv = OF_getencprop(node, "reg", reg, regsz); if (rv < regsz) panic("ofw_bus_lookup_imap: cannot get reg property"); } return (ofw_bus_search_intrmap(pintr, pintrsz, reg, ii->opi_addrc, ii->opi_imap, ii->opi_imapsz, ii->opi_imapmsk, maskbuf, mintr, mintrsz, iparent)); } /* * Map an interrupt using the firmware reg, interrupt-map and * interrupt-map-mask properties. * The interrupt property to be mapped must be of size intrsz, and pointed to * by intr. The regs property of the node for which the mapping is done must * be passed as regs. This property is an array of register specifications; * the size of the address part of such a specification must be passed as * physsz. Only the first element of the property is used. * imap and imapsz hold the interrupt mask and it's size. * imapmsk is a pointer to the interrupt-map-mask property, which must have * a size of physsz + intrsz; it may be NULL, in which case a full mask is * assumed. 
* maskbuf must point to a buffer of length physsz + intrsz. * The interrupt is returned in result, which must point to a buffer of length * rintrsz (which gives the expected size of the mapped interrupt). * Returns number of cells in the interrupt if a mapping was found, 0 otherwise. */ int ofw_bus_search_intrmap(void *intr, int intrsz, void *regs, int physsz, void *imap, int imapsz, void *imapmsk, void *maskbuf, void *result, int rintrsz, phandle_t *iparent) { phandle_t parent; uint8_t *ref = maskbuf; uint8_t *uiintr = intr; uint8_t *uiregs = regs; uint8_t *uiimapmsk = imapmsk; uint8_t *mptr; pcell_t paddrsz; pcell_t pintrsz; int i, rsz, tsz; rsz = -1; if (imapmsk != NULL) { for (i = 0; i < physsz; i++) ref[i] = uiregs[i] & uiimapmsk[i]; for (i = 0; i < intrsz; i++) ref[physsz + i] = uiintr[i] & uiimapmsk[physsz + i]; } else { bcopy(regs, ref, physsz); bcopy(intr, ref + physsz, intrsz); } mptr = imap; i = imapsz; paddrsz = 0; while (i > 0) { bcopy(mptr + physsz + intrsz, &parent, sizeof(parent)); #ifndef OFW_IMAP_NO_IPARENT_ADDR_CELLS /* * Find if we need to read the parent address data. * CHRP-derived OF bindings, including ePAPR-compliant FDTs, * use this as an optional part of the specifier. */ if (OF_getencprop(OF_node_from_xref(parent), "#address-cells", &paddrsz, sizeof(paddrsz)) == -1) paddrsz = 0; /* default */ paddrsz *= sizeof(pcell_t); #endif if (OF_searchencprop(OF_node_from_xref(parent), "#interrupt-cells", &pintrsz, sizeof(pintrsz)) == -1) pintrsz = 1; /* default */ pintrsz *= sizeof(pcell_t); /* Compute the map stride size. */ tsz = physsz + intrsz + sizeof(phandle_t) + paddrsz + pintrsz; KASSERT(i >= tsz, ("ofw_bus_search_intrmap: truncated map")); if (bcmp(ref, mptr, physsz + intrsz) == 0) { bcopy(mptr + physsz + intrsz + sizeof(parent) + paddrsz, result, MIN(rintrsz, pintrsz)); if (iparent != NULL) *iparent = parent; return (pintrsz/sizeof(pcell_t)); } mptr += tsz; i -= tsz; } return (0); } int ofw_bus_msimap(phandle_t node, uint16_t pci_rid, phandle_t *msi_parent, uint32_t *msi_rid) { pcell_t *map, mask, msi_base, rid_base, rid_length; ssize_t len; uint32_t masked_rid, rid; int err, i; /* TODO: This should be OF_searchprop_alloc if we had it */ len = OF_getencprop_alloc(node, "msi-map", sizeof(*map), (void **)&map); if (len < 0) { if (msi_parent != NULL) { *msi_parent = 0; OF_getencprop(node, "msi-parent", msi_parent, sizeof(*msi_parent)); } if (msi_rid != NULL) *msi_rid = pci_rid; return (0); } err = ENOENT; rid = 0; mask = 0xffffffff; OF_getencprop(node, "msi-map-mask", &mask, sizeof(mask)); masked_rid = pci_rid & mask; for (i = 0; i < len; i += 4) { rid_base = map[i + 0]; rid_length = map[i + 3]; if (masked_rid < rid_base || masked_rid >= (rid_base + rid_length)) continue; msi_base = map[i + 2]; if (msi_parent != NULL) *msi_parent = map[i + 1]; if (msi_rid != NULL) *msi_rid = masked_rid - rid_base + msi_base; err = 0; break; } free(map, M_OFWPROP); return (err); } int ofw_bus_reg_to_rl(device_t dev, phandle_t node, pcell_t acells, pcell_t scells, struct resource_list *rl) { uint64_t phys, size; ssize_t i, j, rid, nreg, ret; uint32_t *reg; char *name; /* * This may be just redundant when having ofw_bus_devinfo * but makes this routine independent of it. */ ret = OF_getprop_alloc(node, "name", sizeof(*name), (void **)&name); if (ret == -1) name = NULL; ret = OF_getencprop_alloc(node, "reg", sizeof(*reg), (void **)®); nreg = (ret == -1) ? 
0 : ret; if (nreg % (acells + scells) != 0) { if (bootverbose) device_printf(dev, "Malformed reg property on <%s>\n", (name == NULL) ? "unknown" : name); nreg = 0; } for (i = 0, rid = 0; i < nreg; i += acells + scells, rid++) { phys = size = 0; for (j = 0; j < acells; j++) { phys <<= 32; phys |= reg[i + j]; } for (j = 0; j < scells; j++) { size <<= 32; size |= reg[i + acells + j]; } /* Skip the dummy reg property of glue devices like ssm(4). */ if (size != 0) resource_list_add(rl, SYS_RES_MEMORY, rid, phys, phys + size - 1, size); } free(name, M_OFWPROP); free(reg, M_OFWPROP); return (0); } /* * Get interrupt parent for given node. * Returns 0 if interrupt parent doesn't exist. */ phandle_t ofw_bus_find_iparent(phandle_t node) { phandle_t iparent; if (OF_searchencprop(node, "interrupt-parent", &iparent, sizeof(iparent)) == -1) { for (iparent = node; iparent != 0; iparent = OF_parent(iparent)) { if (OF_hasprop(iparent, "interrupt-controller")) break; } iparent = OF_xref_from_node(iparent); } return (iparent); } int ofw_bus_intr_to_rl(device_t dev, phandle_t node, struct resource_list *rl, int *rlen) { phandle_t iparent; uint32_t icells, *intr; int err, i, irqnum, nintr, rid; boolean_t extended; nintr = OF_getencprop_alloc(node, "interrupts", sizeof(*intr), (void **)&intr); if (nintr > 0) { iparent = ofw_bus_find_iparent(node); if (iparent == 0) { device_printf(dev, "No interrupt-parent found, " "assuming direct parent\n"); iparent = OF_parent(node); iparent = OF_xref_from_node(iparent); } if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) == -1) { device_printf(dev, "Missing #interrupt-cells " "property, assuming <1>\n"); icells = 1; } if (icells < 1 || icells > nintr) { device_printf(dev, "Invalid #interrupt-cells property " "value <%d>, assuming <1>\n", icells); icells = 1; } extended = false; } else { nintr = OF_getencprop_alloc(node, "interrupts-extended", sizeof(*intr), (void **)&intr); if (nintr <= 0) return (0); extended = true; } err = 0; rid = 0; for (i = 0; i < nintr; i += icells) { if (extended) { iparent = intr[i++]; if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) == -1) { device_printf(dev, "Missing #interrupt-cells " "property\n"); err = ENOENT; break; } if (icells < 1 || (i + icells) > nintr) { device_printf(dev, "Invalid #interrupt-cells " "property value <%d>\n", icells); err = ERANGE; break; } } irqnum = ofw_bus_map_intr(dev, iparent, icells, &intr[i]); resource_list_add(rl, SYS_RES_IRQ, rid++, irqnum, irqnum, 1); } if (rlen != NULL) *rlen = rid; free(intr, M_OFWPROP); return (err); } int ofw_bus_intr_by_rid(device_t dev, phandle_t node, int wanted_rid, phandle_t *producer, int *ncells, pcell_t **cells) { phandle_t iparent; uint32_t icells, *intr; int err, i, nintr, rid; boolean_t extended; nintr = OF_getencprop_alloc(node, "interrupts", sizeof(*intr), (void **)&intr); if (nintr > 0) { iparent = ofw_bus_find_iparent(node); if (iparent == 0) { device_printf(dev, "No interrupt-parent found, " "assuming direct parent\n"); iparent = OF_parent(node); iparent = OF_xref_from_node(iparent); } if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) == -1) { device_printf(dev, "Missing #interrupt-cells " "property, assuming <1>\n"); icells = 1; } if (icells < 1 || icells > nintr) { device_printf(dev, "Invalid #interrupt-cells property " "value <%d>, assuming <1>\n", icells); icells = 1; } extended = false; } else { nintr = OF_getencprop_alloc(node, 
"interrupts-extended", sizeof(*intr), (void **)&intr); if (nintr <= 0) return (ESRCH); extended = true; } err = ESRCH; rid = 0; for (i = 0; i < nintr; i += icells, rid++) { if (extended) { iparent = intr[i++]; if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) == -1) { device_printf(dev, "Missing #interrupt-cells " "property\n"); err = ENOENT; break; } if (icells < 1 || (i + icells) > nintr) { device_printf(dev, "Invalid #interrupt-cells " "property value <%d>\n", icells); err = ERANGE; break; } } if (rid == wanted_rid) { *cells = malloc(icells * sizeof(**cells), M_OFWPROP, M_WAITOK); *producer = iparent; *ncells= icells; memcpy(*cells, intr + i, icells * sizeof(**cells)); err = 0; break; } } free(intr, M_OFWPROP); return (err); } phandle_t ofw_bus_find_child(phandle_t start, const char *child_name) { char *name; int ret; phandle_t child; for (child = OF_child(start); child != 0; child = OF_peer(child)) { ret = OF_getprop_alloc(child, "name", sizeof(*name), (void **)&name); if (ret == -1) continue; if (strcmp(name, child_name) == 0) { free(name, M_OFWPROP); return (child); } free(name, M_OFWPROP); } return (0); } phandle_t ofw_bus_find_compatible(phandle_t node, const char *onecompat) { phandle_t child, ret; void *compat; int len; /* * Traverse all children of 'start' node, and find first with * matching 'compatible' property. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { len = OF_getprop_alloc(child, "compatible", 1, &compat); if (len >= 0) { ret = ofw_bus_node_is_compatible_int(compat, len, onecompat); free(compat, M_OFWPROP); if (ret != 0) return (child); } ret = ofw_bus_find_compatible(child, onecompat); if (ret != 0) return (ret); } return (0); } /** * @brief Return child of bus whose phandle is node * * A direct child of @p will be returned if it its phandle in the * OFW tree is @p node. Otherwise, NULL is returned. * * @param bus The bus to examine * @param node The phandle_t to look for. */ device_t ofw_bus_find_child_device_by_phandle(device_t bus, phandle_t node) { device_t *children, retval, child; int nkid, i; /* * Nothing can match the flag value for no node. */ if (node == -1) return (NULL); /* * Search the children for a match. We microoptimize * a bit by not using ofw_bus_get since we already know * the parent. We do not recurse. */ if (device_get_children(bus, &children, &nkid) != 0) return (NULL); retval = NULL; for (i = 0; i < nkid; i++) { child = children[i]; if (OFW_BUS_GET_NODE(bus, child) == node) { retval = child; break; } } free(children, M_TEMP); return (retval); } /* * Parse property that contain list of xrefs and values * (like standard "clocks" and "resets" properties) * Input arguments: * node - consumers device node * list_name - name of parsed list - "clocks" * cells_name - name of size property - "#clock-cells" * idx - the index of the requested list entry, or, if -1, an indication * to return the number of entries in the parsed list. * Output arguments: * producer - handle of producer * ncells - number of cells in result or the number of items in the list when * idx == -1. 
* cells - array of decoded cells */ static int ofw_bus_parse_xref_list_internal(phandle_t node, const char *list_name, const char *cells_name, int idx, phandle_t *producer, int *ncells, pcell_t **cells) { phandle_t pnode; phandle_t *elems; uint32_t pcells; int rv, i, j, nelems, cnt; elems = NULL; nelems = OF_getencprop_alloc(node, list_name, sizeof(*elems), (void **)&elems); if (nelems <= 0) return (ENOENT); rv = (idx == -1) ? 0 : ENOENT; for (i = 0, cnt = 0; i < nelems; i += pcells, cnt++) { pnode = elems[i++]; if (OF_getencprop(OF_node_from_xref(pnode), cells_name, &pcells, sizeof(pcells)) == -1) { printf("Missing %s property\n", cells_name); rv = ENOENT; break; } if ((i + pcells) > nelems) { printf("Invalid %s property value <%d>\n", cells_name, pcells); rv = ERANGE; break; } if (cnt == idx) { *cells= malloc(pcells * sizeof(**cells), M_OFWPROP, M_WAITOK); *producer = pnode; *ncells = pcells; for (j = 0; j < pcells; j++) (*cells)[j] = elems[i + j]; rv = 0; break; } } if (elems != NULL) free(elems, M_OFWPROP); if (idx == -1 && rv == 0) *ncells = cnt; return (rv); } /* * Parse property that contain list of xrefs and values * (like standard "clocks" and "resets" properties) * Input arguments: * node - consumers device node * list_name - name of parsed list - "clocks" * cells_name - name of size property - "#clock-cells" * idx - the index of the requested list entry (>= 0) * Output arguments: * producer - handle of producer * ncells - number of cells in result * cells - array of decoded cells */ int ofw_bus_parse_xref_list_alloc(phandle_t node, const char *list_name, const char *cells_name, int idx, phandle_t *producer, int *ncells, pcell_t **cells) { KASSERT(idx >= 0, ("ofw_bus_parse_xref_list_alloc: negative index supplied")); return (ofw_bus_parse_xref_list_internal(node, list_name, cells_name, idx, producer, ncells, cells)); } /* * Parse property that contain list of xrefs and values * (like standard "clocks" and "resets" properties) * and determine the number of items in the list * Input arguments: * node - consumers device node * list_name - name of parsed list - "clocks" * cells_name - name of size property - "#clock-cells" * Output arguments: * count - number of items in list */ int ofw_bus_parse_xref_list_get_length(phandle_t node, const char *list_name, const char *cells_name, int *count) { return (ofw_bus_parse_xref_list_internal(node, list_name, cells_name, -1, NULL, count, NULL)); } /* * Find index of string in string list property (case sensitive). */ int ofw_bus_find_string_index(phandle_t node, const char *list_name, const char *name, int *idx) { char *elems; int rv, i, cnt, nelems; elems = NULL; nelems = OF_getprop_alloc(node, list_name, 1, (void **)&elems); if (nelems <= 0) return (ENOENT); rv = ENOENT; for (i = 0, cnt = 0; i < nelems; cnt++) { if (strcmp(elems + i, name) == 0) { *idx = cnt; rv = 0; break; } i += strlen(elems + i) + 1; } if (elems != NULL) free(elems, M_OFWPROP); return (rv); } /* * Create zero terminated array of strings from string list property. */ int ofw_bus_string_list_to_array(phandle_t node, const char *list_name, const char ***out_array) { char *elems, *tptr; const char **array; int i, cnt, nelems, len; elems = NULL; nelems = OF_getprop_alloc(node, list_name, 1, (void **)&elems); if (nelems <= 0) return (nelems); /* Count number of strings. */ for (i = 0, cnt = 0; i < nelems; cnt++) i += strlen(elems + i) + 1; /* Allocate space for arrays and all strings. 
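	 * As an illustration only (not additional code): for a property
	 * holding the two strings "a" and "bc", nelems is 5 and the single
	 * allocation below is laid out as
	 *
	 *	[ptr0][ptr1][NULL] 'a' '\0' 'b' 'c' '\0'
	 *
	 * with ptr0 and ptr1 subsequently pointed into the trailing copy
	 * of the string data.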
*/ array = malloc((cnt + 1) * sizeof(char *) + nelems, M_OFWPROP, M_WAITOK); /* Get address of first string. */ tptr = (char *)(array + cnt + 1); /* Copy strings. */ memcpy(tptr, elems, nelems); free(elems, M_OFWPROP); /* Fill string pointers. */ for (i = 0, cnt = 0; i < nelems; cnt++) { len = strlen(tptr) + 1; array[cnt] = tptr; i += len; tptr += len; } - array[cnt] = 0; + array[cnt] = NULL; *out_array = array; return (cnt); } Index: head/sys/dev/patm/if_patm_tx.c =================================================================== --- head/sys/dev/patm/if_patm_tx.c (revision 313981) +++ head/sys/dev/patm/if_patm_tx.c (revision 313982) @@ -1,1277 +1,1277 @@ /*- * Copyright (c) 2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The TST allocation algorithm is from the IDT driver which is: * * Copyright (c) 2000, 2001 Richard Hodges and Matriplex, inc. * All rights reserved. * * Copyright (c) 1996, 1997, 1998, 1999 Mark Tinguely * All rights reserved. * * Author: Hartmut Brandt * * Driver for IDT77252 based cards like ProSum's. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include static struct mbuf *patm_tx_pad(struct patm_softc *sc, struct mbuf *m0); static void patm_launch(struct patm_softc *sc, struct patm_scd *scd); static struct patm_txmap *patm_txmap_get(struct patm_softc *); static void patm_load_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc); static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc); static void patm_tst_timer(void *p); static void patm_tst_update(struct patm_softc *); static void patm_tct_start(struct patm_softc *sc, struct patm_vcc *); static const char *dump_scd(struct patm_softc *sc, struct patm_scd *scd) __unused; static void patm_tct_print(struct patm_softc *sc, u_int cid) __unused; /* * Structure for communication with the loader function for transmission */ struct txarg { struct patm_softc *sc; struct patm_scd *scd; /* scheduling channel */ struct patm_vcc *vcc; /* the VCC of this PDU */ struct mbuf *mbuf; u_int hdr; /* cell header */ }; static __inline u_int cbr2slots(struct patm_softc *sc, struct patm_vcc *vcc) { /* compute the number of slots we need, make sure to get at least * the specified PCR */ return ((u_int)howmany((uint64_t)(sc->mmap->tst_size - 1) * vcc->vcc.tparam.pcr, IFP2IFATM(sc->ifp)->mib.pcr)); } static __inline u_int slots2cr(struct patm_softc *sc, u_int slots) { return ((slots * IFP2IFATM(sc->ifp)->mib.pcr + sc->mmap->tst_size - 2) / (sc->mmap->tst_size - 1)); } /* check if we can open this one */ int patm_tx_vcc_can_open(struct patm_softc *sc, struct patm_vcc *vcc) { /* check resources */ switch (vcc->vcc.traffic) { case ATMIO_TRAFFIC_CBR: { u_int slots = cbr2slots(sc, vcc); if (slots > sc->tst_free + sc->tst_reserve) return (EINVAL); break; } case ATMIO_TRAFFIC_VBR: if (vcc->vcc.tparam.scr > sc->bwrem) return (EINVAL); if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) return (EINVAL); if (vcc->vcc.tparam.scr > vcc->vcc.tparam.pcr || vcc->vcc.tparam.mbs == 0) return (EINVAL); break; case ATMIO_TRAFFIC_ABR: if (vcc->vcc.tparam.tbe == 0 || vcc->vcc.tparam.nrm == 0) /* needed to compute CRM */ return (EINVAL); if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr || vcc->vcc.tparam.icr > vcc->vcc.tparam.pcr || vcc->vcc.tparam.mcr > vcc->vcc.tparam.icr) return (EINVAL); if (vcc->vcc.tparam.mcr > sc->bwrem || vcc->vcc.tparam.icr > sc->bwrem) return (EINVAL); break; } return (0); } #define NEXT_TAG(T) do { \ (T) = ((T) + 1) % IDT_TSQE_TAG_SPACE; \ } while (0) /* * open it */ void patm_tx_vcc_open(struct patm_softc *sc, struct patm_vcc *vcc) { struct patm_scd *scd; if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) { /* we use UBR0 */ vcc->scd = sc->scd0; vcc->vflags |= PATM_VCC_TX_OPEN; return; } /* get an SCD */ scd = patm_scd_alloc(sc); if (scd == NULL) { /* should not happen */ patm_printf(sc, "out of SCDs\n"); return; } vcc->scd = scd; patm_scd_setup(sc, scd); patm_tct_setup(sc, scd, vcc); if (vcc->vcc.traffic != ATMIO_TRAFFIC_CBR) patm_tct_start(sc, vcc); vcc->vflags |= PATM_VCC_TX_OPEN; } /* * close the given vcc for transmission */ void patm_tx_vcc_close(struct patm_softc *sc, struct patm_vcc *vcc) { struct 
patm_scd *scd; struct mbuf *m; vcc->vflags |= PATM_VCC_TX_CLOSING; if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) { /* let the queue PDUs go out */ vcc->scd = NULL; vcc->vflags &= ~(PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING); return; } scd = vcc->scd; /* empty the waitq */ for (;;) { _IF_DEQUEUE(&scd->q, m); if (m == NULL) break; m_freem(m); } if (scd->num_on_card == 0) { /* we are idle */ vcc->vflags &= ~PATM_VCC_TX_OPEN; if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR) patm_tst_free(sc, vcc); patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_scd_free(sc, scd); vcc->scd = NULL; vcc->vflags &= ~PATM_VCC_TX_CLOSING; return; } /* speed up transmission */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff)); /* wait for the interrupt to drop the number to 0 */ patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card); } /* transmission side finally closed */ void patm_tx_vcc_closed(struct patm_softc *sc, struct patm_vcc *vcc) { patm_debug(sc, VCC, "%u.%u TX closed", vcc->vcc.vpi, vcc->vcc.vci); if (vcc->vcc.traffic == ATMIO_TRAFFIC_VBR) sc->bwrem += vcc->vcc.tparam.scr; } /* * Pull off packets from the interface queue and try to transmit them. * If the transmission fails because of a full transmit channel, we drop * packets for CBR and queue them for other channels up to limit. * This limit should depend on the CDVT for VBR and ABR, but it doesn't. */ void patm_start(struct ifnet *ifp) { struct patm_softc *sc = ifp->if_softc; struct mbuf *m; struct atm_pseudohdr *aph; u_int vpi, vci, cid; struct patm_vcc *vcc; mtx_lock(&sc->mtx); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { mtx_unlock(&sc->mtx); return; } while (1) { /* get a new mbuf */ IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* split of pseudo header */ if (m->m_len < sizeof(*aph) && (m = m_pullup(m, sizeof(*aph))) == NULL) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } aph = mtod(m, struct atm_pseudohdr *); vci = ATM_PH_VCI(aph); vpi = ATM_PH_VPI(aph); m_adj(m, sizeof(*aph)); /* reject empty packets */ if (m->m_pkthdr.len == 0) { m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } /* check whether this is a legal vcc */ if (!LEGAL_VPI(sc, vpi) || !LEGAL_VCI(sc, vci) || vci == 0) { m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } cid = PATM_CID(sc, vpi, vci); vcc = sc->vccs[cid]; if (vcc == NULL) { m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } /* must be multiple of 48 if not AAL5 */ if (vcc->vcc.aal == ATMIO_AAL_0 || vcc->vcc.aal == ATMIO_AAL_34) { /* XXX AAL3/4 format? 
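			 * (Illustration: a 100 byte AAL0 PDU is padded by
			 * patm_tx_pad() below to 144 bytes, i.e. three full
			 * 48 byte cell payloads.)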
*/ if (m->m_pkthdr.len % 48 != 0 && (m = patm_tx_pad(sc, m)) == NULL) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } } else if (vcc->vcc.aal == ATMIO_AAL_RAW) { switch (vcc->vflags & PATM_RAW_FORMAT) { default: case PATM_RAW_CELL: if (m->m_pkthdr.len != 53) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } break; case PATM_RAW_NOHEC: if (m->m_pkthdr.len != 52) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } break; case PATM_RAW_CS: if (m->m_pkthdr.len != 64) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } break; } } /* save data */ m->m_pkthdr.PH_loc.ptr = vcc; /* try to put it on the channels queue */ if (_IF_QFULL(&vcc->scd->q)) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); sc->stats.tx_qfull++; m_freem(m); continue; } _IF_ENQUEUE(&vcc->scd->q, m); #ifdef ENABLE_BPF if (!(vcc->vcc.flags & ATMIO_FLAG_NG) && (vcc->vcc.aal == ATMIO_AAL_5) && (vcc->vcc.flags & ATM_PH_LLCSNAP)) BPF_MTAP(ifp, m); #endif /* kick the channel to life */ patm_launch(sc, vcc->scd); } mtx_unlock(&sc->mtx); } /* * Pad non-AAL5 packet to a multiple of 48-byte. * We assume AAL0 only. We have still to decide on the format of AAL3/4. */ static struct mbuf * patm_tx_pad(struct patm_softc *sc, struct mbuf *m0) { struct mbuf *last, *m; u_int plen, pad, space; plen = m_length(m0, &last); if (plen != m0->m_pkthdr.len) { patm_printf(sc, "%s: mbuf length mismatch %d %u\n", __func__, m0->m_pkthdr.len, plen); m0->m_pkthdr.len = plen; if (plen == 0) { m_freem(m0); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); return (NULL); } if (plen % 48 == 0) return (m0); } pad = 48 - plen % 48; m0->m_pkthdr.len += pad; if (M_WRITABLE(last)) { if (M_TRAILINGSPACE(last) >= pad) { bzero(last->m_data + last->m_len, pad); last->m_len += pad; return (m0); } space = M_LEADINGSPACE(last); if (space + M_TRAILINGSPACE(last) >= pad) { bcopy(last->m_data, last->m_data + space, last->m_len); last->m_data -= space; bzero(last->m_data + last->m_len, pad); last->m_len += pad; return (m0); } } MGET(m, M_NOWAIT, MT_DATA); - if (m == 0) { + if (m == NULL) { m_freem(m0); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); return (NULL); } bzero(mtod(m, u_char *), pad); m->m_len = pad; last->m_next = m; return (m0); } /* * Try to put as many packets from the channels queue onto the channel */ static void patm_launch(struct patm_softc *sc, struct patm_scd *scd) { struct txarg a; struct mbuf *m, *tmp; u_int segs; struct patm_txmap *map; int error; a.sc = sc; a.scd = scd; /* limit the number of outstanding packets to the tag space */ while (scd->num_on_card < IDT_TSQE_TAG_SPACE) { /* get the next packet */ _IF_DEQUEUE(&scd->q, m); if (m == NULL) break; a.vcc = m->m_pkthdr.PH_loc.ptr; /* we must know the number of segments beforehand - count * this may actually give a wrong number of segments for * AAL_RAW where we still need to remove the cell header */ segs = 0; for (tmp = m; tmp != NULL; tmp = tmp->m_next) if (tmp->m_len != 0) segs++; /* check whether there is space in the queue */ if (segs >= scd->space) { /* put back */ _IF_PREPEND(&scd->q, m); sc->stats.tx_out_of_tbds++; break; } /* get a DMA map */ if ((map = patm_txmap_get(sc)) == NULL) { _IF_PREPEND(&scd->q, m); sc->stats.tx_out_of_maps++; break; } /* load the map */ m->m_pkthdr.PH_loc.ptr = map; a.mbuf = m; /* handle AAL_RAW */ if (a.vcc->vcc.aal == ATMIO_AAL_RAW) { u_char hdr[4]; m_copydata(m, 0, 4, hdr); a.hdr = (hdr[0] << 24) | (hdr[1] << 16) | (hdr[2] << 8) | hdr[3]; switch (a.vcc->vflags & PATM_RAW_FORMAT) { default: 
case PATM_RAW_CELL: m_adj(m, 5); break; case PATM_RAW_NOHEC: m_adj(m, 4); break; case PATM_RAW_CS: m_adj(m, 16); break; } } else a.hdr = IDT_TBD_HDR(a.vcc->vcc.vpi, a.vcc->vcc.vci, 0, 0); error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m, patm_load_txbuf, &a, BUS_DMA_NOWAIT); if (error == EFBIG) { if ((m = m_defrag(m, M_NOWAIT)) == NULL) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m, patm_load_txbuf, &a, BUS_DMA_NOWAIT); } if (error != 0) { sc->stats.tx_load_err++; if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); m_freem(m); continue; } if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); } } /* * Load the DMA segments into the scheduling channel */ static void patm_load_txbuf(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct txarg *a= uarg; struct patm_scd *scd = a->scd; u_int w1, w3, cnt; struct idt_tbd *tbd = NULL; u_int rest = mapsize; if (error != 0) return; cnt = 0; while (nseg > 0) { if (segs->ds_len == 0) { /* transmit buffer length must be > 0 */ nseg--; segs++; continue; } /* rest after this buffer */ rest -= segs->ds_len; /* put together status word */ w1 = 0; if (rest < 48 /* && a->vcc->vcc.aal != ATMIO_AAL_5 */) /* last cell is in this buffer */ w1 |= IDT_TBD_EPDU; if (a->vcc->vcc.aal == ATMIO_AAL_5) w1 |= IDT_TBD_AAL5; else if (a->vcc->vcc.aal == ATMIO_AAL_34) w1 |= IDT_TBD_AAL34; else w1 |= IDT_TBD_AAL0; w1 |= segs->ds_len; /* AAL5 PDU length (unpadded) */ if (a->vcc->vcc.aal == ATMIO_AAL_5) w3 = mapsize; else w3 = 0; if (rest == 0) w1 |= IDT_TBD_TSIF | IDT_TBD_GTSI | (scd->tag << IDT_TBD_TAG_SHIFT); tbd = &scd->scq[scd->tail]; tbd->flags = htole32(w1); tbd->addr = htole32(segs->ds_addr); tbd->aal5 = htole32(w3); tbd->hdr = htole32(a->hdr); patm_debug(a->sc, TX, "TBD(%u): %08x %08x %08x %08x", scd->tail, w1, segs->ds_addr, w3, a->hdr); /* got to next entry */ if (++scd->tail == IDT_SCQ_SIZE) scd->tail = 0; cnt++; nseg--; segs++; } scd->space -= cnt; scd->num_on_card++; KASSERT(rest == 0, ("bad mbuf")); KASSERT(cnt > 0, ("no segs")); KASSERT(scd->space > 0, ("scq full")); KASSERT(scd->on_card[scd->tag] == NULL, ("scd on_card wedged %u%s", scd->tag, dump_scd(a->sc, scd))); scd->on_card[scd->tag] = a->mbuf; a->mbuf->m_pkthdr.csum_data = cnt; NEXT_TAG(scd->tag); patm_debug(a->sc, TX, "SCD tail %u (%lx:%lx)", scd->tail, (u_long)scd->phy, (u_long)scd->phy + (scd->tail << IDT_TBD_SHIFT)); patm_sram_write(a->sc, scd->sram, scd->phy + (scd->tail << IDT_TBD_SHIFT)); if (patm_sram_read(a->sc, a->vcc->cid * 8 + 3) & IDT_TCT_IDLE) { /* * if the connection is idle start it. We cannot rely * on a flag set by patm_tx_idle() here, because sometimes * the card seems to place an idle TSI into the TSQ but * forgets to raise an interrupt. */ patm_nor_write(a->sc, IDT_NOR_TCMDQ, IDT_TCMDQ_START(a->vcc->cid)); } } /* * packet transmitted */ void patm_tx(struct patm_softc *sc, u_int stamp, u_int status) { u_int cid, tag, last; struct mbuf *m; struct patm_vcc *vcc; struct patm_scd *scd; struct patm_txmap *map; /* get the connection */ cid = PATM_CID(sc, IDT_TBD_VPI(status), IDT_TBD_VCI(status)); if ((vcc = sc->vccs[cid]) == NULL) { /* closed UBR connection */ return; } scd = vcc->scd; tag = IDT_TSQE_TAG(stamp); last = scd->last_tag; if (tag == last) { patm_printf(sc, "same tag %u\n", tag); return; } /* Errata 12 requests us to free all entries up to the one * with the given tag. 
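	 * As a sketch of the loop that follows (no new code; names as in
	 * the loop): the tags form a ring of IDT_TSQE_TAG_SPACE entries
	 * and we advance one step at a time,
	 *
	 *	do {
	 *		NEXT_TAG(last);
	 *		... complete and free scd->on_card[last] ...
	 *	} while (last != tag);
	 *
	 * so every buffer up to and including the reported tag is
	 * completed.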
*/ do { /* next tag to try */ NEXT_TAG(last); m = scd->on_card[last]; KASSERT(m != NULL, ("%stag=%u", dump_scd(sc, scd), tag)); scd->on_card[last] = NULL; patm_debug(sc, TX, "ok tag=%x", last); map = m->m_pkthdr.PH_loc.ptr; scd->space += m->m_pkthdr.csum_data; bus_dmamap_sync(sc->tx_tag, map->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_tag, map->map); m_freem(m); SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); scd->num_on_card--; if (vcc->vflags & PATM_VCC_TX_CLOSING) { if (scd->num_on_card == 0) { /* done with this VCC */ if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR) patm_tst_free(sc, vcc); patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_scd_free(sc, scd); vcc->scd = NULL; vcc->vflags &= ~PATM_VCC_TX_CLOSING; if (vcc->vcc.flags & ATMIO_FLAG_ASYNC) { patm_tx_vcc_closed(sc, vcc); if (!(vcc->vflags & PATM_VCC_OPEN)) patm_vcc_closed(sc, vcc); } else cv_signal(&sc->vcc_cv); return; } patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card); if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) { /* insist on speeding up transmission for ABR */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff)); } } } while (last != tag); scd->last_tag = tag; if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) { u_int acri, cps; acri = (patm_sram_read(sc, 8 * cid + 2) >> IDT_TCT_ACRI_SHIFT) & 0x3fff; cps = IFP2IFATM(sc->ifp)->mib.pcr * 32 / ((1 << (acri >> 10)) * (acri & 0x3ff)); if (cps != vcc->cps) { patm_debug(sc, VCC, "ACRI=%04x CPS=%u", acri, cps); ATMEV_SEND_ACR_CHANGED(IFP2IFATM(sc->ifp), vcc->vcc.vpi, vcc->vcc.vci, cps); vcc->cps = cps; } } patm_launch(sc, scd); } /* * VBR/ABR connection went idle * Either restart it or set the idle flag. */ void patm_tx_idle(struct patm_softc *sc, u_int cid) { struct patm_vcc *vcc; patm_debug(sc, VCC, "idle %u", cid); if ((vcc = sc->vccs[cid]) != NULL && (vcc->vflags & (PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING)) != 0 && vcc->scd != NULL && (vcc->scd->num_on_card != 0 || _IF_QLEN(&vcc->scd->q) != 0)) { /* * If there is any packet outstanding in the SCD re-activate * the channel and kick it. */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_START(vcc->cid)); patm_launch(sc, vcc->scd); } } /* * Convert a (24bit) rate to the atm-forum form * Our rate is never larger than 19 bit. */ static u_int cps2atmf(u_int cps) { u_int e; if (cps == 0) return (0); cps <<= 9; e = 0; while (cps > (1024 - 1)) { e++; cps >>= 1; } return ((1 << 14) | (e << 9) | (cps & 0x1ff)); } /* * Do a binary search on the log2rate table to convert the rate * to its log form. This assumes that the ATM-Forum form is monotonically * increasing with the plain cell rate. */ static u_int rate2log(struct patm_softc *sc, u_int rate) { const uint32_t *tbl; u_int lower, upper, mid, done, val, afr; afr = cps2atmf(rate); if (sc->flags & PATM_25M) tbl = patm_rtables25; else tbl = patm_rtables155; lower = 0; upper = 255; done = 0; while (!done) { mid = (lower + upper) / 2; val = tbl[mid] >> 17; if (val == afr || upper == lower) break; if (afr > val) lower = mid + 1; else upper = mid - 1; } if (val > afr && mid > 0) mid--; return (mid); } /* * Return the table index for an increase table. The increase table * must be selected not by the RIF itself, but by PCR/2^RIF. Each table * represents an additive increase of a cell rate that can be computed * from the first table entry (the value in this entry will not be clamped * by the link rate). 
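 * As an illustrative summary (no additional code): the requested
 * additive increase is pcr >> rif; the loop converts each table's
 * first entry to a cell rate via the log2rate table and returns the
 * index of the last table whose base rate still exceeds that
 * increase, offset by 4.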
*/ static u_int get_air_table(struct patm_softc *sc, u_int rif, u_int pcr) { const uint32_t *tbl; u_int increase, base, lair0, ret, t, cps; #define GET_ENTRY(TAB, IDX) (0xffff & ((IDX & 1) ? \ (tbl[512 + (IDX / 2) + 128 * (TAB)] >> 16) : \ (tbl[512 + (IDX / 2) + 128 * (TAB)]))) #define MANT_BITS 10 #define FRAC_BITS 16 #define DIFF_TO_FP(D) (((D) & ((1 << MANT_BITS) - 1)) << ((D) >> MANT_BITS)) #define AFR_TO_INT(A) ((1 << (((A) >> 9) & 0x1f)) * \ (512 + ((A) & 0x1ff)) / 512 * ((A) >> 14)) if (sc->flags & PATM_25M) tbl = patm_rtables25; else tbl = patm_rtables155; if (rif >= patm_rtables_ntab) rif = patm_rtables_ntab - 1; increase = pcr >> rif; ret = 0; for (t = 0; t < patm_rtables_ntab; t++) { /* get base rate of this table */ base = GET_ENTRY(t, 0); /* convert this to fixed point */ lair0 = DIFF_TO_FP(base) >> FRAC_BITS; /* get the CPS from the log2rate table */ cps = AFR_TO_INT(tbl[lair0] >> 17) - 10; if (increase >= cps) break; ret = t; } return (ret + 4); } /* * Setup the TCT */ void patm_tct_setup(struct patm_softc *sc, struct patm_scd *scd, struct patm_vcc *vcc) { uint32_t tct[8]; u_int sram; u_int mbs, token; u_int tmp, crm, rdf, cdf, air, mcr; bzero(tct, sizeof(tct)); if (vcc == NULL) { /* special case for UBR0 */ sram = 0; tct[0] = IDT_TCT_UBR | scd->sram; tct[7] = IDT_TCT_UBR_FLG; } else { sram = vcc->cid * 8; switch (vcc->vcc.traffic) { case ATMIO_TRAFFIC_CBR: patm_tst_alloc(sc, vcc); tct[0] = IDT_TCT_CBR | scd->sram; /* must account for what was really allocated */ break; case ATMIO_TRAFFIC_VBR: /* compute parameters for the TCT */ scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr); scd->lacr = rate2log(sc, vcc->vcc.tparam.scr); /* get the 16-bit fraction of SCR/PCR * both a 24 bit. Do it the simple way. */ token = (uint64_t)(vcc->vcc.tparam.scr << 16) / vcc->vcc.tparam.pcr; patm_debug(sc, VCC, "VBR: init_er=%u lacr=%u " "token=0x%04x\n", scd->init_er, scd->lacr, token); tct[0] = IDT_TCT_VBR | scd->sram; tct[2] = IDT_TCT_TSIF; tct[3] = IDT_TCT_IDLE | IDT_TCT_HALT; tct[4] = IDT_TCT_MAXIDLE; tct[5] = 0x01000000; if ((mbs = vcc->vcc.tparam.mbs) > 0xff) mbs = 0xff; tct[6] = (mbs << 16) | token; sc->bwrem -= vcc->vcc.tparam.scr; break; case ATMIO_TRAFFIC_ABR: scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr); scd->lacr = rate2log(sc, vcc->vcc.tparam.icr); mcr = rate2log(sc, vcc->vcc.tparam.mcr); /* compute CRM */ tmp = vcc->vcc.tparam.tbe / vcc->vcc.tparam.nrm; if (tmp * vcc->vcc.tparam.nrm < vcc->vcc.tparam.tbe) tmp++; for (crm = 1; tmp > (1 << crm); crm++) ; if (crm > 0x7) crm = 7; air = get_air_table(sc, vcc->vcc.tparam.rif, vcc->vcc.tparam.pcr); if ((rdf = vcc->vcc.tparam.rdf) >= patm_rtables_ntab) rdf = patm_rtables_ntab - 1; rdf += patm_rtables_ntab + 4; if ((cdf = vcc->vcc.tparam.cdf) >= patm_rtables_ntab) cdf = patm_rtables_ntab - 1; cdf += patm_rtables_ntab + 4; patm_debug(sc, VCC, "ABR: init_er=%u lacr=%u mcr=%u " "crm=%u air=%u rdf=%u cdf=%u\n", scd->init_er, scd->lacr, mcr, crm, air, rdf, cdf); tct[0] = IDT_TCT_ABR | scd->sram; tct[1] = crm << IDT_TCT_CRM_SHIFT; tct[3] = IDT_TCT_HALT | IDT_TCT_IDLE | (4 << IDT_TCT_NAGE_SHIFT); tct[4] = mcr << IDT_TCT_LMCR_SHIFT; tct[5] = (cdf << IDT_TCT_CDF_SHIFT) | (rdf << IDT_TCT_RDF_SHIFT) | (air << IDT_TCT_AIR_SHIFT); sc->bwrem -= vcc->vcc.tparam.mcr; break; } } patm_sram_write4(sc, sram + 0, tct[0], tct[1], tct[2], tct[3]); patm_sram_write4(sc, sram + 4, tct[4], tct[5], tct[6], tct[7]); patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x", sram / 8, patm_sram_read(sc, sram + 0), patm_sram_read(sc, sram + 1), 
	    patm_sram_read(sc, sram + 2), patm_sram_read(sc, sram + 3),
	    patm_sram_read(sc, sram + 4), patm_sram_read(sc, sram + 5),
	    patm_sram_read(sc, sram + 6), patm_sram_read(sc, sram + 7));
}

/*
 * Start a channel
 */
static void
patm_tct_start(struct patm_softc *sc, struct patm_vcc *vcc)
{

	patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid,
	    vcc->scd->init_er));
	patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_SLACR(vcc->cid,
	    vcc->scd->lacr));
}

static void
patm_tct_print(struct patm_softc *sc, u_int cid)
{
#ifdef PATM_DEBUG
	u_int sram = cid * 8;
#endif

	patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x",
	    sram / 8,
	    patm_sram_read(sc, sram + 0), patm_sram_read(sc, sram + 1),
	    patm_sram_read(sc, sram + 2), patm_sram_read(sc, sram + 3),
	    patm_sram_read(sc, sram + 4), patm_sram_read(sc, sram + 5),
	    patm_sram_read(sc, sram + 6), patm_sram_read(sc, sram + 7));
}

/*
 * Setup the SCD
 */
void
patm_scd_setup(struct patm_softc *sc, struct patm_scd *scd)
{

	patm_sram_write4(sc, scd->sram + 0, scd->phy, 0, 0xffffffff, 0);
	patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0);

	patm_debug(sc, VCC, "SCD(%x): %08x %08x %08x %08x %08x %08x %08x %08x",
	    scd->sram,
	    patm_sram_read(sc, scd->sram + 0),
	    patm_sram_read(sc, scd->sram + 1),
	    patm_sram_read(sc, scd->sram + 2),
	    patm_sram_read(sc, scd->sram + 3),
	    patm_sram_read(sc, scd->sram + 4),
	    patm_sram_read(sc, scd->sram + 5),
	    patm_sram_read(sc, scd->sram + 6),
	    patm_sram_read(sc, scd->sram + 7));
}

/*
 * Grow the TX map table if possible
 */
static void
patm_txmaps_grow(struct patm_softc *sc)
{
	u_int i;
	struct patm_txmap *map;
	int err;

	if (sc->tx_nmaps >= sc->tx_maxmaps)
		return;

	for (i = sc->tx_nmaps; i < sc->tx_nmaps + PATM_CFG_TXMAPS_STEP; i++) {
		map = uma_zalloc(sc->tx_mapzone, M_NOWAIT);
		if (map == NULL)	/* don't dereference a failed alloc */
			break;
		err = bus_dmamap_create(sc->tx_tag, 0, &map->map);
		if (err) {
			uma_zfree(sc->tx_mapzone, map);
			break;
		}
		SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
	}

	sc->tx_nmaps = i;
}

/*
 * Allocate a transmission map
 */
static struct patm_txmap *
patm_txmap_get(struct patm_softc *sc)
{
	struct patm_txmap *map;

	if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL) {
		patm_txmaps_grow(sc);
		if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL)
			return (NULL);
	}
	SLIST_REMOVE_HEAD(&sc->tx_maps_free, link);

	return (map);
}

/*
 * Look whether we are in the process of updating the TST on the chip.
 * If we are, set the flag that we need another update.
 * If we are not, start the update.
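 *
 * As an illustrative summary of patm_tst_update() below (no extra
 * code): the chip always runs one of two TST copies in SRAM; an
 * update writes the soft TST into the idle copy and redirects the
 * active copy's jump entry,
 *
 *	soft TST     ->  TST[idle]
 *	jump[active] ->  base[idle]
 *
 * after which a one-tick timer waits for the chip to take the jump
 * before the roles of the two copies swap.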
*/ static __inline void patm_tst_start(struct patm_softc *sc) { if (!(sc->tst_state & TST_PENDING)) { sc->tst_state |= TST_PENDING; if (!(sc->tst_state & TST_WAIT)) { /* timer not running */ patm_tst_update(sc); } } } /* * Allocate TST entries to a CBR connection */ static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc) { u_int slots; u_int qptr, pptr; u_int qmax, pmax; u_int pspc, last; mtx_lock(&sc->tst_lock); /* compute the number of slots we need, make sure to get at least * the specified PCR */ slots = cbr2slots(sc, vcc); vcc->scd->slots = slots; sc->bwrem -= slots2cr(sc, slots); patm_debug(sc, TST, "tst_alloc: cbr=%u link=%u tst=%u slots=%u", vcc->vcc.tparam.pcr, IFP2IFATM(sc->ifp)->mib.pcr, sc->mmap->tst_size, slots); qmax = sc->mmap->tst_size - 1; pmax = qmax << 8; pspc = pmax / slots; pptr = pspc >> 1; /* starting point */ qptr = pptr >> 8; last = qptr; while (slots > 0) { if (qptr >= qmax) qptr -= qmax; if (sc->tst_soft[qptr] != IDT_TST_VBR) { /* used - try next */ qptr++; continue; } patm_debug(sc, TST, "slot[%u] = %u.%u diff=%d", qptr, vcc->vcc.vpi, vcc->vcc.vci, (int)qptr - (int)last); last = qptr; sc->tst_soft[qptr] = IDT_TST_CBR | vcc->cid | TST_BOTH; sc->tst_free--; if ((pptr += pspc) >= pmax) pptr -= pmax; qptr = pptr >> 8; slots--; } patm_tst_start(sc); mtx_unlock(&sc->tst_lock); } /* * Free a CBR connection's TST entries */ static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc) { u_int i; mtx_lock(&sc->tst_lock); for (i = 0; i < sc->mmap->tst_size - 1; i++) { if ((sc->tst_soft[i] & IDT_TST_MASK) == vcc->cid) { sc->tst_soft[i] = IDT_TST_VBR | TST_BOTH; sc->tst_free++; } } sc->bwrem += slots2cr(sc, vcc->scd->slots); patm_tst_start(sc); mtx_unlock(&sc->tst_lock); } /* * Write the soft TST into the idle incore TST and start the wait timer. * We assume that we hold the tst lock. */ static void patm_tst_update(struct patm_softc *sc) { u_int flag; /* flag to clear from soft TST */ u_int idle; /* the idle TST */ u_int act; /* the active TST */ u_int i; if (sc->tst_state & TST_ACT1) { act = 1; idle = 0; flag = TST_CH0; } else { act = 0; idle = 1; flag = TST_CH1; } /* update the idle one */ for (i = 0; i < sc->mmap->tst_size - 1; i++) if (sc->tst_soft[i] & flag) { patm_sram_write(sc, sc->tst_base[idle] + i, sc->tst_soft[i] & ~TST_BOTH); sc->tst_soft[i] &= ~flag; } /* the used one jump to the idle one */ patm_sram_write(sc, sc->tst_jump[act], IDT_TST_BR | (sc->tst_base[idle] << 2)); /* wait for the chip to jump */ sc->tst_state &= ~TST_PENDING; sc->tst_state |= TST_WAIT; callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc); } /* * Timer for TST updates */ static void patm_tst_timer(void *p) { struct patm_softc *sc = p; u_int act; /* active TST */ u_int now; /* current place in TST */ mtx_lock(&sc->tst_lock); if (sc->tst_state & TST_WAIT) { /* ignore the PENDING state while we are waiting for * the chip to switch tables. Once the switch is done, * we will again lock at PENDING */ act = (sc->tst_state & TST_ACT1) ? 1 : 0; now = patm_nor_read(sc, IDT_NOR_NOW) >> 2; if (now >= sc->tst_base[act] && now <= sc->tst_jump[act]) { /* not yet */ callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc); goto done; } sc->tst_state &= ~TST_WAIT; /* change back jump */ patm_sram_write(sc, sc->tst_jump[act], IDT_TST_BR | (sc->tst_base[act] << 2)); /* switch */ sc->tst_state ^= TST_ACT1; } if (sc->tst_state & TST_PENDING) /* we got another update request while the timer was running. 
*/ patm_tst_update(sc); done: mtx_unlock(&sc->tst_lock); } static const char * dump_scd(struct patm_softc *sc, struct patm_scd *scd) { u_int i; for (i = 0; i < IDT_TSQE_TAG_SPACE; i++) printf("on_card[%u] = %p\n", i, scd->on_card[i]); printf("space=%u tag=%u num_on_card=%u last_tag=%u\n", scd->space, scd->tag, scd->num_on_card, scd->last_tag); return (""); } Index: head/sys/dev/pccard/pccard.c =================================================================== --- head/sys/dev/pccard/pccard.c (revision 313981) +++ head/sys/dev/pccard/pccard.c (revision 313982) @@ -1,1473 +1,1473 @@ /* $NetBSD: pcmcia.c,v 1.23 2000/07/28 19:17:02 drochner Exp $ */ /*- * Copyright (c) 1997 Marc Horowitz. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Marc Horowitz. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #define PCCARDDEBUG /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, pccard, CTLFLAG_RD, 0, "PCCARD parameters"); int pccard_debug = 0; SYSCTL_INT(_hw_pccard, OID_AUTO, debug, CTLFLAG_RWTUN, &pccard_debug, 0, "pccard debug"); int pccard_cis_debug = 0; SYSCTL_INT(_hw_pccard, OID_AUTO, cis_debug, CTLFLAG_RWTUN, &pccard_cis_debug, 0, "pccard CIS debug"); #ifdef PCCARDDEBUG #define DPRINTF(arg) if (pccard_debug) printf arg #define DEVPRINTF(arg) if (pccard_debug) device_printf arg #define PRVERBOSE(arg) printf arg #define DEVPRVERBOSE(arg) device_printf arg #else #define DPRINTF(arg) #define DEVPRINTF(arg) #define PRVERBOSE(arg) if (bootverbose) printf arg #define DEVPRVERBOSE(arg) if (bootverbose) device_printf arg #endif static int pccard_ccr_read(struct pccard_function *pf, int ccr); static void pccard_ccr_write(struct pccard_function *pf, int ccr, int val); static int pccard_attach_card(device_t dev); static int pccard_detach_card(device_t dev); static void pccard_function_init(struct pccard_function *pf, int entry); static void pccard_function_free(struct pccard_function *pf); static int pccard_function_enable(struct pccard_function *pf); static void pccard_function_disable(struct pccard_function *pf); static int pccard_probe(device_t dev); static int pccard_attach(device_t dev); static int pccard_detach(device_t dev); static void pccard_print_resources(struct resource_list *rl, const char *name, int type, int count, const char *format); static int pccard_print_child(device_t dev, device_t child); static int pccard_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count); static int pccard_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp); static void pccard_delete_resource(device_t dev, device_t child, int type, int rid); static int pccard_set_res_flags(device_t dev, device_t child, int type, int rid, u_long flags); static int pccard_set_memory_offset(device_t dev, device_t child, int rid, uint32_t offset, uint32_t *deltap); static int pccard_probe_and_attach_child(device_t dev, device_t child, struct pccard_function *pf); static void pccard_probe_nomatch(device_t cbdev, device_t child); static int pccard_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); static void pccard_driver_added(device_t dev, driver_t *driver); static struct resource *pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int pccard_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static void pccard_child_detached(device_t parent, device_t dev); static int pccard_filter(void *arg); static void pccard_intr(void *arg); static int pccard_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int pccard_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie); static const struct pccard_product * pccard_do_product_lookup(device_t bus, device_t dev, const struct pccard_product *tab, size_t ent_size, pccard_product_match_fn matchfn); static int pccard_ccr_read(struct pccard_function *pf, int ccr) { return (bus_space_read_1(pf->pf_ccrt, 
	    pf->pf_ccrh, pf->pf_ccr_offset + ccr));
}

static void
pccard_ccr_write(struct pccard_function *pf, int ccr, int val)
{

	if ((pf->ccr_mask) & (1 << (ccr / 2))) {
		bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh,
		    pf->pf_ccr_offset + ccr, val);
	}
}

static int
pccard_set_default_descr(device_t dev)
{
	const char *vendorstr, *prodstr;
	uint32_t vendor, prod;
	char *str;

	if (pccard_get_vendor_str(dev, &vendorstr))
		return (0);
	if (pccard_get_product_str(dev, &prodstr))
		return (0);
	if (vendorstr != NULL && prodstr != NULL) {
		str = malloc(strlen(vendorstr) + strlen(prodstr) + 2,
		    M_DEVBUF, M_WAITOK);
		sprintf(str, "%s %s", vendorstr, prodstr);
		device_set_desc_copy(dev, str);
		free(str, M_DEVBUF);
	} else {
		if (pccard_get_vendor(dev, &vendor))
			return (0);
		if (pccard_get_product(dev, &prod))
			return (0);
		str = malloc(100, M_DEVBUF, M_WAITOK);
		snprintf(str, 100, "vendor=%#x product=%#x", vendor, prod);
		device_set_desc_copy(dev, str);
		free(str, M_DEVBUF);
	}
	return (0);
}

static int
pccard_attach_card(device_t dev)
{
	struct pccard_softc *sc = PCCARD_SOFTC(dev);
	struct pccard_function *pf;
	struct pccard_ivar *ivar;
	device_t child;
	int i;

	if (!STAILQ_EMPTY(&sc->card.pf_head)) {
		if (bootverbose || pccard_debug)
			device_printf(dev, "Card already inserted.\n");
	}

	DEVPRINTF((dev, "chip_socket_enable\n"));
	POWER_ENABLE_SOCKET(device_get_parent(dev), dev);

	DEVPRINTF((dev, "read_cis\n"));
	pccard_read_cis(sc);

	DEVPRINTF((dev, "check_cis_quirks\n"));
	pccard_check_cis_quirks(dev);

	/*
	 * bail now if the card has no functions, or if there was an error in
	 * the cis.
	 */
	if (sc->card.error) {
		device_printf(dev, "CARD ERROR!\n");
		return (1);
	}
	if (STAILQ_EMPTY(&sc->card.pf_head)) {
		device_printf(dev, "Card has no functions!\n");
		return (1);
	}

	if (bootverbose || pccard_debug)
		pccard_print_cis(dev);

	DEVPRINTF((dev, "functions scanning\n"));
	i = -1;
	STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) {
		i++;
		if (STAILQ_EMPTY(&pf->cfe_head)) {
			device_printf(dev,
			    "Function %d has no config entries!\n", i);
			continue;
		}
		pf->sc = sc;
		pf->cfe = NULL;
		pf->dev = NULL;
	}
	DEVPRINTF((dev, "Card has %d functions. pccard_mfc is %d\n", i + 1,
	    pccard_mfc(sc)));

	STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) {
		if (STAILQ_EMPTY(&pf->cfe_head))
			continue;
		ivar = malloc(sizeof(struct pccard_ivar), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		resource_list_init(&ivar->resources);
		child = device_add_child(dev, NULL, -1);
		device_set_ivars(child, ivar);
		ivar->pf = pf;
		pf->dev = child;
		pccard_probe_and_attach_child(dev, child, pf);
	}
	return (0);
}

static int
pccard_probe_and_attach_child(device_t dev, device_t child,
    struct pccard_function *pf)
{
	struct pccard_softc *sc = PCCARD_SOFTC(dev);
	int error;

	/*
	 * In NetBSD, the drivers are responsible for activating each
	 * function of a card and selecting the config to use.  In
	 * FreeBSD, all that's done automatically in the typical lazy
	 * way we do device resource allocation (except we pick the
	 * cfe up front).  This is the biggest departure from the
	 * inherited NetBSD model, apart from the FreeBSD resource code.
	 *
	 * This seems to work well in practice for most cards.
	 * However, there are two cases that are problematic.  If a
	 * driver wishes to pick and choose which config entry to use,
	 * then this method falls down.  These are usually older
	 * cards.  In addition, there are some cards that have
	 * multiple hardware units on the cards, but present only one
	 * CIS chain.  These cards are combination cards, but only one
	 * of these units can be on at a time.  (An illustrative probe
	 * sketch follows; the mechanism it relies on is described in
	 * the next paragraph.)
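	 *
	 * As an illustrative sketch only (hypothetical driver, not part
	 * of this file), a probe routine overriding the cfe through
	 * pccard_select_cfe(), defined later in this file, would look
	 * roughly like:
	 *
	 *	static int
	 *	foo_pccard_probe(device_t dev)
	 *	{
	 *		if (pccard_select_cfe(dev, 1) != 0)
	 *			return (ENXIO);
	 *		return (0);
	 *	}
	 *
	 * where returning 0 unconditionally keeps the driver out of the
	 * bidding described below, and config entry 1 is a made-up index.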
* * To overcome this limitation, while preserving the basic * model, the probe routine can select a cfe and try to * activate it. If that succeeds, then we'll keep track of * and let that information persist until we attach the card. * Probe routines that do this MUST return 0, and cannot * participate in the bidding process for a device. This * seems harsh until you realize that if a probe routine knows * enough to override the cfe we pick, then chances are very * very good that it is the only driver that could hope to * cope with the card. Bidding is for generic drivers, and * while some of them may also match, none of them will do * configuration override. */ error = device_probe(child); if (error != 0) goto out; pccard_function_init(pf, -1); if (sc->sc_enabled_count == 0) POWER_ENABLE_SOCKET(device_get_parent(dev), dev); if (pccard_function_enable(pf) == 0 && pccard_set_default_descr(child) == 0 && device_attach(child) == 0) { DEVPRINTF((sc->dev, "function %d CCR at %d offset %#x " "mask %#x: %#x %#x %#x %#x, %#x %#x %#x %#x, %#x\n", pf->number, pf->pf_ccr_window, pf->pf_ccr_offset, pf->ccr_mask, pccard_ccr_read(pf, 0x00), pccard_ccr_read(pf, 0x02), pccard_ccr_read(pf, 0x04), pccard_ccr_read(pf, 0x06), pccard_ccr_read(pf, 0x0A), pccard_ccr_read(pf, 0x0C), pccard_ccr_read(pf, 0x0E), pccard_ccr_read(pf, 0x10), pccard_ccr_read(pf, 0x12))); return (0); } error = ENXIO; out:; /* * Probe may fail AND also try to select a cfe, if so, free * it. This is how we do cfe override. Or the attach fails. * Either way, we have to clean up. */ if (pf->cfe != NULL) pccard_function_disable(pf); pf->cfe = NULL; pccard_function_free(pf); return error; } static int pccard_detach_card(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; struct pccard_config_entry *cfe; struct pccard_ivar *devi; int state; /* * We are running on either the PCCARD socket's event thread * or in user context detaching a device by user request. 
*/ STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (pf->dev == NULL) continue; state = device_get_state(pf->dev); if (state == DS_ATTACHED || state == DS_BUSY) device_detach(pf->dev); if (pf->cfe != NULL) pccard_function_disable(pf); pccard_function_free(pf); devi = PCCARD_IVAR(pf->dev); device_delete_child(dev, pf->dev); free(devi, M_DEVBUF); } if (sc->sc_enabled_count == 0) POWER_DISABLE_SOCKET(device_get_parent(dev), dev); while (NULL != (pf = STAILQ_FIRST(&sc->card.pf_head))) { while (NULL != (cfe = STAILQ_FIRST(&pf->cfe_head))) { STAILQ_REMOVE_HEAD(&pf->cfe_head, cfe_list); free(cfe, M_DEVBUF); } STAILQ_REMOVE_HEAD(&sc->card.pf_head, pf_list); free(pf, M_DEVBUF); } STAILQ_INIT(&sc->card.pf_head); return (0); } static const struct pccard_product * pccard_do_product_lookup(device_t bus, device_t dev, const struct pccard_product *tab, size_t ent_size, pccard_product_match_fn matchfn) { const struct pccard_product *ent; int matches; uint32_t vendor; uint32_t prod; const char *vendorstr; const char *prodstr; const char *cis3str; const char *cis4str; #ifdef DIAGNOSTIC if (sizeof *ent > ent_size) panic("pccard_product_lookup: bogus ent_size %jd", (intmax_t) ent_size); #endif if (pccard_get_vendor(dev, &vendor)) return (NULL); if (pccard_get_product(dev, &prod)) return (NULL); if (pccard_get_vendor_str(dev, &vendorstr)) return (NULL); if (pccard_get_product_str(dev, &prodstr)) return (NULL); if (pccard_get_cis3_str(dev, &cis3str)) return (NULL); if (pccard_get_cis4_str(dev, &cis4str)) return (NULL); for (ent = tab; ent->pp_vendor != 0; ent = (const struct pccard_product *) ((const char *) ent + ent_size)) { matches = 1; if (ent->pp_vendor == PCCARD_VENDOR_ANY && ent->pp_product == PCCARD_PRODUCT_ANY && ent->pp_cis[0] == NULL && ent->pp_cis[1] == NULL) { if (ent->pp_name) device_printf(dev, "Total wildcard entry ignored for %s\n", ent->pp_name); continue; } if (matches && ent->pp_vendor != PCCARD_VENDOR_ANY && vendor != ent->pp_vendor) matches = 0; if (matches && ent->pp_product != PCCARD_PRODUCT_ANY && prod != ent->pp_product) matches = 0; if (matches && ent->pp_cis[0] && (vendorstr == NULL || strcmp(ent->pp_cis[0], vendorstr) != 0)) matches = 0; if (matches && ent->pp_cis[1] && (prodstr == NULL || strcmp(ent->pp_cis[1], prodstr) != 0)) matches = 0; if (matches && ent->pp_cis[2] && (cis3str == NULL || strcmp(ent->pp_cis[2], cis3str) != 0)) matches = 0; if (matches && ent->pp_cis[3] && (cis4str == NULL || strcmp(ent->pp_cis[3], cis4str) != 0)) matches = 0; if (matchfn != NULL) matches = (*matchfn)(dev, ent, matches); if (matches) return (ent); } return (NULL); } /** * @brief pccard_select_cfe * * Select a cfe entry to use. Should be called from the pccard's probe * routine after it knows for sure that it wants this card. * * XXX I think we need to make this symbol be static, ala the kobj stuff * we do for everything else. This is a quick hack. */ int pccard_select_cfe(device_t dev, int entry) { struct pccard_ivar *devi = PCCARD_IVAR(dev); struct pccard_function *pf = devi->pf; pccard_function_init(pf, entry); return (pf->cfe ? 0 : ENOMEM); } /* * Initialize a PCCARD function. May be called as long as the function is * disabled. * * Note: pccard_function_init should not keep resources allocated. It should * only set them up ala isa pnp, set the values in the rl lists, and return. * Any resource held after pccard_function_init is called is a bug. However, * the bus routines to get the resources also assume that pccard_function_init * does this, so they need to be fixed too. 
*/ static void pccard_function_init(struct pccard_function *pf, int entry) { struct pccard_config_entry *cfe; struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list *rl = &devi->resources; struct resource_list_entry *rle; - struct resource *r = 0; + struct resource *r = NULL; struct pccard_ce_iospace *ios; struct pccard_ce_memspace *mems; device_t bus; rman_res_t start, end, len; int i, rid, spaces; if (pf->pf_flags & PFF_ENABLED) { printf("pccard_function_init: function is enabled"); return; } /* * Driver probe routine requested a specific entry already * that succeeded. */ if (pf->cfe != NULL) return; /* * walk the list of configuration entries until we find one that * we can allocate all the resources to. */ bus = device_get_parent(pf->dev); STAILQ_FOREACH(cfe, &pf->cfe_head, cfe_list) { if (cfe->iftype != PCCARD_IFTYPE_IO) continue; if (entry != -1 && cfe->number != entry) continue; spaces = 0; for (i = 0; i < cfe->num_iospace; i++) { ios = cfe->iospace + i; start = ios->start; if (start) end = start + ios->length - 1; else end = ~0; DEVPRINTF((bus, "I/O rid %d start %#jx end %#jx\n", i, start, end)); rid = i; len = ios->length; r = bus_alloc_resource(bus, SYS_RES_IOPORT, &rid, start, end, len, rman_make_alignment_flags(len)); if (r == NULL) { DEVPRINTF((bus, "I/O rid %d failed\n", i)); goto not_this_one; } rle = resource_list_add(rl, SYS_RES_IOPORT, rid, rman_get_start(r), rman_get_end(r), len); if (rle == NULL) panic("Cannot add resource rid %d IOPORT", rid); rle->res = r; spaces++; } for (i = 0; i < cfe->num_memspace; i++) { mems = cfe->memspace + i; start = mems->cardaddr + mems->hostaddr; if (start) end = start + mems->length - 1; else end = ~0; DEVPRINTF((bus, "Memory rid %d start %#jx end %#jx\ncardaddr %#jx hostaddr %#jx length %#jx\n", i, start, end, mems->cardaddr, mems->hostaddr, mems->length)); rid = i; len = mems->length; r = bus_alloc_resource(bus, SYS_RES_MEMORY, &rid, start, end, len, rman_make_alignment_flags(len)); if (r == NULL) { DEVPRINTF((bus, "Memory rid %d failed\n", i)); // goto not_this_one; continue; } rle = resource_list_add(rl, SYS_RES_MEMORY, rid, rman_get_start(r), rman_get_end(r), len); if (rle == NULL) panic("Cannot add resource rid %d MEM", rid); rle->res = r; spaces++; } if (spaces == 0) { DEVPRINTF((bus, "Neither memory nor I/O mapped\n")); goto not_this_one; } if (cfe->irqmask) { rid = 0; r = bus_alloc_resource_any(bus, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (r == NULL) { DEVPRINTF((bus, "IRQ rid %d failed\n", rid)); goto not_this_one; } rle = resource_list_add(rl, SYS_RES_IRQ, rid, rman_get_start(r), rman_get_end(r), 1); if (rle == NULL) panic("Cannot add resource rid %d IRQ", rid); rle->res = r; } /* If we get to here, we've allocated all we need */ pf->cfe = cfe; break; not_this_one:; DEVPRVERBOSE((bus, "Allocation failed for cfe %d\n", cfe->number)); resource_list_purge(rl); } } /* * Free resources allocated by pccard_function_init(), May be called as long * as the function is disabled. * * NOTE: This function should be unnecessary. pccard_function_init should * never keep resources initialized. */ static void pccard_function_free(struct pccard_function *pf) { struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list_entry *rle; if (pf->pf_flags & PFF_ENABLED) { printf("pccard_function_free: function is enabled"); return; } STAILQ_FOREACH(rle, &devi->resources, link) { if (rle->res) { if (rman_get_device(rle->res) != pf->sc->dev) device_printf(pf->sc->dev, "function_free: Resource still owned by " "child, oops. 
" "(type=%d, rid=%d, addr=%#jx)\n", rle->type, rle->rid, rman_get_start(rle->res)); BUS_RELEASE_RESOURCE(device_get_parent(pf->sc->dev), pf->sc->dev, rle->type, rle->rid, rle->res); rle->res = NULL; } } resource_list_free(&devi->resources); } static void pccard_mfc_adjust_iobase(struct pccard_function *pf, rman_res_t addr, rman_res_t offset, rman_res_t size) { bus_size_t iosize, tmp; if (addr != 0) { if (pf->pf_mfc_iomax == 0) { pf->pf_mfc_iobase = addr + offset; pf->pf_mfc_iomax = pf->pf_mfc_iobase + size; } else { /* this makes the assumption that nothing overlaps */ if (pf->pf_mfc_iobase > addr + offset) pf->pf_mfc_iobase = addr + offset; if (pf->pf_mfc_iomax < addr + offset + size) pf->pf_mfc_iomax = addr + offset + size; } } tmp = pf->pf_mfc_iomax - pf->pf_mfc_iobase; /* round up to nearest (2^n)-1 */ for (iosize = 1; iosize < tmp; iosize <<= 1) ; iosize--; DEVPRINTF((pf->dev, "MFC: I/O base %#jx IOSIZE %#jx\n", (uintmax_t)pf->pf_mfc_iobase, (uintmax_t)(iosize + 1))); pccard_ccr_write(pf, PCCARD_CCR_IOBASE0, pf->pf_mfc_iobase & 0xff); pccard_ccr_write(pf, PCCARD_CCR_IOBASE1, (pf->pf_mfc_iobase >> 8) & 0xff); pccard_ccr_write(pf, PCCARD_CCR_IOBASE2, 0); pccard_ccr_write(pf, PCCARD_CCR_IOBASE3, 0); pccard_ccr_write(pf, PCCARD_CCR_IOSIZE, iosize); } /* Enable a PCCARD function */ static int pccard_function_enable(struct pccard_function *pf) { struct pccard_function *tmp; int reg; device_t dev = pf->sc->dev; if (pf->cfe == NULL) { DEVPRVERBOSE((dev, "No config entry could be allocated.\n")); return (ENOMEM); } if (pf->pf_flags & PFF_ENABLED) return (0); pf->sc->sc_enabled_count++; /* * it's possible for different functions' CCRs to be in the same * underlying page. Check for that. */ STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { if ((tmp->pf_flags & PFF_ENABLED) && (pf->ccr_base >= (tmp->ccr_base - tmp->pf_ccr_offset)) && ((pf->ccr_base + PCCARD_CCR_SIZE) <= (tmp->ccr_base - tmp->pf_ccr_offset + tmp->pf_ccr_realsize))) { pf->pf_ccrt = tmp->pf_ccrt; pf->pf_ccrh = tmp->pf_ccrh; pf->pf_ccr_realsize = tmp->pf_ccr_realsize; /* * pf->pf_ccr_offset = (tmp->pf_ccr_offset - * tmp->ccr_base) + pf->ccr_base; */ /* pf->pf_ccr_offset = (tmp->pf_ccr_offset + pf->ccr_base) - tmp->ccr_base; */ pf->pf_ccr_window = tmp->pf_ccr_window; break; } } if (tmp == NULL) { pf->ccr_rid = 0; pf->ccr_res = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY, &pf->ccr_rid, PCCARD_MEM_PAGE_SIZE, RF_ACTIVE); if (!pf->ccr_res) goto bad; DEVPRINTF((dev, "ccr_res == %#jx-%#jx, base=%#x\n", rman_get_start(pf->ccr_res), rman_get_end(pf->ccr_res), pf->ccr_base)); CARD_SET_RES_FLAGS(device_get_parent(dev), dev, SYS_RES_MEMORY, pf->ccr_rid, PCCARD_A_MEM_ATTR); CARD_SET_MEMORY_OFFSET(device_get_parent(dev), dev, pf->ccr_rid, pf->ccr_base, &pf->pf_ccr_offset); pf->pf_ccrt = rman_get_bustag(pf->ccr_res); pf->pf_ccrh = rman_get_bushandle(pf->ccr_res); pf->pf_ccr_realsize = 1; } reg = (pf->cfe->number & PCCARD_CCR_OPTION_CFINDEX); reg |= PCCARD_CCR_OPTION_LEVIREQ; if (pccard_mfc(pf->sc)) { reg |= (PCCARD_CCR_OPTION_FUNC_ENABLE | PCCARD_CCR_OPTION_ADDR_DECODE); /* PCCARD_CCR_OPTION_IRQ_ENABLE set elsewhere as needed */ } pccard_ccr_write(pf, PCCARD_CCR_OPTION, reg); reg = 0; if ((pf->cfe->flags & PCCARD_CFE_IO16) == 0) reg |= PCCARD_CCR_STATUS_IOIS8; if (pf->cfe->flags & PCCARD_CFE_AUDIO) reg |= PCCARD_CCR_STATUS_AUDIO; pccard_ccr_write(pf, PCCARD_CCR_STATUS, reg); pccard_ccr_write(pf, PCCARD_CCR_SOCKETCOPY, 0); if (pccard_mfc(pf->sc)) pccard_mfc_adjust_iobase(pf, 0, 0, 0); #ifdef PCCARDDEBUG if (pccard_debug) { 
STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { device_printf(tmp->sc->dev, "function %d CCR at %d offset %#x: " "%#x %#x %#x %#x, %#x %#x %#x %#x, %#x\n", tmp->number, tmp->pf_ccr_window, tmp->pf_ccr_offset, pccard_ccr_read(tmp, 0x00), pccard_ccr_read(tmp, 0x02), pccard_ccr_read(tmp, 0x04), pccard_ccr_read(tmp, 0x06), pccard_ccr_read(tmp, 0x0A), pccard_ccr_read(tmp, 0x0C), pccard_ccr_read(tmp, 0x0E), pccard_ccr_read(tmp, 0x10), pccard_ccr_read(tmp, 0x12)); } } #endif pf->pf_flags |= PFF_ENABLED; return (0); bad: /* * Decrement the reference count, and power down the socket, if * necessary. */ pf->sc->sc_enabled_count--; DEVPRINTF((dev, "bad --enabled_count = %d\n", pf->sc->sc_enabled_count)); return (1); } /* Disable PCCARD function. */ static void pccard_function_disable(struct pccard_function *pf) { struct pccard_function *tmp; device_t dev = pf->sc->dev; if (pf->cfe == NULL) panic("pccard_function_disable: function not initialized"); if ((pf->pf_flags & PFF_ENABLED) == 0) return; if (pf->intr_handler != NULL) { struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list_entry *rle = resource_list_find(&devi->resources, SYS_RES_IRQ, 0); if (rle == NULL) panic("Can't disable an interrupt with no IRQ res\n"); BUS_TEARDOWN_INTR(dev, pf->dev, rle->res, pf->intr_handler_cookie); } /* * it's possible for different functions' CCRs to be in the same * underlying page. Check for that. Note we mark us as disabled * first to avoid matching ourself. */ pf->pf_flags &= ~PFF_ENABLED; STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { if ((tmp->pf_flags & PFF_ENABLED) && (pf->ccr_base >= (tmp->ccr_base - tmp->pf_ccr_offset)) && ((pf->ccr_base + PCCARD_CCR_SIZE) <= (tmp->ccr_base - tmp->pf_ccr_offset + tmp->pf_ccr_realsize))) break; } /* Not used by anyone else; unmap the CCR. */ if (tmp == NULL) { bus_release_resource(dev, SYS_RES_MEMORY, pf->ccr_rid, pf->ccr_res); pf->ccr_res = NULL; } /* * Decrement the reference count, and power down the socket, if * necessary. 
*/ pf->sc->sc_enabled_count--; } #define PCCARD_NPORT 2 #define PCCARD_NMEM 5 #define PCCARD_NIRQ 1 #define PCCARD_NDRQ 0 static int pccard_probe(device_t dev) { device_set_desc(dev, "16-bit PCCard bus"); return (0); } static int pccard_attach(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); int err; sc->dev = dev; sc->sc_enabled_count = 0; if ((err = pccard_device_create(sc)) != 0) return (err); STAILQ_INIT(&sc->card.pf_head); return (bus_generic_attach(dev)); } static int pccard_detach(device_t dev) { pccard_detach_card(dev); pccard_device_destroy(device_get_softc(dev)); return (0); } static int pccard_suspend(device_t self) { pccard_detach_card(self); return (0); } static int pccard_resume(device_t self) { return (0); } static void pccard_print_resources(struct resource_list *rl, const char *name, int type, int count, const char *format) { struct resource_list_entry *rle; int printed; int i; printed = 0; for (i = 0; i < count; i++) { rle = resource_list_find(rl, type, i); if (rle != NULL) { if (printed == 0) printf(" %s ", name); else if (printed > 0) printf(","); printed++; printf(format, rle->start); if (rle->count > 1) { printf("-"); printf(format, rle->start + rle->count - 1); } } else if (i > 3) { /* check the first few regardless */ break; } } } static int pccard_print_child(device_t dev, device_t child) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; int retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at"); if (devi != NULL) { pccard_print_resources(rl, "port", SYS_RES_IOPORT, PCCARD_NPORT, "%#lx"); pccard_print_resources(rl, "iomem", SYS_RES_MEMORY, PCCARD_NMEM, "%#lx"); pccard_print_resources(rl, "irq", SYS_RES_IRQ, PCCARD_NIRQ, "%ld"); pccard_print_resources(rl, "drq", SYS_RES_DRQ, PCCARD_NDRQ, "%ld"); retval += printf(" function %d config %d", devi->pf->number, devi->pf->cfe->number); } retval += bus_print_child_footer(dev, child); return (retval); } static int pccard_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= PCCARD_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= PCCARD_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= PCCARD_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= PCCARD_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); if (NULL != resource_list_alloc(rl, device_get_parent(dev), dev, type, &rid, start, start + count - 1, count, 0)) return 0; else return ENOMEM; } static int pccard_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL) return (ENOENT); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } static void pccard_delete_resource(device_t dev, device_t child, int type, int rid) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; resource_list_delete(rl, type, rid); } static int pccard_set_res_flags(device_t dev, device_t child, int type, int rid, u_long flags) { 
return (CARD_SET_RES_FLAGS(device_get_parent(dev), child, type, rid, flags)); } static int pccard_set_memory_offset(device_t dev, device_t child, int rid, uint32_t offset, uint32_t *deltap) { return (CARD_SET_MEMORY_OFFSET(device_get_parent(dev), child, rid, offset, deltap)); }
static void pccard_probe_nomatch(device_t bus, device_t child) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); int i; device_printf(bus, ""); printf(" (manufacturer=0x%04x, product=0x%04x, function_type=%d) " "at function %d\n", sc->card.manufacturer, sc->card.product, pf->function, pf->number); device_printf(bus, " CIS info: "); for (i = 0; i < 4 && sc->card.cis1_info[i] != NULL; i++) printf("%s%s", i > 0 ? ", " : "", sc->card.cis1_info[i]); printf("\n"); return; }
static int pccard_child_location_str(device_t bus, device_t child, char *buf, size_t buflen) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; snprintf(buf, buflen, "function=%d", pf->number); return (0); }
static int pccard_child_pnpinfo_str(device_t bus, device_t child, char *buf, size_t buflen) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); char cis0[128], cis1[128]; devctl_safe_quote(cis0, sc->card.cis1_info[0], sizeof(cis0)); devctl_safe_quote(cis1, sc->card.cis1_info[1], sizeof(cis1)); snprintf(buf, buflen, "manufacturer=0x%04x product=0x%04x " "cisvendor=\"%s\" cisproduct=\"%s\" function_type=%d", sc->card.manufacturer, sc->card.product, cis0, cis1, pf->function); return (0); }
static int pccard_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); if (!pf) panic("No pccard function pointer"); switch (which) { default: return (EINVAL); case PCCARD_IVAR_FUNCE_DISK: *(uint16_t *)result = pf->pf_funce_disk_interface | (pf->pf_funce_disk_power << 8); break; case PCCARD_IVAR_ETHADDR: bcopy(pf->pf_funce_lan_nid, result, ETHER_ADDR_LEN); break; case PCCARD_IVAR_VENDOR: *(uint32_t *)result = sc->card.manufacturer; break; case PCCARD_IVAR_PRODUCT: *(uint32_t *)result = sc->card.product; break; case PCCARD_IVAR_PRODEXT: *(uint16_t *)result = sc->card.prodext; break; case PCCARD_IVAR_FUNCTION: *(uint32_t *)result = pf->function; break; case PCCARD_IVAR_FUNCTION_NUMBER: *(uint32_t *)result = pf->number; break; case PCCARD_IVAR_VENDOR_STR: *(const char **)result = sc->card.cis1_info[0]; break; case PCCARD_IVAR_PRODUCT_STR: *(const char **)result = sc->card.cis1_info[1]; break; case PCCARD_IVAR_CIS3_STR: *(const char **)result = sc->card.cis1_info[2]; break; case PCCARD_IVAR_CIS4_STR: *(const char **)result = sc->card.cis1_info[3]; break; } return (0); }
static void pccard_driver_added(device_t dev, driver_t *driver) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; device_t child; STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (STAILQ_EMPTY(&pf->cfe_head)) continue; child = pf->dev; if (device_get_state(child) != DS_NOTPRESENT) continue; pccard_probe_and_attach_child(dev, child, pf); } return; }
static struct resource * pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pccard_ivar *dinfo; - struct resource_list_entry *rle = 0; + struct resource_list_entry *rle = NULL; int passthrough =
(device_get_parent(child) != dev); int isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); struct resource *r = NULL; /* XXX I'm no longer sure this is right */ if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); } dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, type, *rid); if (rle == NULL && isdefault) return (NULL); /* no resource of that type/rid */ if (rle == NULL || rle->res == NULL) { /* XXX Need to adjust flags */ r = bus_alloc_resource(dev, type, rid, start, end, count, flags); if (r == NULL) goto bad; resource_list_add(&dinfo->resources, type, *rid, rman_get_start(r), rman_get_end(r), count); rle = resource_list_find(&dinfo->resources, type, *rid); if (!rle) goto bad; rle->res = r; } /* * If dev doesn't own the device, then we can't give this device * out. */ if (rman_get_device(rle->res) != dev) return (NULL); rman_set_device(rle->res, child); if (flags & RF_ACTIVE) BUS_ACTIVATE_RESOURCE(dev, child, type, *rid, rle->res); return (rle->res); bad:; device_printf(dev, "WARNING: Resource not reserved by pccard\n"); return (NULL); } static int pccard_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pccard_ivar *dinfo; int passthrough = (device_get_parent(child) != dev); - struct resource_list_entry *rle = 0; + struct resource_list_entry *rle = NULL; if (passthrough) return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r); dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, type, rid); if (!rle) { device_printf(dev, "Allocated resource not found, " "%d %#x %#jx %#jx\n", type, rid, rman_get_start(r), rman_get_size(r)); return ENOENT; } if (!rle->res) { device_printf(dev, "Allocated resource not recorded\n"); return ENOENT; } /* * Deactivate the resource (since it is being released), and * assign it to the bus. */ BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, rle->res); rman_set_device(rle->res, dev); return (0); } static void pccard_child_detached(device_t parent, device_t dev) { struct pccard_ivar *ivar = PCCARD_IVAR(dev); struct pccard_function *pf = ivar->pf; pccard_function_disable(pf); } static int pccard_filter(void *arg) { struct pccard_function *pf = (struct pccard_function*) arg; int reg; int doisr = 1; /* * MFC cards know if they interrupted, so we have to ack the * interrupt and call the ISR. Non-MFC cards don't have these * bits, so they always get called. Many non-MFC cards have * this bit set always upon read, but some do not. * * We always ack the interrupt, even if there's no ISR * for the card. This is done on the theory that acking * the interrupt will pacify the card enough to keep an * interrupt storm from happening. Of course this won't * help in the non-MFC case. * * This has no impact for MPSAFEness of the client drivers. * We register this with whatever flags the intr_handler * was registered with. All these functions are MPSAFE. 
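 *
 * Illustrative registration from a client driver (the foo_* names
 * are hypothetical):
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, foo_filter, foo_intr, sc,
 *	    &sc->irq_cookie);
 *
 * pccard_setup_intr() below interposes pccard_filter(), so the CCR
 * ack described here happens before foo_filter() is consulted, and
 * foo_intr() runs from the ithread only when the filter path
 * returns FILTER_SCHEDULE_THREAD.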
*/ if (pccard_mfc(pf->sc)) { reg = pccard_ccr_read(pf, PCCARD_CCR_STATUS); if (reg & PCCARD_CCR_STATUS_INTR) pccard_ccr_write(pf, PCCARD_CCR_STATUS, reg & ~PCCARD_CCR_STATUS_INTR); else doisr = 0; } if (doisr) { if (pf->intr_filter != NULL) return (pf->intr_filter(pf->intr_handler_arg)); return (FILTER_SCHEDULE_THREAD); } return (FILTER_STRAY); } static void pccard_intr(void *arg) { struct pccard_function *pf = (struct pccard_function*) arg; pf->intr_handler(pf->intr_handler_arg); } static int pccard_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; int err; if (pf->intr_filter != NULL || pf->intr_handler != NULL) panic("Only one interrupt handler per function allowed"); err = bus_generic_setup_intr(dev, child, irq, flags, pccard_filter, intr ? pccard_intr : NULL, pf, cookiep); if (err != 0) return (err); pf->intr_filter = filt; pf->intr_handler = intr; pf->intr_handler_arg = arg; pf->intr_handler_cookie = *cookiep; if (pccard_mfc(sc)) { pccard_ccr_write(pf, PCCARD_CCR_OPTION, pccard_ccr_read(pf, PCCARD_CCR_OPTION) | PCCARD_CCR_OPTION_IREQ_ENABLE); } return (0); } static int pccard_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; int ret; if (pccard_mfc(sc)) { pccard_ccr_write(pf, PCCARD_CCR_OPTION, pccard_ccr_read(pf, PCCARD_CCR_OPTION) & ~PCCARD_CCR_OPTION_IREQ_ENABLE); } ret = bus_generic_teardown_intr(dev, child, r, cookie); if (ret == 0) { pf->intr_handler = NULL; pf->intr_handler_arg = NULL; pf->intr_handler_cookie = NULL; } return (ret); } static int pccard_activate_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; switch(type) { case SYS_RES_IOPORT: /* * We need to adjust IOBASE[01] and IOSIZE if we're an MFC * card. */ if (pccard_mfc(pf->sc)) pccard_mfc_adjust_iobase(pf, rman_get_start(r), 0, rman_get_size(r)); break; default: break; } return (bus_generic_activate_resource(brdev, child, type, rid, r)); } static int pccard_deactivate_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { /* XXX undo pccard_activate_resource? XXX */ return (bus_generic_deactivate_resource(brdev, child, type, rid, r)); } static int pccard_attr_read_impl(device_t brdev, device_t child, uint32_t offset, uint8_t *val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Optimization. Most of the time, devices want to access * the same page of the attribute memory that the CCR is in. * We take advantage of this fact here. */ if (offset / PCCARD_MEM_PAGE_SIZE == pf->ccr_base / PCCARD_MEM_PAGE_SIZE) *val = bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, offset % PCCARD_MEM_PAGE_SIZE); else { CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, offset, &offset); *val = bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, offset); CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, pf->ccr_base, &offset); } return 0; } static int pccard_attr_write_impl(device_t brdev, device_t child, uint32_t offset, uint8_t val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Optimization. 
Most of the time, devices want to access * the same page of the attribute memory that the CCR is in. * We take advantage of this fact here. */ if (offset / PCCARD_MEM_PAGE_SIZE == pf->ccr_base / PCCARD_MEM_PAGE_SIZE) bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, offset % PCCARD_MEM_PAGE_SIZE, val); else { CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, offset, &offset); bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, offset, val); CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, pf->ccr_base, &offset); } return 0; } static int pccard_ccr_read_impl(device_t brdev, device_t child, uint32_t offset, uint8_t *val) { struct pccard_ivar *devi = PCCARD_IVAR(child); *val = pccard_ccr_read(devi->pf, offset); DEVPRINTF((child, "ccr_read of %#x (%#x) is %#x\n", offset, devi->pf->pf_ccr_offset, *val)); return 0; } static int pccard_ccr_write_impl(device_t brdev, device_t child, uint32_t offset, uint8_t val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Can't use pccard_ccr_write since client drivers may access * registers not contained in the 'mask' if they are non-standard. */ DEVPRINTF((child, "ccr_write of %#x to %#x (%#x)\n", val, offset, devi->pf->pf_ccr_offset)); bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + offset, val); return 0; } static device_method_t pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pccard_probe), DEVMETHOD(device_attach, pccard_attach), DEVMETHOD(device_detach, pccard_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pccard_suspend), DEVMETHOD(device_resume, pccard_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pccard_print_child), DEVMETHOD(bus_driver_added, pccard_driver_added), DEVMETHOD(bus_child_detached, pccard_child_detached), DEVMETHOD(bus_alloc_resource, pccard_alloc_resource), DEVMETHOD(bus_release_resource, pccard_release_resource), DEVMETHOD(bus_activate_resource, pccard_activate_resource), DEVMETHOD(bus_deactivate_resource, pccard_deactivate_resource), DEVMETHOD(bus_setup_intr, pccard_setup_intr), DEVMETHOD(bus_teardown_intr, pccard_teardown_intr), DEVMETHOD(bus_set_resource, pccard_set_resource), DEVMETHOD(bus_get_resource, pccard_get_resource), DEVMETHOD(bus_delete_resource, pccard_delete_resource), DEVMETHOD(bus_probe_nomatch, pccard_probe_nomatch), DEVMETHOD(bus_read_ivar, pccard_read_ivar), DEVMETHOD(bus_child_pnpinfo_str, pccard_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, pccard_child_location_str), /* Card Interface */ DEVMETHOD(card_set_res_flags, pccard_set_res_flags), DEVMETHOD(card_set_memory_offset, pccard_set_memory_offset), DEVMETHOD(card_attach_card, pccard_attach_card), DEVMETHOD(card_detach_card, pccard_detach_card), DEVMETHOD(card_do_product_lookup, pccard_do_product_lookup), DEVMETHOD(card_cis_scan, pccard_scan_cis), DEVMETHOD(card_attr_read, pccard_attr_read_impl), DEVMETHOD(card_attr_write, pccard_attr_write_impl), DEVMETHOD(card_ccr_read, pccard_ccr_read_impl), DEVMETHOD(card_ccr_write, pccard_ccr_write_impl), { 0, 0 } }; static driver_t pccard_driver = { "pccard", pccard_methods, sizeof(struct pccard_softc) }; devclass_t pccard_devclass; /* Maybe we need to have a slot device? 
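 *
 * A client driver hooks onto this bus with the usual newbus glue,
 * for example (hypothetical foo(4) driver):
 *
 *	DRIVER_MODULE(foo, pccard, foo_pccard_driver, foo_devclass, 0, 0);
 *	MODULE_DEPEND(foo, pccard, 1, 1, 1);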
*/ DRIVER_MODULE(pccard, pcic, pccard_driver, pccard_devclass, 0, 0); DRIVER_MODULE(pccard, cbb, pccard_driver, pccard_devclass, 0, 0); MODULE_VERSION(pccard, 1); Index: head/sys/dev/pms/RefTisa/sallsdk/spc/sainit.c =================================================================== --- head/sys/dev/pms/RefTisa/sallsdk/spc/sainit.c (revision 313981) +++ head/sys/dev/pms/RefTisa/sallsdk/spc/sainit.c (revision 313982) @@ -1,4664 +1,4664 @@ /******************************************************************************* *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED *WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ********************************************************************************/ /*******************************************************************************/ /*! \file sainit.c * \brief The file implements the functions to initialize the LL layer * */ /******************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #include #ifdef SA_ENABLE_TRACE_FUNCTIONS #ifdef siTraceFileID #undef siTraceFileID #endif #define siTraceFileID 'F' #endif bit32 gLLDebugLevel = 3; #if defined(SALLSDK_DEBUG) bit32 gLLDebugLevelSet = 0; // block reinitialize from updating bit32 gLLLogFuncDebugLevel = 0; bit32 gLLSoftResetCounter = 0; #endif bit32 gPollForMissingInt; #ifdef FW_EVT_LOG_TST -void *eventLogAddress = 0; +void *eventLogAddress = NULL; #endif extern bit32 gWait_3; extern bit32 gWait_2; bit32 gFPGA_TEST = 0; // If set unblock fpga functions /******************************************************************************/ /*! 
\brief Get the memory and lock requirement from LL layer * * Get the memory and lock requirement from LL layer * * \param agRoot Handles for this instance of SAS/SATA hardware * \param swConfig Pointer to the software configuration * \param memoryRequirement Point to the data structure that holds the different * chunks of memory that are required * \param usecsPerTick micro-seconds per tick for the LL layer * \param maxNumLocks maximum number of locks for the LL layer * * \return -void- * */ /*******************************************************************************/ GLOBAL void saGetRequirements( agsaRoot_t *agRoot, agsaSwConfig_t *swConfig, agsaMemoryRequirement_t *memoryRequirement, bit32 *usecsPerTick, bit32 *maxNumLocks ) { bit32 memoryReqCount = 0; bit32 i; static mpiConfig_t mpiConfig; static mpiMemReq_t mpiMemoryRequirement; /* sanity check */ SA_ASSERT((agNULL != swConfig), ""); SA_ASSERT((agNULL != memoryRequirement), ""); SA_ASSERT((agNULL != usecsPerTick), ""); SA_ASSERT((agNULL != maxNumLocks), ""); si_memset(&mpiMemoryRequirement, 0, sizeof(mpiMemReq_t)); si_memset(&mpiConfig, 0, sizeof(mpiConfig_t)); SA_DBG1(("saGetRequirements:agRoot %p swConfig %p memoryRequirement %p usecsPerTick %p maxNumLocks %p\n",agRoot, swConfig,memoryRequirement,usecsPerTick,maxNumLocks)); SA_DBG1(("saGetRequirements: usecsPerTick 0x%x (%d)\n",*usecsPerTick,*usecsPerTick)); /* Get Resource Requirements for SPC MPI */ /* Set the default/specified requirements swConfig from TD layer */ siConfiguration(agRoot, &mpiConfig, agNULL, swConfig); mpiRequirementsGet(&mpiConfig, &mpiMemoryRequirement); /* memory requirement for saRoot, CACHE memory */ memoryRequirement->agMemory[LLROOT_MEM_INDEX].singleElementLength = sizeof(agsaLLRoot_t); memoryRequirement->agMemory[LLROOT_MEM_INDEX].numElements = 1; memoryRequirement->agMemory[LLROOT_MEM_INDEX].totalLength = sizeof(agsaLLRoot_t); memoryRequirement->agMemory[LLROOT_MEM_INDEX].alignment = sizeof(void *); memoryRequirement->agMemory[LLROOT_MEM_INDEX].type = AGSA_CACHED_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[LLROOT_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[LLROOT_MEM_INDEX].singleElementLength, memoryRequirement->agMemory[LLROOT_MEM_INDEX].totalLength, memoryRequirement->agMemory[LLROOT_MEM_INDEX].alignment, memoryRequirement->agMemory[LLROOT_MEM_INDEX].type )); /* memory requirement for Device Links, CACHE memory */ memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].singleElementLength = sizeof(agsaDeviceDesc_t); memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].numElements = swConfig->numDevHandles; memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].totalLength = sizeof(agsaDeviceDesc_t) * swConfig->numDevHandles; memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].alignment = sizeof(void *); memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].type = AGSA_CACHED_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[DEVICELINK_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].singleElementLength, memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].totalLength, memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].alignment, memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].type )); /* memory requirement for IORequest Links, CACHE memory */ memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].singleElementLength = sizeof(agsaIORequestDesc_t); /* Add SA_RESERVED_REQUEST_COUNT to guarantee quality 
of service */ memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].numElements = swConfig->maxActiveIOs + SA_RESERVED_REQUEST_COUNT; memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].totalLength = sizeof(agsaIORequestDesc_t) * memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].numElements; memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].alignment = sizeof(void *); memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].type = AGSA_CACHED_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[IOREQLINK_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].singleElementLength, memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].totalLength, memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].alignment, memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].type )); /* memory requirement for Timer Links, CACHE memory */ memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].singleElementLength = sizeof(agsaTimerDesc_t); memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].numElements = NUM_TIMERS; memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].totalLength = sizeof(agsaTimerDesc_t) * NUM_TIMERS; memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].alignment = sizeof(void *); memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].type = AGSA_CACHED_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[TIMERLINK_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].singleElementLength, memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].totalLength, memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].alignment, memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].type )); #ifdef SA_ENABLE_TRACE_FUNCTIONS /* memory requirement for LL trace memory */ memoryRequirement->agMemory[LL_FUNCTION_TRACE].singleElementLength = 1; memoryRequirement->agMemory[LL_FUNCTION_TRACE].numElements = swConfig->TraceBufferSize; memoryRequirement->agMemory[LL_FUNCTION_TRACE].totalLength = swConfig->TraceBufferSize; memoryRequirement->agMemory[LL_FUNCTION_TRACE].alignment = sizeof(void *); memoryRequirement->agMemory[LL_FUNCTION_TRACE].type = AGSA_CACHED_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[LL_FUNCTION_TRACE] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[LL_FUNCTION_TRACE].singleElementLength, memoryRequirement->agMemory[LL_FUNCTION_TRACE].totalLength, memoryRequirement->agMemory[LL_FUNCTION_TRACE].alignment, memoryRequirement->agMemory[LL_FUNCTION_TRACE].type )); #endif /* END SA_ENABLE_TRACE_FUNCTIONS */ #ifdef FAST_IO_TEST { agsaMem_t *agMemory = memoryRequirement->agMemory; /* memory requirement for Super IO CACHE memory */ agMemory[LL_FAST_IO].singleElementLength = sizeof(saFastRequest_t); agMemory[LL_FAST_IO].numElements = LL_FAST_IO_SIZE; agMemory[LL_FAST_IO].totalLength = LL_FAST_IO_SIZE * agMemory[LL_FAST_IO].singleElementLength; agMemory[LL_FAST_IO].alignment = sizeof(void*); agMemory[LL_FAST_IO].type = AGSA_CACHED_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[LL_FAST_IO] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[LL_FAST_IO].singleElementLength, memoryRequirement->agMemory[LL_FAST_IO].totalLength, memoryRequirement->agMemory[LL_FAST_IO].alignment, memoryRequirement->agMemory[LL_FAST_IO].type )); } #endif #ifdef SA_ENABLE_HDA_FUNCTIONS { agsaMem_t *agMemory = memoryRequirement->agMemory; /* memory requirement for HDA FW image */ 
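/*
 * Consumer-side sketch (illustrative only; os_alloc_region() is a
 * hypothetical stand-in for the OS layer's allocator, not an API of
 * this driver): each region published in this table must be
 * satisfied before saInitialize() is called, roughly
 *
 *	for (i = 0; i < memoryRequirement->count; i++) {
 *		agsaMem_t *m = &memoryRequirement->agMemory[i];
 *		m->virtPtr = os_alloc_region(m->totalLength,
 *		    m->alignment, m->type);
 *		// AGSA_DMA_MEM regions also need phyAddrUpper and
 *		// phyAddrLower filled in
 *	}
 */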
agMemory[HDA_DMA_BUFFER].singleElementLength = (1024 * 1024); /* must be greater than size of aap1 fw image */ agMemory[HDA_DMA_BUFFER].numElements = 1; agMemory[HDA_DMA_BUFFER].totalLength = agMemory[HDA_DMA_BUFFER].numElements * agMemory[HDA_DMA_BUFFER].singleElementLength; agMemory[HDA_DMA_BUFFER].alignment = 32; agMemory[HDA_DMA_BUFFER].type = AGSA_DMA_MEM; memoryReqCount ++; SA_DBG1(("saGetRequirements: agMemory[HDA_DMA_BUFFER] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryRequirement->agMemory[HDA_DMA_BUFFER].singleElementLength, memoryRequirement->agMemory[HDA_DMA_BUFFER].totalLength, memoryRequirement->agMemory[HDA_DMA_BUFFER].alignment, memoryRequirement->agMemory[HDA_DMA_BUFFER].type )); } #endif /* SA_ENABLE_HDA_FUNCTIONS */ /* memory requirement for MPI MSGU layer, DMA memory */ for ( i = 0; i < mpiMemoryRequirement.count; i ++ ) { memoryRequirement->agMemory[memoryReqCount].singleElementLength = mpiMemoryRequirement.region[i].elementSize; memoryRequirement->agMemory[memoryReqCount].numElements = mpiMemoryRequirement.region[i].numElements; memoryRequirement->agMemory[memoryReqCount].totalLength = mpiMemoryRequirement.region[i].totalLength; memoryRequirement->agMemory[memoryReqCount].alignment = mpiMemoryRequirement.region[i].alignment; memoryRequirement->agMemory[memoryReqCount].type = mpiMemoryRequirement.region[i].type; SA_DBG1(("saGetRequirements:MPI agMemory[%d] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n", memoryReqCount, memoryRequirement->agMemory[memoryReqCount].singleElementLength, memoryRequirement->agMemory[memoryReqCount].totalLength, memoryRequirement->agMemory[memoryReqCount].alignment, memoryRequirement->agMemory[memoryReqCount].type )); memoryReqCount ++; } /* requirement for locks */ if (swConfig->param3 == agNULL) { *maxNumLocks = (LL_IOREQ_IBQ_LOCK + AGSA_MAX_INBOUND_Q ); SA_DBG1(("saGetRequirements: param3 == agNULL maxNumLocks %d\n", *maxNumLocks )); } else { agsaQueueConfig_t *queueConfig; queueConfig = (agsaQueueConfig_t *)swConfig->param3; *maxNumLocks = (LL_IOREQ_IBQ_LOCK_PARM + queueConfig->numInboundQueues ); SA_DBG1(("saGetRequirements: maxNumLocks %d\n", *maxNumLocks )); } /* setup the time tick */ *usecsPerTick = SA_USECS_PER_TICK; SA_ASSERT(memoryReqCount < AGSA_NUM_MEM_CHUNKS, "saGetRequirements: Exceed max number of memory place holder"); /* set up memory requirement count */ memoryRequirement->count = memoryReqCount; swConfig->legacyInt_X = 1; swConfig->max_MSI_InterruptVectors = 32; swConfig->max_MSIX_InterruptVectors = 64;//16; SA_DBG1(("saGetRequirements: swConfig->stallUsec %d\n",swConfig->stallUsec )); #ifdef SA_CONFIG_MDFD_REGISTRY SA_DBG1(("saGetRequirements: swConfig->disableMDF %d\n",swConfig->disableMDF)); #endif /*SA_CONFIG_MDFD_REGISTRY*/ /*SA_DBG1(("saGetRequirements: swConfig->enableDIF %d\n",swConfig->enableDIF ));*/ /*SA_DBG1(("saGetRequirements: swConfig->enableEncryption %d\n",swConfig->enableEncryption ));*/ #ifdef SA_ENABLE_HDA_FUNCTIONS swConfig->hostDirectAccessSupport = 1; swConfig->hostDirectAccessMode = 0; #else swConfig->hostDirectAccessSupport = 0; swConfig->hostDirectAccessMode = 0; #endif } /******************************************************************************/ /*! 
\brief Initialize the Hardware * * Initialize the Hardware * * \param agRoot Handles for this instance of SAS/SATA hardware * \param memoryAllocated Point to the data structure that holds the different chunks of memory that are required * \param hwConfig Pointer to the hardware configuration * \param swConfig Pointer to the software configuration * \param usecsPerTick micro-seconds per tick for the LL layer * * \return If initialization is successful * - \e AGSA_RC_SUCCESS initialization is successful * - \e AGSA_RC_FAILURE initialization is not successful */ /*******************************************************************************/ GLOBAL bit32 saInitialize( agsaRoot_t *agRoot, agsaMemoryRequirement_t *memoryAllocated, agsaHwConfig_t *hwConfig, agsaSwConfig_t *swConfig, bit32 usecsPerTick ) { agsaLLRoot_t *saRoot; agsaDeviceDesc_t *pDeviceDesc; agsaIORequestDesc_t *pRequestDesc; agsaTimerDesc_t *pTimerDesc; agsaPort_t *pPort; agsaPortMap_t *pPortMap; agsaDeviceMap_t *pDeviceMap; agsaIOMap_t *pIOMap; bit32 maxNumIODevices; bit32 i, j; static mpiMemReq_t mpiMemoryAllocated; bit32 Tried_NO_HDA = agFALSE; bit32 Double_Reset_HDA = agFALSE; bit32 ret = AGSA_RC_SUCCESS; #ifdef FAST_IO_TEST void *fr; /* saFastRequest_t */ bit32 size; bit32 alignment; #endif /* sanity check */ SA_ASSERT((agNULL != agRoot), ""); SA_ASSERT((agNULL != memoryAllocated), ""); SA_ASSERT((agNULL != hwConfig), ""); SA_ASSERT((agNULL != swConfig), ""); SA_ASSERT((LLROOT_MEM_INDEX < memoryAllocated->count), ""); SA_ASSERT((DEVICELINK_MEM_INDEX < memoryAllocated->count), ""); SA_ASSERT((IOREQLINK_MEM_INDEX < memoryAllocated->count), ""); SA_ASSERT((TIMERLINK_MEM_INDEX < memoryAllocated->count), ""); si_memset(&mpiMemoryAllocated, 0, sizeof(mpiMemReq_t)); si_macro_check(agRoot); SA_DBG1(("saInitialize: WAIT_INCREMENT %d\n", WAIT_INCREMENT )); SA_DBG1(("saInitialize: usecsPerTick %d\n", usecsPerTick )); if(! smIS_SPC(agRoot)) { if(! 
smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: ossaHwRegReadConfig32 ID reads as %08X\n", ossaHwRegReadConfig32(agRoot,0 ) )); SA_DBG1(("saInitialize: expect %08X or %08X or\n", VEN_DEV_SPCV, VEN_DEV_SPCVE)); SA_DBG1(("saInitialize: expect %08X or %08X or\n", VEN_DEV_SPCVP, VEN_DEV_SPCVEP)); SA_DBG1(("saInitialize: expect %08X or %08X\n", VEN_DEV_ADAPVEP, VEN_DEV_ADAPVP)); return AGSA_RC_FAILURE; } } if( smIS_SPC(agRoot) && smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: Macro error !smIS_SPC %d smIS_SPCv %d smIS_SFC %d\n",smIS_SPC(agRoot),smIS_SPCV(agRoot), smIS_SFC(agRoot) )); return AGSA_RC_FAILURE; } /* Check the memory allocated */ for ( i = 0; i < memoryAllocated->count; i ++ ) { /* If memory allocation failed */ if (memoryAllocated->agMemory[i].singleElementLength && memoryAllocated->agMemory[i].numElements) { if ( (0 != memoryAllocated->agMemory[i].numElements) && (0 == memoryAllocated->agMemory[i].totalLength) ) { /* return failure */ SA_DBG1(("saInitialize:AGSA_RC_FAILURE Memory[%d] singleElementLength = 0x%x numElements = 0x%x NOT allocated\n", i, memoryAllocated->agMemory[i].singleElementLength, memoryAllocated->agMemory[i].numElements)); ret = AGSA_RC_FAILURE; return ret; } else { SA_DBG1(("saInitialize: Memory[%d] singleElementLength = 0x%x numElements = 0x%x allocated %p\n", i, memoryAllocated->agMemory[i].singleElementLength, memoryAllocated->agMemory[i].numElements, memoryAllocated->agMemory[i].virtPtr)); } } } /* Get the saRoot memory address */ saRoot = (agsaLLRoot_t *) (memoryAllocated->agMemory[LLROOT_MEM_INDEX].virtPtr); SA_ASSERT((agNULL != saRoot), "saRoot"); if(agNULL == saRoot) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE saRoot\n")); return AGSA_RC_FAILURE; } agRoot->sdkData = (void *) saRoot; SA_DBG1(("saInitialize: saRoot %p\n",saRoot)); if ( (memoryAllocated != &saRoot->memoryAllocated) || (hwConfig != &saRoot->hwConfig) || (swConfig != &saRoot->swConfig) ) { agsaMemoryRequirement_t *memA = &saRoot->memoryAllocated; agsaHwConfig_t *hwC = &saRoot->hwConfig; agsaSwConfig_t *swC = &saRoot->swConfig; /* Copy data here */ *memA = *memoryAllocated; *hwC = *hwConfig; *swC = *swConfig; } #if defined(SALLSDK_DEBUG) if(gLLDebugLevelSet == 0) { gLLDebugLevelSet = 1; gLLDebugLevel = swConfig->sallDebugLevel & 0xF; SA_DBG1(("saInitialize: gLLDebugLevel %x\n",gLLDebugLevel)); } #endif /* SALLSDK_DEBUG */ #ifdef SA_ENABLE_TRACE_FUNCTIONS saRoot->TraceBufferLength = memoryAllocated->agMemory[LL_FUNCTION_TRACE].totalLength; saRoot->TraceBuffer = memoryAllocated->agMemory[LL_FUNCTION_TRACE].virtPtr; siEnableTracing ( agRoot ); /* */ #endif /* SA_ENABLE_TRACE_FUNCTIONS */ #ifdef FAST_IO_TEST { agsaMem_t *agMemory = memoryAllocated->agMemory; /* memory requirement for Super IO CACHE memory */ size = sizeof(saRoot->freeFastReq) / sizeof(saRoot->freeFastReq[0]); SA_ASSERT(size == agMemory[LL_FAST_IO].numElements, ""); SA_ASSERT(agMemory[LL_FAST_IO].virtPtr, ""); SA_ASSERT((agMemory[LL_FAST_IO].singleElementLength == sizeof(saFastRequest_t)) && (agMemory[LL_FAST_IO].numElements == LL_FAST_IO_SIZE) && (agMemory[LL_FAST_IO].totalLength == agMemory[LL_FAST_IO].numElements * agMemory[LL_FAST_IO].singleElementLength), ""); for (i = 0, alignment = agMemory[LL_FAST_IO].alignment, fr = agMemory[LL_FAST_IO].virtPtr; i < size; i++, fr = (void*)((bitptr)fr + (bitptr)(((bit32)sizeof(saFastRequest_t) + alignment - 1) & ~(alignment - 1)))) { saRoot->freeFastReq[i] = fr; } saRoot->freeFastIdx = size; } #endif /* FAST_IO_TEST*/ smTraceFuncEnter(hpDBG_VERY_LOUD, "m1"); SA_DBG1(("saInitialize: 
swConfig->PortRecoveryResetTimer %x\n",swConfig->PortRecoveryResetTimer )); SA_DBG1(("saInitialize: hwDEVICE_ID_VENDID 0x%08x\n", ossaHwRegReadConfig32(agRoot,0))); SA_DBG1(("saInitialize: CFGSTAT CFGCMD 0x%08x\n", ossaHwRegReadConfig32(agRoot,4))); SA_DBG1(("saInitialize: CLSCODE REVID 0x%08x\n", ossaHwRegReadConfig32(agRoot,8))); SA_DBG1(("saInitialize: BIST DT HDRTYPE LATTIM CLSIZE 0x%08x\n", ossaHwRegReadConfig32(agRoot,12))); SA_DBG1(("saInitialize: hwSVID 0x%08x\n", ossaHwRegReadConfig32(agRoot,44))); #ifdef SA_ENABLE_PCI_TRIGGER SA_DBG1(("saInitialize: SA_ENABLE_PCI_TRIGGER a 0x%08x %p\n", saRoot->swConfig.PCI_trigger,&saRoot->swConfig.PCI_trigger)); if( saRoot->swConfig.PCI_trigger & PCI_TRIGGER_INIT_TEST ) { SA_DBG1(("saInitialize: SA_ENABLE_PCI_TRIGGER 0x%08x %p\n", saRoot->swConfig.PCI_trigger,&saRoot->swConfig.PCI_trigger)); saRoot->swConfig.PCI_trigger &= ~PCI_TRIGGER_INIT_TEST; siPCITriger(agRoot); } #endif /* SA_ENABLE_PCI_TRIGGER */ saRoot->ChipId = (ossaHwRegReadConfig32(agRoot,0) & 0xFFFF0000); SA_DBG1(("saInitialize: saRoot->ChipId 0x%08x\n", saRoot->ChipId)); siUpdateBarOffsetTable(agRoot,saRoot->ChipId); if(saRoot->ChipId == VEN_DEV_SPC) { if(! smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: smIS_SPC macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m1"); return AGSA_RC_FAILURE; } SA_DBG1(("saInitialize: SPC \n" )); } else if(saRoot->ChipId == VEN_DEV_HIL ) { SA_DBG1(("saInitialize: SPC HIL\n" )); if(! smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: smIS_SPC macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCV) { SA_DBG1(("saInitialize: SPC V\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCVE) { SA_DBG1(("saInitialize: SPC VE\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCVP) { SA_DBG1(("saInitialize: SPC VP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCVEP) { SA_DBG1(("saInitialize: SPC VEP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_ADAPVP) { SA_DBG1(("saInitialize: Adaptec 8088\n" )); } else if(saRoot->ChipId == VEN_DEV_ADAPVEP) { SA_DBG1(("saInitialize: Adaptec 8089\n" )); } else if(saRoot->ChipId == VEN_DEV_SPC12V) { SA_DBG1(("saInitialize: SPC 12V\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12VE) { SA_DBG1(("saInitialize: SPC 12VE\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12VP) { SA_DBG1(("saInitialize: SPC 12VP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12VEP) { SA_DBG1(("saInitialize: SPC 12VEP\n" )); if(! 
smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADP) { SA_DBG1(("saInitialize: SPC 12ADP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'k', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADPE) { SA_DBG1(("saInitialize: SPC 12ADPE\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'l', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADPP) { SA_DBG1(("saInitialize: SPC 12ADPP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'm', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADPEP) { SA_DBG1(("saInitialize: SPC 12ADPEP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'n', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12SATA) { SA_DBG1(("saInitialize: SPC12SATA\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'o', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_9015) { SA_DBG1(("saInitialize: SPC 12V FPGA\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'p', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_9060) { SA_DBG1(("saInitialize: SPC 12V FPGA B\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'q', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SFC) { SA_DBG1(("saInitialize: SFC \n" )); } else { SA_DBG1(("saInitialize saRoot->ChipId %8X expect %8X or %8X\n", saRoot->ChipId,VEN_DEV_SPC, VEN_DEV_SPCV)); SA_ASSERT(0, "ChipId"); smTraceFuncExit(hpDBG_VERY_LOUD, 'r', "m1"); return AGSA_RC_FAILURE; } if( smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: Rev is A %d B %d C %d\n",smIsCfgSpcREV_A(agRoot),smIsCfgSpcREV_B(agRoot),smIsCfgSpcREV_C(agRoot))); } else { SA_DBG1(("saInitialize: Rev is A %d B %d C %d\n",smIsCfgVREV_A(agRoot),smIsCfgVREV_B(agRoot),smIsCfgVREV_C(agRoot))); } if( smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: LINK_CTRL 0x%08x Speed 0x%X Lanes 0x%X \n", ossaHwRegReadConfig32(agRoot,128), ((ossaHwRegReadConfig32(agRoot,128) & 0x000F0000) >> 16), ((ossaHwRegReadConfig32(agRoot,128) & 0x0FF00000) >> 20) )); } else { SA_DBG1(("saInitialize: LINK_CTRL 0x%08x Speed 0x%X Lanes 0x%X \n", ossaHwRegReadConfig32(agRoot,208), ((ossaHwRegReadConfig32(agRoot,208) & 0x000F0000) >> 16), ((ossaHwRegReadConfig32(agRoot,208) & 0x0FF00000) >> 20) )); } SA_DBG1(("saInitialize: V_SoftResetRegister %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_SoftResetRegister ))); /* SA_DBG1(("saInitialize:TOP_BOOT_STRAP STRAP_BIT %X\n", ossaHwRegReadExt(agRoot, PCIBAR1, 0) )); SA_DBG1(("SPC_REG_TOP_DEVICE_ID %8X expect %08X\n", ossaHwRegReadExt(agRoot, PCIBAR2, SPC_REG_TOP_DEVICE_ID), SPC_TOP_DEVICE_ID)); SA_DBG1(("SPC_REG_TOP_DEVICE_ID %8X expect %08X\n", siHalRegReadExt( agRoot, GEN_SPC_REG_TOP_DEVICE_ID,SPC_REG_TOP_DEVICE_ID ) , SPC_TOP_DEVICE_ID)); SA_DBG1(("SPC_REG_TOP_BOOT_STRAP %8X expect %08X\n", ossaHwRegReadExt(agRoot, PCIBAR2, SPC_REG_TOP_BOOT_STRAP), SPC_TOP_BOOT_STRAP)); 
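
   An equivalent, table-driven form of the ChipId validation above
   (an editorial sketch, not code from this driver; the SPC-family
   entries would carry the smIS_SPC() check instead):

	static const struct { bit32 id; int need_spcv; } chip_tbl[] = {
		{ VEN_DEV_SPCV, 1 }, { VEN_DEV_SPCVE, 1 },
		{ VEN_DEV_SPC12V, 1 }, { VEN_DEV_SPC12VE, 1 },
		{ VEN_DEV_SFC, 0 },
	};

	for (i = 0; i < sizeof(chip_tbl) / sizeof(chip_tbl[0]); i++) {
		if (saRoot->ChipId == chip_tbl[i].id) {
			if (chip_tbl[i].need_spcv && !smIS_SPCV(agRoot))
				return AGSA_RC_FAILURE;
			break;
		}
	}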
SA_DBG1(("swConfig->numSASDevHandles =%d\n", swConfig->numDevHandles)); */ smTrace(hpDBG_VERY_LOUD,"29",swConfig->numDevHandles); /* TP:29 swConfig->numDevHandles */ /* Setup Device link */ /* Save the information of allocated device Link memory */ saRoot->deviceLinkMem = memoryAllocated->agMemory[DEVICELINK_MEM_INDEX]; if(agNULL == saRoot->deviceLinkMem.virtPtr) { SA_ASSERT(0, "deviceLinkMem"); smTraceFuncExit(hpDBG_VERY_LOUD, 'q', "m1"); return AGSA_RC_FAILURE; } si_memset(saRoot->deviceLinkMem.virtPtr, 0, saRoot->deviceLinkMem.totalLength); SA_DBG2(("saInitialize: [%d] saRoot->deviceLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", DEVICELINK_MEM_INDEX, saRoot->deviceLinkMem.virtPtr, saRoot->deviceLinkMem.phyAddrLower, saRoot->deviceLinkMem.numElements, saRoot->deviceLinkMem.totalLength, saRoot->deviceLinkMem.type)); maxNumIODevices = swConfig->numDevHandles; SA_DBG2(("saInitialize: maxNumIODevices=%d, swConfig->numDevHandles=%d \n", maxNumIODevices, swConfig->numDevHandles)); #ifdef SA_ENABLE_PCI_TRIGGER SA_DBG1(("saInitialize: swConfig->PCI_trigger= 0x%x\n", swConfig->PCI_trigger)); #endif /* SA_ENABLE_PCI_TRIGGER */ /* Setup free IO Devices link list */ saLlistInitialize(&(saRoot->freeDevicesList)); for ( i = 0; i < (bit32) maxNumIODevices; i ++ ) { /* get the pointer to the device descriptor */ pDeviceDesc = (agsaDeviceDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->deviceLinkMem), i); /* Initialize device descriptor */ saLlinkInitialize(&(pDeviceDesc->linkNode)); pDeviceDesc->initiatorDevHandle.osData = agNULL; pDeviceDesc->initiatorDevHandle.sdkData = agNULL; pDeviceDesc->targetDevHandle.osData = agNULL; pDeviceDesc->targetDevHandle.sdkData = agNULL; pDeviceDesc->deviceType = SAS_SATA_UNKNOWN_DEVICE; pDeviceDesc->pPort = agNULL; pDeviceDesc->DeviceMapIndex = 0; saLlistInitialize(&(pDeviceDesc->pendingIORequests)); /* Add the device descriptor to the free IO device link list */ saLlistAdd(&(saRoot->freeDevicesList), &(pDeviceDesc->linkNode)); } /* Setup IO Request link */ /* Save the information of allocated IO Request Link memory */ saRoot->IORequestMem = memoryAllocated->agMemory[IOREQLINK_MEM_INDEX]; si_memset(saRoot->IORequestMem.virtPtr, 0, saRoot->IORequestMem.totalLength); SA_DBG2(("saInitialize: [%d] saRoot->IORequestMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", IOREQLINK_MEM_INDEX, saRoot->IORequestMem.virtPtr, saRoot->IORequestMem.phyAddrLower, saRoot->IORequestMem.numElements, saRoot->IORequestMem.totalLength, saRoot->IORequestMem.type)); /* Setup free IO Request link list */ saLlistIOInitialize(&(saRoot->freeIORequests)); saLlistIOInitialize(&(saRoot->freeReservedRequests)); for ( i = 0; i < swConfig->maxActiveIOs; i ++ ) { /* get the pointer to the request descriptor */ pRequestDesc = (agsaIORequestDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->IORequestMem), i); /* Initialize request descriptor */ saLlinkInitialize(&(pRequestDesc->linkNode)); pRequestDesc->valid = agFALSE; pRequestDesc->requestType = AGSA_REQ_TYPE_UNKNOWN; pRequestDesc->pIORequestContext = agNULL; pRequestDesc->HTag = i; pRequestDesc->pDevice = agNULL; pRequestDesc->pPort = agNULL; /* Add the request descriptor to the free Reserved Request link list */ /* SMP request must get service so reserve one request when first SMP completes */ if(saLlistIOGetCount(&(saRoot->freeReservedRequests)) < SA_RESERVED_REQUEST_COUNT) { saLlistIOAdd(&(saRoot->freeReservedRequests), &(pRequestDesc->linkNode)); } else { /* Add the request descriptor to the free IO Request link list */ 
saLlistIOAdd(&(saRoot->freeIORequests), &(pRequestDesc->linkNode)); } } /* Setup timer link */ /* Save the information of allocated timer Link memory */ saRoot->timerLinkMem = memoryAllocated->agMemory[TIMERLINK_MEM_INDEX]; si_memset(saRoot->timerLinkMem.virtPtr, 0, saRoot->timerLinkMem.totalLength); SA_DBG2(("saInitialize: [%d] saRoot->timerLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", TIMERLINK_MEM_INDEX, saRoot->timerLinkMem.virtPtr, saRoot->timerLinkMem.phyAddrLower, saRoot->timerLinkMem.numElements, saRoot->timerLinkMem.totalLength, saRoot->timerLinkMem.type ));
/* Setup free timer link list */ saLlistInitialize(&(saRoot->freeTimers)); for ( i = 0; i < NUM_TIMERS; i ++ ) { /* get the pointer to the timer descriptor */ pTimerDesc = (agsaTimerDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->timerLinkMem), i); /* Initialize timer descriptor */ saLlinkInitialize(&(pTimerDesc->linkNode)); pTimerDesc->valid = agFALSE; pTimerDesc->timeoutTick = 0; pTimerDesc->pfnTimeout = agNULL; pTimerDesc->Event = 0; pTimerDesc->pParm = agNULL; /* Add the timer descriptor to the free timer link list */ saLlistAdd(&(saRoot->freeTimers), &(pTimerDesc->linkNode)); } /* Setup valid timer link list */ saLlistInitialize(&(saRoot->validTimers));
/* Setup Phys */ /* Setup PhyCount */ saRoot->phyCount = (bit8) hwConfig->phyCount; /* Init Phy data structure */ for ( i = 0; i < saRoot->phyCount; i ++ ) { saRoot->phys[i].pPort = agNULL; saRoot->phys[i].phyId = (bit8) i; /* setup phy status is PHY_STOPPED */ PHY_STATUS_SET(&(saRoot->phys[i]), PHY_STOPPED); }
/* Setup Ports */ /* Setup PortCount */ saRoot->portCount = saRoot->phyCount; /* Setup free port link list */ saLlistInitialize(&(saRoot->freePorts)); for ( i = 0; i < saRoot->portCount; i ++ ) { /* get the pointer to the port */ pPort = &(saRoot->ports[i]); /* Initialize port */ saLlinkInitialize(&(pPort->linkNode)); pPort->portContext.osData = agNULL; pPort->portContext.sdkData = pPort; pPort->portId = 0; pPort->portIdx = (bit8) i; pPort->status = PORT_NORMAL; for ( j = 0; j < saRoot->phyCount; j ++ ) { pPort->phyMap[j] = agFALSE; } saLlistInitialize(&(pPort->listSASATADevices)); /* Add the port to the free port link list */ saLlistAdd(&(saRoot->freePorts), &(pPort->linkNode)); } /* Setup valid port link list */ saLlistInitialize(&(saRoot->validPorts));
/* Init sysIntsActive - default is interrupt enable */ saRoot->sysIntsActive = agFALSE; /* setup timer tick granularity */ saRoot->usecsPerTick = usecsPerTick; /* setup smallest timer increment for stall */ saRoot->minStallusecs = swConfig->stallUsec; SA_DBG1(("saInitialize: WAIT_INCREMENT %d\n" ,WAIT_INCREMENT )); if (0 == WAIT_INCREMENT) { saRoot->minStallusecs = WAIT_INCREMENT_DEFAULT; } /* initialize LL timer tick */ saRoot->timeTick = 0; /* initialize device (de)registration callback fns */ saRoot->DeviceRegistrationCB = agNULL; saRoot->DeviceDeregistrationCB = agNULL;
/* Initialize the PortMap for port context */ for ( i = 0; i < saRoot->portCount; i ++ ) { pPortMap = &(saRoot->PortMap[i]); pPortMap->PortContext = agNULL; pPortMap->PortID = PORT_MARK_OFF; pPortMap->PortStatus = PORT_NORMAL; saRoot->autoDeregDeviceflag[i] = 0; } /* Initialize the DeviceMap for device handle */ for ( i = 0; i < MAX_IO_DEVICE_ENTRIES; i ++ ) { pDeviceMap = &(saRoot->DeviceMap[i]); pDeviceMap->DeviceHandle = agNULL; pDeviceMap->DeviceIdFromFW = i; } /* Initialize the IOMap for IOrequest */ for ( i = 0; i < MAX_ACTIVE_IO_REQUESTS; i ++ ) { pIOMap = &(saRoot->IOMap[i]); pIOMap->IORequest = agNULL; pIOMap->Tag = MARK_OFF;
} /* setup mpi configuration */ if (!swConfig->param3) { /* default configuration */ siConfiguration(agRoot, &saRoot->mpiConfig, hwConfig, swConfig); } else { /* get from TD layer and save it */ agsaQueueConfig_t *dCFG = &saRoot->QueueConfig; agsaQueueConfig_t *sCFG = (agsaQueueConfig_t *)swConfig->param3; if (dCFG != sCFG) { *dCFG = *sCFG; if ((hwConfig->hwInterruptCoalescingTimer) || (hwConfig->hwInterruptCoalescingControl)) { for ( i = 0; i < sCFG->numOutboundQueues; i ++ ) { /* disable FW assisted coalescing */ sCFG->outboundQueues[i].interruptDelay = 0; sCFG->outboundQueues[i].interruptCount = 0; } if(smIS_SPC(agRoot)) { if (hwConfig->hwInterruptCoalescingTimer == 0) { hwConfig->hwInterruptCoalescingTimer = 1; SA_DBG1(("saInitialize:InterruptCoalescingTimer should not be zero. Force to 1\n")); } } } ret = siConfiguration(agRoot, &saRoot->mpiConfig, hwConfig, swConfig); if (AGSA_RC_FAILURE == ret) { SA_DBG1(("saInitialize failure queue number=%d\n", saRoot->QueueConfig.numInboundQueues)); agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'r', "m1"); return ret; } } } saRoot->swConfig.param3 = &saRoot->QueueConfig; mpiMemoryAllocated.count = memoryAllocated->count - MPI_MEM_INDEX; for ( i = 0; i < mpiMemoryAllocated.count; i ++ ) { mpiMemoryAllocated.region[i].virtPtr = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].virtPtr; mpiMemoryAllocated.region[i].appHandle = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].osHandle; mpiMemoryAllocated.region[i].physAddrUpper = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].phyAddrUpper; mpiMemoryAllocated.region[i].physAddrLower = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].phyAddrLower; mpiMemoryAllocated.region[i].totalLength = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].totalLength; mpiMemoryAllocated.region[i].numElements = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].numElements; mpiMemoryAllocated.region[i].elementSize = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].singleElementLength; mpiMemoryAllocated.region[i].alignment = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].alignment; mpiMemoryAllocated.region[i].type = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].type; SA_DBG2(("saInitialize: memoryAllocated->agMemory[%d] VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", (MPI_IBQ_OBQ_INDEX + i), memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].virtPtr, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].phyAddrLower, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].numElements, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].totalLength, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].type)); /* set to zeros */ SA_DBG1(("saInitialize: Zero memory region %d virt %p allocated %d\n", i,mpiMemoryAllocated.region[i].virtPtr, mpiMemoryAllocated.region[i].totalLength)); si_memset(mpiMemoryAllocated.region[i].virtPtr , 0,mpiMemoryAllocated.region[i].totalLength); } if ((!swConfig->max_MSI_InterruptVectors) && (!swConfig->max_MSIX_InterruptVectors) && (!swConfig->legacyInt_X)) { /* polling mode */ SA_DBG1(("saInitialize: configured as polling mode\n")); } else { SA_DBG1(("saInitialize: swConfig->max_MSI_InterruptVectors %d\n",swConfig->max_MSI_InterruptVectors)); SA_DBG1(("saInitialize: swConfig->max_MSIX_InterruptVectors %d\n",swConfig->max_MSIX_InterruptVectors)); if ((swConfig->legacyInt_X > 1) || (swConfig->max_MSI_InterruptVectors > 32) || (swConfig->max_MSIX_InterruptVectors > 64)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors A\n")); 
smTraceFuncExit(hpDBG_VERY_LOUD, 's', "m1"); return AGSA_RC_FAILURE; } if ((swConfig->legacyInt_X) && (swConfig->max_MSI_InterruptVectors)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors B\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 't', "m1"); return AGSA_RC_FAILURE; } else if ((swConfig->legacyInt_X) && (swConfig->max_MSIX_InterruptVectors)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors C\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'u', "m1"); return AGSA_RC_FAILURE; } else if ((swConfig->max_MSI_InterruptVectors) && (swConfig->max_MSIX_InterruptVectors)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors D\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'v', "m1"); return AGSA_RC_FAILURE; } } /* This section sets common interrupt for Legacy(IRQ) and MSI and MSIX types */ if(smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: SPC interrupts\n" )); if (swConfig->legacyInt_X) { saRoot->OurInterrupt = siOurLegacyInterrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacyInterrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacyInterrupts;/* Called in Delayed Int handler*/ } else if (swConfig->max_MSIX_InterruptVectors) { saRoot->OurInterrupt = siOurMSIXInterrupt; saRoot->DisableInterrupts = siDisableMSIXInterrupts; saRoot->ReEnableInterrupts = siReenableMSIXInterrupts; } else if (swConfig->max_MSI_InterruptVectors) { saRoot->OurInterrupt = siOurMSIInterrupt; saRoot->DisableInterrupts = siDisableMSIInterrupts; saRoot->ReEnableInterrupts = siReenableMSIInterrupts; } else { /* polling mode */ saRoot->OurInterrupt = siOurLegacyInterrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacyInterrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacyInterrupts;/* Called in Delayed Int handler*/ } } else { SA_DBG1(("saInitialize: SPC V interrupts\n" )); if (swConfig->legacyInt_X ) { SA_DBG1(("saInitialize: SPC V legacyInt_X\n" )); saRoot->OurInterrupt = siOurLegacy_V_Interrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacy_V_Interrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacy_V_Interrupts;/* Called in Delayed Int handler*/ } else if (swConfig->max_MSIX_InterruptVectors) { SA_DBG1(("saInitialize: SPC V max_MSIX_InterruptVectors %X\n", swConfig->max_MSIX_InterruptVectors)); saRoot->OurInterrupt = siOurMSIX_V_Interrupt; /* */ saRoot->DisableInterrupts = siDisableMSIX_V_Interrupts; saRoot->ReEnableInterrupts = siReenableMSIX_V_Interrupts; } else if (swConfig->max_MSI_InterruptVectors) { SA_DBG1(("saInitialize: SPC V max_MSI_InterruptVectors\n" )); saRoot->OurInterrupt = siOurMSIX_V_Interrupt; /* */ saRoot->DisableInterrupts = siDisableMSIX_V_Interrupts; saRoot->ReEnableInterrupts = siReenableMSIX_V_Interrupts; } else { /* polling mode */ SA_DBG1(("saInitialize: SPC V polling mode\n" )); saRoot->OurInterrupt = siOurLegacy_V_Interrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacy_V_Interrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacy_V_Interrupts;/* Called in Delayed Int handler*/ } SA_DBG1(("saInitialize: SPC V\n" )); } saRoot->Use64bit = (saRoot->QueueConfig.numOutboundQueues > 32 ) ? 
1 : 0; if( smIS64bInt(agRoot)) { SA_DBG1(("saInitialize: Use 64 bits for interrupts %d %d\n" ,saRoot->Use64bit, saRoot->QueueConfig.numOutboundQueues )); } else { SA_DBG1(("saInitialize: Use 32 bits for interrupts %d %d\n",saRoot->Use64bit , saRoot->QueueConfig.numOutboundQueues )); } #ifdef SA_LL_IBQ_PROTECT SA_DBG1(("saInitialize: Inbound locking defined since LL_IOREQ_IBQ0_LOCK %d\n",LL_IOREQ_IBQ0_LOCK)); #endif /* SA_LL_IBQ_PROTECT */ /* Disable interrupt */ saRoot->DisableInterrupts(agRoot, 0); SA_DBG1(("saInitialize: DisableInterrupts sysIntsActive %X\n" ,saRoot->sysIntsActive)); #ifdef SA_FW_TEST_BUNCH_STARTS saRoot->BunchStarts_Enable = FALSE; saRoot->BunchStarts_Threshold = 5; saRoot->BunchStarts_Pending = 0; saRoot->BunchStarts_TimeoutTicks = 10; // N x 100 ms #endif /* SA_FW_TEST_BUNCH_STARTS */ /* clear the interrupt vector bitmap */ for ( i = 0; i < MAX_NUM_VECTOR; i ++ ) { saRoot->interruptVecIndexBitMap[i] = 0; saRoot->interruptVecIndexBitMap1[i] = 0; } #if defined(SALLSDK_DEBUG) smTrace(hpDBG_VERY_LOUD,"2Y",0); /* TP:2Y SCRATCH_PAD */ SA_DBG1(("saInitialize: SCRATCH_PAD0 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_0))); SA_DBG1(("saInitialize: SCRATCH_PAD1 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1))); SA_DBG1(("saInitialize: SCRATCH_PAD2 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_2))); SA_DBG1(("saInitialize: SCRATCH_PAD3 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3))); #endif /* SALLSDK_DEBUG */ if(smIS_SPCV(agRoot)) { bit32 ScratchPad1 =0; bit32 ScratchPad3 =0; ScratchPad1 = ossaHwRegRead(agRoot,V_Scratchpad_1_Register); ScratchPad3 = ossaHwRegRead(agRoot,V_Scratchpad_3_Register); if((ScratchPad1 & SCRATCH_PAD1_V_RAAE_MASK) == SCRATCH_PAD1_V_RAAE_MASK) { if(((ScratchPad3 & SCRATCH_PAD3_V_ENC_MASK ) == SCRATCH_PAD3_V_ENC_DIS_ERR ) || ((ScratchPad3 & SCRATCH_PAD3_V_ENC_MASK ) == SCRATCH_PAD3_V_ENC_ENA_ERR ) ) { SA_DBG1(("saInitialize:Warning Encryption Issue SCRATCH_PAD3 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3))); } } } if( smIS_SPC(agRoot)) { #ifdef SA_ENABLE_HDA_FUNCTIONS TryWithHDA_ON: Double_Reset_HDA = TRUE; if (swConfig->hostDirectAccessSupport) { if (AGSA_RC_FAILURE == siHDAMode(agRoot, swConfig->hostDirectAccessMode, (agsaFwImg_t *)swConfig->param4)) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE siHDAMode\n")); agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'w', "m1"); return AGSA_RC_FAILURE; } else { SA_DBG1(("saInitialize:1 Going to HDA mode HDA 0x%X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); if(Double_Reset_HDA == agFALSE) { siSpcSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); SA_DBG1(("saInitialize: Double_Reset_HDA HDA 0x%X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); Double_Reset_HDA = TRUE; goto TryWithHDA_ON; } } } else { /* check FW is running */ if (BOOTTLOADERHDA_IDLE == (ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET) & HDA_STATUS_BITS)) { /* HDA mode */ SA_DBG1(("saInitialize: No HDA mode enable and FW is not running.\n")); if(Tried_NO_HDA != agTRUE ) { Tried_NO_HDA = TRUE; swConfig->hostDirectAccessSupport = 1; swConfig->hostDirectAccessMode = 1; siSpcSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); SA_DBG1(("saInitialize: 2 Going to HDA mode HDA %X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); goto TryWithHDA_ON; } else { SA_DBG1(("saInitialize: could not start HDA mode HDA %X 
\n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); smTraceFuncExit(hpDBG_VERY_LOUD, 'x', "m1"); return AGSA_RC_FAILURE; } smTraceFuncExit(hpDBG_VERY_LOUD, 'y', "m1"); return AGSA_RC_FAILURE; } } #else /* SA_ENABLE_HDA_FUNCTIONS */ /* check FW is running */ if (BOOTTLOADERHDA_IDLE == (ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET) & HDA_STATUS_BITS) ) { /* HDA mode */ SA_DBG1(("saInitialize: No HDA mode enable and FW is not running.\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'z', "m1"); return AGSA_RC_FAILURE; } #endif /* SA_ENABLE_HDA_FUNCTIONS */ } else { SA_DBG1(("saInitialize: SPCv swConfig->hostDirectAccessMode %d swConfig->hostDirectAccessSupport %d\n",swConfig->hostDirectAccessMode,swConfig->hostDirectAccessSupport)); if (swConfig->hostDirectAccessSupport) { bit32 hda_status; bit32 soft_reset_status = AGSA_RC_SUCCESS; SA_DBG1(("saInitialize: SPCv load HDA\n")); hda_status = (ossaHwRegReadExt(agRoot, PCIBAR0, SPC_V_HDA_RESPONSE_OFFSET+28)); SA_DBG1(("saInitialize: hda_status 0x%x\n",hda_status)); siScratchDump(agRoot); if( swConfig->hostDirectAccessMode == 0) { soft_reset_status = siSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); if(soft_reset_status != AGSA_RC_SUCCESS) { agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE soft_reset_status\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'A', "m1"); return AGSA_RC_FAILURE; } } if((hda_status & SPC_V_HDAR_RSPCODE_MASK) != SPC_V_HDAR_IDLE) { SA_DBG1(("saInitialize: hda_status not SPC_V_HDAR_IDLE 0x%08x\n", hda_status)); soft_reset_status = siSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); hda_status = (ossaHwRegReadExt(agRoot, PCIBAR0, SPC_V_HDA_RESPONSE_OFFSET+28)); if((hda_status & SPC_V_HDAR_RSPCODE_MASK) != SPC_V_HDAR_IDLE) { SA_DBG1(("saInitialize: 2 reset hda_status not SPC_V_HDAR_IDLE 0x%08x\n", hda_status)); } } if(soft_reset_status != AGSA_RC_SUCCESS) { agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE soft_reset_status A\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'B', "m1"); return AGSA_RC_FAILURE; } #ifdef SA_ENABLE_HDA_FUNCTIONS if (AGSA_RC_FAILURE == siHDAMode_V(agRoot, swConfig->hostDirectAccessMode, (agsaFwImg_t *)swConfig->param4)) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE siHDAMode_V\n")); siChipResetV(agRoot, SPC_HDASOFT_RESET_SIGNATURE); agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'C', "m1"); return AGSA_RC_FAILURE; } #endif /* SA_ENABLE_HDA_FUNCTIONS */ } else { SA_DBG1(("saInitialize: SPCv normal\n")); } } /* copy the table to the LL layer */ si_memcpy(&saRoot->mpiConfig.phyAnalogConfig, &hwConfig->phyAnalogConfig, sizeof(agsaPhyAnalogSetupTable_t)); #ifdef SALL_API_TEST /* Initialize the LL IO counter */ si_memset(&saRoot->LLCounters, 0, sizeof(agsaIOCountInfo_t)); #endif si_memset(&saRoot->IoErrorCount, 0, sizeof(agsaIOErrorEventStats_t)); si_memset(&saRoot->IoEventCount, 0, sizeof(agsaIOErrorEventStats_t)); if(smIS_SPC(agRoot)) { if( smIS_spc8081(agRoot)) { if (AGSA_RC_FAILURE == siBar4Shift(agRoot, MBIC_GSM_SM_BASE)) { SA_DBG1(("saInitialize: siBar4Shift FAILED ******************************************\n")); } } siSpcSoftReset(agRoot, SPC_SOFT_RESET_SIGNATURE); } if(smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: saRoot->ChipId == VEN_DEV_SPCV\n")); siChipResetV(agRoot, SPC_SOFT_RESET_SIGNATURE); } /* MPI Initialization */ ret = mpiInitialize(agRoot, &mpiMemoryAllocated, &saRoot->mpiConfig); SA_DBG1(("saInitialize: MaxOutstandingIO 0x%x swConfig->maxActiveIOs 0x%x\n", saRoot->ControllerInfo.maxPendingIO,saRoot->swConfig.maxActiveIOs 
)); #ifdef SA_ENABLE_HDA_FUNCTIONS if( ret == AGSA_RC_FAILURE && Tried_NO_HDA == agFALSE && smIS_SPC(agRoot)) { /* FW not flashed */ Tried_NO_HDA=agTRUE; swConfig->hostDirectAccessSupport = 1; swConfig->hostDirectAccessMode = 1; siSoftReset(agRoot, SPC_SOFT_RESET_SIGNATURE); SA_DBG1(("saInitialize: 3 Going to HDA mode HDA %X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); goto TryWithHDA_ON; } #endif /* SA_ENABLE_HDA_FUNCTIONS */ if( ret == AGSA_RC_FAILURE) { SA_DBG1(("saInitialize: AGSA_RC_FAILURE mpiInitialize\n")); SA_DBG1(("saInitialize: SCRATCH_PAD0 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_0_Register))); SA_DBG1(("saInitialize: SCRATCH_PAD1 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_1_Register))); SA_DBG1(("saInitialize: SCRATCH_PAD2 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_2_Register))); SA_DBG1(("saInitialize: SCRATCH_PAD3 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_3_Register))); if(saRoot->swConfig.fatalErrorInterruptEnable) { ossaDisableInterrupts(agRoot,saRoot->swConfig.fatalErrorInterruptVector ); } agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'D', "m1"); return ret; } /* setup hardware interrupt coalescing control and timer registers */ if(smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: SPC_V Not set hwInterruptCoalescingTimer\n" )); SA_DBG1(("saInitialize: SPC_V Not set hwInterruptCoalescingControl\n" )); } else { ossaHwRegWriteExt(agRoot, PCIBAR1, SPC_ICTIMER,hwConfig->hwInterruptCoalescingTimer ); ossaHwRegWriteExt(agRoot, PCIBAR1, SPC_ICCONTROL, hwConfig->hwInterruptCoalescingControl); } SA_DBG1(("saInitialize: swConfig->fatalErrorInterruptEnable %X\n",swConfig->fatalErrorInterruptEnable)); SA_DBG1(("saInitialize: saRoot->swConfig.fatalErrorInterruptVector %X\n",saRoot->swConfig.fatalErrorInterruptVector)); SA_DBG1(("saInitialize: swConfig->max_MSI_InterruptVectors %X\n",swConfig->max_MSI_InterruptVectors)); SA_DBG1(("saInitialize: swConfig->max_MSIX_InterruptVectors %X\n",swConfig->max_MSIX_InterruptVectors)); SA_DBG1(("saInitialize: swConfig->legacyInt_X %X\n",swConfig->legacyInt_X)); SA_DBG1(("saInitialize: swConfig->hostDirectAccessSupport %X\n",swConfig->hostDirectAccessSupport)); SA_DBG1(("saInitialize: swConfig->hostDirectAccessMode %X\n",swConfig->hostDirectAccessMode)); #ifdef SA_CONFIG_MDFD_REGISTRY SA_DBG1(("saInitialize: swConfig->disableMDF %X\n",swConfig->disableMDF)); #endif /*SA_CONFIG_MDFD_REGISTRY*/ /*SA_DBG1(("saInitialize: swConfig->enableDIF %X\n",swConfig->enableDIF));*/ /*SA_DBG1(("saInitialize: swConfig->enableEncryption %X\n",swConfig->enableEncryption));*/ /* log message if failure */ if (AGSA_RC_FAILURE == ret) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE mpiInitialize\n")); /* Assign chip status */ saRoot->chipStatus = CHIP_FATAL_ERROR; } else { /* Assign chip status */ saRoot->chipStatus = CHIP_NORMAL; #ifdef SA_FW_TIMER_READS_STATUS siTimerAdd(agRoot,SA_FW_TIMER_READS_STATUS_INTERVAL, siReadControllerStatus,0,agNULL ); #endif /* SA_FW_TIMER_READS_STATUS */ } if( ret == AGSA_RC_SUCCESS || ret == AGSA_RC_VERSION_UNTESTED) { if(gPollForMissingInt) { mpiOCQueue_t *circularQ; SA_DBG1(("saInitialize: saRoot->sysIntsActive %X\n",saRoot->sysIntsActive)); circularQ = &saRoot->outboundQueue[0]; OSSA_READ_LE_32(circularQ->agRoot, &circularQ->producerIdx, circularQ->piPointer, 0); SA_DBG1(("saInitialize: PI 0x%03x CI 0x%03x\n",circularQ->producerIdx, circularQ->consumerIdx)); } } /* If fatal error interrupt enable we need checking it during the interrupt */ 
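/*
 * Note on the block below: when fatalErrorInterruptEnable is set, the OS
 * layer's interrupt handler is also expected to service the dedicated
 * fatal-error vector.  The code that follows logs the outbound doorbell
 * set/mask registers before and after re-enabling that vector with
 * ossaReenableInterrupts(), so firmware fatal-error notifications can be
 * delivered once saInitialize() returns.
 */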
SA_DBG1(("saInitialize: swConfig.fatalErrorInterruptEnable %d\n",saRoot->swConfig.fatalErrorInterruptEnable)); SA_DBG1(("saInitialize: swConfig.fatalErrorInterruptVector %d\n",saRoot->swConfig.fatalErrorInterruptVector)); SA_DBG1(("saInitialize: swConfig->max_MSIX_InterruptVectors %X\n",swConfig->max_MSIX_InterruptVectors)); if(saRoot->swConfig.fatalErrorInterruptEnable) { SA_DBG1(("saInitialize: Doorbell_Set %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_Register), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_RegisterU))); SA_DBG1(("saInitialize: Doorbell_Mask %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_Register ), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_RegisterU ))); ossaReenableInterrupts(agRoot,saRoot->swConfig.fatalErrorInterruptVector ); SA_DBG1(("saInitialize: Doorbell_Set %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_Register), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_RegisterU))); SA_DBG1(("saInitialize: Doorbell_Mask %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_Register ), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_RegisterU ))); } SA_DBG1(("saInitialize: siDumpActiveIORequests\n")); siDumpActiveIORequests(agRoot, saRoot->swConfig.maxActiveIOs); smTraceFuncExit(hpDBG_VERY_LOUD, 'E', "m1"); /* return */ return ret; } #ifdef SA_FW_TIMER_READS_STATUS bit32 siReadControllerStatus( agsaRoot_t *agRoot, bit32 Event, void * pParm ) { bit32 to_ret =0; agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); mpiReadGSTable(agRoot, &saRoot->mpiGSTable); if(smIS_SPCV_2_IOP(agRoot)) { if(saRoot->Iop1Tcnt_last == saRoot->mpiGSTable.Iop1Tcnt ) SA_DBG2(("siReadControllerStatus: Iop1 %d STUCK\n", saRoot->mpiGSTable.Iop1Tcnt)); } if( saRoot->MsguTcnt_last == saRoot->mpiGSTable.MsguTcnt || saRoot->IopTcnt_last == saRoot->mpiGSTable.IopTcnt ) { SA_DBG1(("siReadControllerStatus: Msgu %d Iop %d\n",saRoot->mpiGSTable.MsguTcnt, saRoot->mpiGSTable.IopTcnt)); saFatalInterruptHandler(agRoot, saRoot->swConfig.fatalErrorInterruptVector ); } SA_DBG2(("siReadControllerStatus: Msgu %d Iop %d\n",saRoot->mpiGSTable.MsguTcnt, saRoot->mpiGSTable.IopTcnt)); saRoot->MsguTcnt_last = saRoot->mpiGSTable.MsguTcnt; saRoot->IopTcnt_last = saRoot->mpiGSTable.IopTcnt; saRoot->Iop1Tcnt_last = saRoot->mpiGSTable.Iop1Tcnt; if(gPollForMissingInt) { mpiOCQueue_t *circularQ; SA_DBG4(("siReadControllerStatus: saRoot->sysIntsActive %X\n",saRoot->sysIntsActive)); circularQ = &saRoot->outboundQueue[0]; OSSA_READ_LE_32(circularQ->agRoot, &circularQ->producerIdx, circularQ->piPointer, 0); if(circularQ->producerIdx != circularQ->consumerIdx) { SA_DBG1(("siReadControllerStatus: saRoot->sysIntsActive %X\n",saRoot->sysIntsActive)); SA_DBG1(("siReadControllerStatus: PI 0x%03x CI 0x%03x\n",circularQ->producerIdx, circularQ->consumerIdx)); SA_DBG1(("siReadControllerStatus:IN MSGU_READ_ODMR %08X\n",siHalRegReadExt(agRoot, GEN_MSGU_ODMR, V_Outbound_Doorbell_Mask_Set_Register ))); SA_DBG1(("siReadControllerStatus:MSGU_READ_ODR %08X\n",siHalRegReadExt(agRoot, GEN_MSGU_ODR, V_Outbound_Doorbell_Set_Register))); ossaHwRegWriteExt(agRoot, PCIBAR0,V_Outbound_Doorbell_Clear_Register, 0xFFFFFFFF ); } } siTimerAdd(agRoot,SA_FW_TIMER_READS_STATUS_INTERVAL, siReadControllerStatus,Event,pParm ); return(to_ret); } #endif /* SA_FW_TIMER_READS_STATUS */ /******************************************************************************/ /*! 
\brief Routine to do SPC configuration with default or specified values
 *
 *  Set up configuration table in LL Layer
 *
 *  \param agRoot    handles for this instance of SAS/SATA hardware
 *  \param mpiConfig MPI Configuration
 *  \param hwConfig  Pointer to the hardware configuration
 *  \param swConfig  Pointer to the software configuration
 *
 *  \return AGSA_RC_SUCCESS if the configuration was set up, AGSA_RC_FAILURE otherwise
 */
/*******************************************************************************/
GLOBAL bit32 siConfiguration(
  agsaRoot_t     *agRoot,
  mpiConfig_t    *mpiConfig,
  agsaHwConfig_t *hwConfig,
  agsaSwConfig_t *swConfig
  )
{
  agsaQueueConfig_t *queueConfig;
  bit32 intOption, enable64 = 0;
  bit8  i;

  /* sanity check */
  SA_ASSERT( (agNULL != agRoot), "");
  smTraceFuncEnter(hpDBG_VERY_LOUD,"m2");

  si_memset(mpiConfig, 0, sizeof(mpiConfig_t));
  SA_DBG1(("siConfiguration: si_memset mpiConfig\n"));

#if defined(SALLSDK_DEBUG)
  sidump_swConfig(swConfig);
#endif
  mpiConfig->mainConfig.custset = swConfig->FWConfig;

  SA_DBG1(("siConfiguration:custset %8X %8X\n",mpiConfig->mainConfig.custset,swConfig->FWConfig));

  if (swConfig->param3 == agNULL)
  {
    SA_DBG1(("siConfiguration: swConfig->param3 == agNULL\n"));
    /* initialize the mpiConfig */
    /* We configure the Host main part of configuration table */
    mpiConfig->mainConfig.iQNPPD_HPPD_GEvent           = 0;
    mpiConfig->mainConfig.outboundHWEventPID0_3        = 0;
    mpiConfig->mainConfig.outboundHWEventPID4_7        = 0;
    mpiConfig->mainConfig.outboundNCQEventPID0_3       = 0;
    mpiConfig->mainConfig.outboundNCQEventPID4_7       = 0;
    mpiConfig->mainConfig.outboundTargetITNexusEventPID0_3 = 0;
    mpiConfig->mainConfig.outboundTargetITNexusEventPID4_7 = 0;
    mpiConfig->mainConfig.outboundTargetSSPEventPID0_3 = 0;
    mpiConfig->mainConfig.outboundTargetSSPEventPID4_7 = 0;
    mpiConfig->mainConfig.ioAbortDelay                 = 0;
    mpiConfig->mainConfig.upperEventLogAddress         = 0;
    mpiConfig->mainConfig.lowerEventLogAddress         = 0;
    mpiConfig->mainConfig.eventLogSize                 = MPI_LOGSIZE;
    mpiConfig->mainConfig.eventLogOption               = 0;
    mpiConfig->mainConfig.upperIOPeventLogAddress      = 0;
    mpiConfig->mainConfig.lowerIOPeventLogAddress      = 0;
    mpiConfig->mainConfig.IOPeventLogSize              = MPI_LOGSIZE;
    mpiConfig->mainConfig.IOPeventLogOption            = 0;
    mpiConfig->mainConfig.FatalErrorInterrupt          = 0;

    /* save the default value */
    mpiConfig->numInboundQueues     = AGSA_MAX_INBOUND_Q;
    mpiConfig->numOutboundQueues    = AGSA_MAX_OUTBOUND_Q;
    mpiConfig->maxNumInboundQueues  = AGSA_MAX_INBOUND_Q;
    mpiConfig->maxNumOutboundQueues = AGSA_MAX_OUTBOUND_Q;

    /* configure inbound queues */
    for ( i = 0; i < AGSA_MAX_INBOUND_Q; i ++ )
    {
      mpiConfig->inboundQueues[i].numElements = INBOUND_DEPTH_SIZE;
      mpiConfig->inboundQueues[i].elementSize = IOMB_SIZE64;
      mpiConfig->inboundQueues[i].priority    = MPI_QUEUE_NORMAL;
    }

    /* configure outbound queues */
    for ( i = 0; i < AGSA_MAX_OUTBOUND_Q; i ++ )
    {
      mpiConfig->outboundQueues[i].numElements        = OUTBOUND_DEPTH_SIZE;
      mpiConfig->outboundQueues[i].elementSize        = IOMB_SIZE64;
      mpiConfig->outboundQueues[i].interruptVector    = 0;
      mpiConfig->outboundQueues[i].interruptDelay     = 0;
      mpiConfig->outboundQueues[i].interruptThreshold = 0;
      /* always enable OQ interrupt */
      mpiConfig->outboundQueues[i].interruptEnable    = 1;
    }
  }
  else
  { /* param3 is not null */
    queueConfig = (agsaQueueConfig_t *)swConfig->param3;
#if defined(SALLSDK_DEBUG)
    sidump_Q_config( queueConfig );
#endif
    SA_DBG1(("siConfiguration: swConfig->param3 == %p\n",queueConfig));

    if ((queueConfig->numInboundQueues > AGSA_MAX_INBOUND_Q) ||
        (queueConfig->numOutboundQueues > AGSA_MAX_OUTBOUND_Q))
    {
      smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m2");
      SA_DBG1(("siConfiguration:AGSA_RC_FAILURE MAX_Q\n"));
      return AGSA_RC_FAILURE;
    }

    if ((queueConfig->numInboundQueues == 0 ||
queueConfig->numOutboundQueues == 0 )) { smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m2"); SA_DBG1(("siConfiguration:AGSA_RC_FAILURE NO_Q\n")); return AGSA_RC_FAILURE; } mpiConfig->mainConfig.eventLogSize = swConfig->sizefEventLog1 * KBYTES; mpiConfig->mainConfig.eventLogOption = swConfig->eventLog1Option; mpiConfig->mainConfig.IOPeventLogSize = swConfig->sizefEventLog2 * KBYTES; mpiConfig->mainConfig.IOPeventLogOption = swConfig->eventLog2Option; if ((queueConfig->numInboundQueues > IQ_NUM_32) || (queueConfig->numOutboundQueues > OQ_NUM_32)) { enable64 = 1; } if (agNULL == hwConfig) { intOption = 0; } else { #if defined(SALLSDK_DEBUG) sidump_hwConfig(hwConfig); #endif if(smIS_SPCV(agRoot)) { intOption = 0; } else { intOption = hwConfig->intReassertionOption & INT_OPTION; } } /* Enable SGPIO */ swConfig->sgpioSupportEnable = 1; /* set bit for normal priority or high priority path */ /* set fatal error interrupt enable and vector */ /* set Interrupt Reassertion enable and 64 IQ/OQ enable */ mpiConfig->mainConfig.FatalErrorInterrupt = (swConfig->fatalErrorInterruptEnable) /* bit 0*/ | (hwConfig == agNULL ? 0: (hwConfig->hwOption & HW_CFG_PICI_EFFECTIVE_ADDRESS ? (0x1 << SHIFT1): 0))| (swConfig->sgpioSupportEnable ? (0x1 << SHIFT2): 0) | /* compile option SA_ENABLE_POISION_TLP */(SA_PTNFE_POISION_TLP << SHIFT3) | #ifdef SA_CONFIG_MDFD_REGISTRY (swConfig->disableMDF ? (0x1 << SHIFT4): 0) | #else /* compile option SA_DISABLE_MDFD */ (SA_MDFD_MULTI_DATA_FETCH << SHIFT4) | #endif /*SA_CONFIG_MDFD_REGISTRY*/ /* compile option SA_DISABLE_OB_COAL */(SA_OUTBOUND_COALESCE << SHIFT5) | /* compile option SA_ENABLE_ARBTE */(SA_ARBTE << SHIFT6) | ((swConfig->fatalErrorInterruptVector & FATAL_ERROR_INT_BITS) << SHIFT8) | (enable64 << SHIFT16) | (intOption << SHIFT17); SA_DBG1(("siConfiguration: swConfig->fatalErrorInterruptEnable %X\n",swConfig->fatalErrorInterruptEnable)); SA_DBG1(("siConfiguration: swConfig->fatalErrorInterruptVector %X\n",swConfig->fatalErrorInterruptVector)); /* initialize the mpiConfig */ /* We configure the Host main part of configuration table */ mpiConfig->mainConfig.outboundTargetITNexusEventPID0_3 = 0; mpiConfig->mainConfig.outboundTargetITNexusEventPID4_7 = 0; mpiConfig->mainConfig.outboundTargetSSPEventPID0_3 = 0; mpiConfig->mainConfig.outboundTargetSSPEventPID4_7 = 0; mpiConfig->mainConfig.ioAbortDelay = 0; mpiConfig->mainConfig.PortRecoveryTimerPortResetTimer = swConfig->PortRecoveryResetTimer; /* get parameter from queueConfig */ mpiConfig->mainConfig.iQNPPD_HPPD_GEvent = queueConfig->iqNormalPriorityProcessingDepth | (queueConfig->iqHighPriorityProcessingDepth << SHIFT8) | (queueConfig->generalEventQueue << SHIFT16) | (queueConfig->tgtDeviceRemovedEventQueue << SHIFT24); mpiConfig->mainConfig.outboundHWEventPID0_3 = queueConfig->sasHwEventQueue[0] | (queueConfig->sasHwEventQueue[1] << SHIFT8) | (queueConfig->sasHwEventQueue[2] << SHIFT16) | (queueConfig->sasHwEventQueue[3] << SHIFT24); mpiConfig->mainConfig.outboundHWEventPID4_7 = queueConfig->sasHwEventQueue[4] | (queueConfig->sasHwEventQueue[5] << SHIFT8) | (queueConfig->sasHwEventQueue[6] << SHIFT16) | (queueConfig->sasHwEventQueue[7] << SHIFT24); mpiConfig->mainConfig.outboundNCQEventPID0_3 = queueConfig->sataNCQErrorEventQueue[0] | (queueConfig->sataNCQErrorEventQueue[1] << SHIFT8) | (queueConfig->sataNCQErrorEventQueue[2] << SHIFT16) | (queueConfig->sataNCQErrorEventQueue[3] << SHIFT24); mpiConfig->mainConfig.outboundNCQEventPID4_7 = queueConfig->sataNCQErrorEventQueue[4] | (queueConfig->sataNCQErrorEventQueue[5] << 
SHIFT8) |
    (queueConfig->sataNCQErrorEventQueue[6] << SHIFT16) |
    (queueConfig->sataNCQErrorEventQueue[7] << SHIFT24);

    /* save it */
    mpiConfig->numInboundQueues  = queueConfig->numInboundQueues;
    mpiConfig->numOutboundQueues = queueConfig->numOutboundQueues;
    mpiConfig->queueOption       = queueConfig->queueOption;

    SA_DBG2(("siConfiguration: numInboundQueues=%d numOutboundQueues=%d\n",
      queueConfig->numInboundQueues,
      queueConfig->numOutboundQueues));

    /* configure inbound queues */
    /* We configure the size of queue based on swConfig */
    for( i = 0; i < queueConfig->numInboundQueues; i ++ )
    {
      mpiConfig->inboundQueues[i].numElements = (bit16)queueConfig->inboundQueues[i].elementCount;
      mpiConfig->inboundQueues[i].elementSize = (bit16)queueConfig->inboundQueues[i].elementSize;
      mpiConfig->inboundQueues[i].priority    = queueConfig->inboundQueues[i].priority;
      SA_DBG2(("siConfiguration: IBQ%d:elementCount=%d elementSize=%d priority=%d Total Size 0x%X\n", i,
        queueConfig->inboundQueues[i].elementCount,
        queueConfig->inboundQueues[i].elementSize,
        queueConfig->inboundQueues[i].priority,
        queueConfig->inboundQueues[i].elementCount * queueConfig->inboundQueues[i].elementSize ));
    }

    /* configure outbound queues */
    /* We configure the size of queue based on swConfig */
    for( i = 0; i < queueConfig->numOutboundQueues; i ++ )
    {
      mpiConfig->outboundQueues[i].numElements        = (bit16)queueConfig->outboundQueues[i].elementCount;
      mpiConfig->outboundQueues[i].elementSize        = (bit16)queueConfig->outboundQueues[i].elementSize;
      mpiConfig->outboundQueues[i].interruptVector    = (bit8)queueConfig->outboundQueues[i].interruptVectorIndex;
      mpiConfig->outboundQueues[i].interruptDelay     = (bit16)queueConfig->outboundQueues[i].interruptDelay;
      mpiConfig->outboundQueues[i].interruptThreshold = (bit8)queueConfig->outboundQueues[i].interruptCount;
      mpiConfig->outboundQueues[i].interruptEnable    = (bit32)queueConfig->outboundQueues[i].interruptEnable;
      SA_DBG2(("siConfiguration: OBQ%d:elementCount=%d elementSize=%d interruptCount=%d interruptEnable=%d\n", i,
        queueConfig->outboundQueues[i].elementCount,
        queueConfig->outboundQueues[i].elementSize,
        queueConfig->outboundQueues[i].interruptCount,
        queueConfig->outboundQueues[i].interruptEnable));
    }
  }

  SA_DBG1(("siConfiguration:mpiConfig->mainConfig.FatalErrorInterrupt 0x%X\n",mpiConfig->mainConfig.FatalErrorInterrupt));
  SA_DBG1(("siConfiguration:swConfig->fatalErrorInterruptVector 0x%X\n",swConfig->fatalErrorInterruptVector));
  SA_DBG1(("siConfiguration:enable64 0x%X\n",enable64));
  SA_DBG1(("siConfiguration:PortRecoveryResetTimer 0x%X\n",swConfig->PortRecoveryResetTimer));

  smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m2");
  /* return */
  return AGSA_RC_SUCCESS;
}

#ifdef FW_EVT_LOG_TST
void saLogDump(agsaRoot_t *agRoot,
               U32 *eventLogSize,
               U32 **eventLogAddress_)
{
  agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData);
  //mpiConfig_t *mpiConfig = &saRoot->mpiConfig;
  mpiHostLLConfigDescriptor_t *mpiConfig = &saRoot->mainConfigTable;

  *eventLogAddress_ = (U32*)eventLogAddress;
  *eventLogSize = (U32)mpiConfig->eventLogSize;
}
#endif

/*******************************************************************************/
/** \fn mpiInitialize(agsaRoot_t *agRoot, mpiMemReq_t* memoryAllocated, mpiConfig_t* config)
 *  \brief Initializes the MPI Message Unit
 *  \param agRoot           Pointer to a data structure containing LL layer context handles
 *  \param memoryAllocated  Data structure that holds the different chunks of memory that are allocated
 *  \param config           MPI configuration
 *
 *  This function is called to initialize SPC_HOST_MPI internal data structures
 *  and the SPC hardware.
 *  This function is completed synchronously (there is no callback).
 *
 *  Return:
 *    AGSA_RC_SUCCESS if initialization succeeded.
 *    AGSA_RC_FAILURE if initialization failed.
 */
/*******************************************************************************/
GLOBAL bit32 mpiInitialize(agsaRoot_t *agRoot,
                           mpiMemReq_t* memoryAllocated,
                           mpiConfig_t* config)
{
  static spc_configMainDescriptor_t mainCfg;        /* main part of MPI configuration */
  static spc_inboundQueueDescriptor_t inQueueCfg;   /* Inbound queue HW configuration structure */
  static spc_outboundQueueDescriptor_t outQueueCfg; /* Outbound queue HW configuration structure */
  bit16 qIdx, i, indexoffset;          /* Queue index */
  bit16 mIdx = 0;                      /* Memory region index */
  bit32 MSGUCfgTblDWIdx, GSTLenMPIS;
  bit32 MSGUCfgTblBase, ret = AGSA_RC_SUCCESS;
  bit32 value, togglevalue;
  bit32 saveOffset;
  bit32 inboundoffset, outboundoffset;
  bit8  pcibar;
  bit16 maxinbound  = AGSA_MAX_INBOUND_Q;
  bit16 maxoutbound = AGSA_MAX_OUTBOUND_Q;
  bit32 OB_CIPCIBar;
  bit32 IB_PIPCIBar;
  bit32 max_wait_time;
  bit32 max_wait_count;
  bit32 memOffset;
  agsaLLRoot_t *saRoot;
  mpiICQueue_t *circularIQ = agNULL;
  mpiOCQueue_t *circularOQ;
  bit32 mpiUnInitFailed = 0;
  bit32 mpiStartToggleFailed = 0;
#if defined(SALLSDK_DEBUG)
  bit8 phycount = AGSA_MAX_VALID_PHYS;
#endif /* SALLSDK_DEBUG */

  SA_DBG1(("mpiInitialize: Entering\n"));
  SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null");
  SA_ASSERT(NULL != memoryAllocated, "memoryAllocated argument cannot be null");
  SA_ASSERT(NULL != config, "config argument cannot be null");
  SA_ASSERT(0 == (sizeof(spc_inboundQueueDescriptor_t) % 4), "spc_inboundQueueDescriptor_t type size has to be divisible by 4");

  saRoot = (agsaLLRoot_t *)(agRoot->sdkData);

  si_memset(&mainCfg,0,sizeof(spc_configMainDescriptor_t));
  si_memset(&inQueueCfg,0,sizeof(spc_inboundQueueDescriptor_t));
  si_memset(&outQueueCfg,0,sizeof(spc_outboundQueueDescriptor_t));

  SA_ASSERT((agNULL != saRoot), "");
  if(saRoot == agNULL)
  {
    SA_DBG1(("mpiInitialize: saRoot == agNULL\n"));
    return(AGSA_RC_FAILURE);
  }
  smTraceFuncEnter(hpDBG_VERY_LOUD,"m3");

  /* Shift BAR 4 for SPC HAILEAH */
  if(smIS_SPC(agRoot))
  {
    if( smIS_HIL(agRoot))
    {
      if (AGSA_RC_FAILURE == siBar4Shift(agRoot, MBIC_GSM_SM_BASE))
      {
        SA_DBG1(("mpiInitialize: siBar4Shift FAILED ******************************************\n"));
        return AGSA_RC_FAILURE;
      }
    }
  }

  /* Wait for the SPC Configuration Table to be ready */
  ret = mpiWaitForConfigTable(agRoot, &mainCfg);
  if (AGSA_RC_FAILURE == ret)
  {
    /* return error if MPI Configuration Table not ready */
    SA_DBG1(("mpiInitialize: mpiWaitForConfigTable FAILED ******************************************\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m3");
    return ret;
  }

  /* read scratch pad0 to get PCI BAR and offset of configuration table */
  MSGUCfgTblBase = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  /* get PCI BAR */
  MSGUCfgTblBase = (MSGUCfgTblBase & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* get pci Bar index */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, MSGUCfgTblBase);
  SA_DBG1(("mpiInitialize: MSGUCfgTblBase = 0x%x\n", MSGUCfgTblBase));

#if defined(SALLSDK_DEBUG)
  /* get Phy count from configuration table */
  phycount = (bit8)((mainCfg.ContrlCapFlag & PHY_COUNT_BITS) >> SHIFT19);
  SA_DBG1(("mpiInitialize: Number of PHYs = 0x%x\n", phycount));
  smTrace(hpDBG_VERY_LOUD,"70",phycount); /* TP:70 phycount */
#endif /* SALLSDK_DEBUG */

  /* get High Priority IQ support flag */
  if (mainCfg.ContrlCapFlag & HP_SUPPORT_BIT)
  {
    SA_DBG1(("mpiInitialize: High Priority IQ support from SPC\n"));
  }
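/*
 * Sketch of the MSGU scratch pad 0 decoding performed above (inferred from
 * the mask/shift usage in this function, not from a separate register spec):
 *
 *   bar_field = (sp0 & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;  upper bits: raw PCI BAR
 *   offset    =  sp0 & SCRATCH_PAD0_OFFSET_MASK;           low bits: table offset
 *
 * mpiGetPCIBarIndex() then translates the raw BAR field into a logical BAR
 * index usable with ossaHwRegReadExt()/ossaHwRegWriteExt().
 */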
/* get Interrupt Coalescing Support flag */ if (mainCfg.ContrlCapFlag & INT_COL_BIT) { SA_DBG1(("mpiInitialize: Interrupt Coalescing support from SPC\n")); } /* get configured the number of inbound/outbound queues */ if (memoryAllocated->count == TOTAL_MPI_MEM_CHUNKS) { config->maxNumInboundQueues = AGSA_MAX_INBOUND_Q; config->maxNumOutboundQueues = AGSA_MAX_OUTBOUND_Q; } else { config->maxNumInboundQueues = config->numInboundQueues; config->maxNumOutboundQueues = config->numOutboundQueues; maxinbound = config->numInboundQueues; maxoutbound = config->numOutboundQueues; } SA_DBG1(("mpiInitialize: Number of IQ %d\n", maxinbound)); SA_DBG1(("mpiInitialize: Number of OQ %d\n", maxoutbound)); /* get inbound queue offset */ inboundoffset = mainCfg.inboundQueueOffset; /* get outbound queue offset */ outboundoffset = mainCfg.outboundQueueOffset; if(smIS_SPCV(agRoot)) { SA_DBG2(("mpiInitialize: Offset of IQ %d\n", (inboundoffset & 0xFF000000) >> 24)); SA_DBG2(("mpiInitialize: Offset of OQ %d\n", (outboundoffset & 0xFF000000) >> 24)); inboundoffset &= 0x00FFFFFF; outboundoffset &= 0x00FFFFFF; } /* get offset of the configuration table */ MSGUCfgTblDWIdx = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); MSGUCfgTblDWIdx = MSGUCfgTblDWIdx & SCRATCH_PAD0_OFFSET_MASK; saveOffset = MSGUCfgTblDWIdx; /* Checks if the configuration memory region size is the same as the mpiConfigMain */ if(memoryAllocated->region[mIdx].totalLength != sizeof(bit8) * config->mainConfig.eventLogSize) { SA_DBG1(("ERROR: The memory region [%d] 0x%X != 0x%X does not have the size of the MSGU event log ******************************************\n", mIdx,memoryAllocated->region[mIdx].totalLength,config->mainConfig.eventLogSize)); smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m3"); return AGSA_RC_FAILURE; } mainCfg.iQNPPD_HPPD_GEvent = config->mainConfig.iQNPPD_HPPD_GEvent; if(smIS_SPCV(agRoot)) { mainCfg.outboundHWEventPID0_3 = 0; mainCfg.outboundHWEventPID4_7 = 0; mainCfg.outboundNCQEventPID0_3 = 0; mainCfg.outboundNCQEventPID4_7 = 0; mainCfg.outboundTargetITNexusEventPID0_3 = 0; mainCfg.outboundTargetITNexusEventPID4_7 = 0; mainCfg.outboundTargetSSPEventPID0_3 = 0; mainCfg.outboundTargetSSPEventPID4_7 = 0; mainCfg.ioAbortDelay = 0; /* SPCV reserved */ mainCfg.custset = 0; mainCfg.portRecoveryResetTimer = config->mainConfig.PortRecoveryTimerPortResetTimer; SA_DBG1(("mpiInitialize:custset V %8X\n",mainCfg.custset)); SA_DBG1(("mpiInitialize:portRecoveryResetTimer V %8X\n",mainCfg.portRecoveryResetTimer)); mainCfg.interruptReassertionDelay = saRoot->hwConfig.intReassertionOption; SA_DBG1(("mpiInitialize:interruptReassertionDelay V %8X\n", mainCfg.interruptReassertionDelay)); } else { mainCfg.outboundHWEventPID0_3 = config->mainConfig.outboundHWEventPID0_3; mainCfg.outboundHWEventPID4_7 = config->mainConfig.outboundHWEventPID4_7; mainCfg.outboundNCQEventPID0_3 = config->mainConfig.outboundNCQEventPID0_3; mainCfg.outboundNCQEventPID4_7 = config->mainConfig.outboundNCQEventPID4_7; mainCfg.outboundTargetITNexusEventPID0_3 = config->mainConfig.outboundTargetITNexusEventPID0_3; mainCfg.outboundTargetITNexusEventPID4_7 = config->mainConfig.outboundTargetITNexusEventPID4_7; mainCfg.outboundTargetSSPEventPID0_3 = config->mainConfig.outboundTargetSSPEventPID0_3; mainCfg.outboundTargetSSPEventPID4_7 = config->mainConfig.outboundTargetSSPEventPID4_7; mainCfg.ioAbortDelay = config->mainConfig.ioAbortDelay; mainCfg.custset = config->mainConfig.custset; SA_DBG1(("mpiInitialize:custset spc %8X\n",mainCfg.custset)); } #ifdef FW_EVT_LOG_TST 
eventLogAddress = memoryAllocated->region[mIdx].virtPtr; #endif mainCfg.upperEventLogAddress = memoryAllocated->region[mIdx].physAddrUpper; mainCfg.lowerEventLogAddress = memoryAllocated->region[mIdx].physAddrLower; mainCfg.eventLogSize = config->mainConfig.eventLogSize; mainCfg.eventLogOption = config->mainConfig.eventLogOption; mIdx++; /* Checks if the configuration memory region size is the same as the mpiConfigMain */ if(memoryAllocated->region[mIdx].totalLength != sizeof(bit8) * config->mainConfig.IOPeventLogSize) { SA_DBG1(("ERROR: The memory region does not have the size of the IOP event log\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m3"); return AGSA_RC_FAILURE; } mainCfg.upperIOPeventLogAddress = memoryAllocated->region[mIdx].physAddrUpper; mainCfg.lowerIOPeventLogAddress = memoryAllocated->region[mIdx].physAddrLower; mainCfg.IOPeventLogSize = config->mainConfig.IOPeventLogSize; mainCfg.IOPeventLogOption = config->mainConfig.IOPeventLogOption; mainCfg.FatalErrorInterrupt = config->mainConfig.FatalErrorInterrupt; SA_DBG1(("mpiInitialize: iQNPPD_HPPD_GEvent 0x%x\n", mainCfg.iQNPPD_HPPD_GEvent)); if(smIS_SPCV(agRoot)) { } else { SA_DBG3(("mpiInitialize: outboundHWEventPID0_3 0x%x\n", mainCfg.outboundHWEventPID0_3)); SA_DBG3(("mpiInitialize: outboundHWEventPID4_7 0x%x\n", mainCfg.outboundHWEventPID4_7)); SA_DBG3(("mpiInitialize: outboundNCQEventPID0_3 0x%x\n", mainCfg.outboundNCQEventPID0_3)); SA_DBG3(("mpiInitialize: outboundNCQEventPID4_7 0x%x\n", mainCfg.outboundNCQEventPID4_7)); SA_DBG3(("mpiInitialize: outboundTargetITNexusEventPID0_3 0x%x\n", mainCfg.outboundTargetITNexusEventPID0_3)); SA_DBG3(("mpiInitialize: outboundTargetITNexusEventPID4_7 0x%x\n", mainCfg.outboundTargetITNexusEventPID4_7)); SA_DBG3(("mpiInitialize: outboundTargetSSPEventPID0_3 0x%x\n", mainCfg.outboundTargetSSPEventPID0_3)); SA_DBG3(("mpiInitialize: outboundTargetSSPEventPID4_7 0x%x\n", mainCfg.outboundTargetSSPEventPID4_7)); } SA_DBG3(("mpiInitialize: upperEventLogAddress 0x%x\n", mainCfg.upperEventLogAddress)); SA_DBG3(("mpiInitialize: lowerEventLogAddress 0x%x\n", mainCfg.lowerEventLogAddress)); SA_DBG3(("mpiInitialize: eventLogSize 0x%x\n", mainCfg.eventLogSize)); SA_DBG3(("mpiInitialize: eventLogOption 0x%x\n", mainCfg.eventLogOption)); #ifdef FW_EVT_LOG_TST SA_DBG3(("mpiInitialize: eventLogAddress 0x%p\n", eventLogAddress)); #endif SA_DBG3(("mpiInitialize: upperIOPLogAddress 0x%x\n", mainCfg.upperIOPeventLogAddress)); SA_DBG3(("mpiInitialize: lowerIOPLogAddress 0x%x\n", mainCfg.lowerIOPeventLogAddress)); SA_DBG3(("mpiInitialize: IOPeventLogSize 0x%x\n", mainCfg.IOPeventLogSize)); SA_DBG3(("mpiInitialize: IOPeventLogOption 0x%x\n", mainCfg.IOPeventLogOption)); SA_DBG3(("mpiInitialize: FatalErrorInterrupt 0x%x\n", mainCfg.FatalErrorInterrupt)); SA_DBG3(("mpiInitialize: HDAModeFlags 0x%x\n", mainCfg.HDAModeFlags)); SA_DBG3(("mpiInitialize: analogSetupTblOffset 0x%08x\n", mainCfg.analogSetupTblOffset)); saRoot->mainConfigTable.iQNPPD_HPPD_GEvent = mainCfg.iQNPPD_HPPD_GEvent; if(smIS_SPCV(agRoot)) { /* SPCV - reserved fields */ saRoot->mainConfigTable.outboundHWEventPID0_3 = 0; saRoot->mainConfigTable.outboundHWEventPID4_7 = 0; saRoot->mainConfigTable.outboundNCQEventPID0_3 = 0; saRoot->mainConfigTable.outboundNCQEventPID4_7 = 0; saRoot->mainConfigTable.outboundTargetITNexusEventPID0_3 = 0; saRoot->mainConfigTable.outboundTargetITNexusEventPID4_7 = 0; saRoot->mainConfigTable.outboundTargetSSPEventPID0_3 = 0; saRoot->mainConfigTable.outboundTargetSSPEventPID4_7 = 0; saRoot->mainConfigTable.ioAbortDelay = 
0; saRoot->mainConfigTable.custset = 0; } else { saRoot->mainConfigTable.outboundHWEventPID0_3 = mainCfg.outboundHWEventPID0_3; saRoot->mainConfigTable.outboundHWEventPID4_7 = mainCfg.outboundHWEventPID4_7; saRoot->mainConfigTable.outboundNCQEventPID0_3 = mainCfg.outboundNCQEventPID0_3; saRoot->mainConfigTable.outboundNCQEventPID4_7 = mainCfg.outboundNCQEventPID4_7; saRoot->mainConfigTable.outboundTargetITNexusEventPID0_3 = mainCfg.outboundTargetITNexusEventPID0_3; saRoot->mainConfigTable.outboundTargetITNexusEventPID4_7 = mainCfg.outboundTargetITNexusEventPID4_7; saRoot->mainConfigTable.outboundTargetSSPEventPID0_3 = mainCfg.outboundTargetSSPEventPID0_3; saRoot->mainConfigTable.outboundTargetSSPEventPID4_7 = mainCfg.outboundTargetSSPEventPID4_7; saRoot->mainConfigTable.ioAbortDelay = mainCfg.ioAbortDelay; saRoot->mainConfigTable.custset = mainCfg.custset; } saRoot->mainConfigTable.upperEventLogAddress = mainCfg.upperEventLogAddress; saRoot->mainConfigTable.lowerEventLogAddress = mainCfg.lowerEventLogAddress; saRoot->mainConfigTable.eventLogSize = mainCfg.eventLogSize; saRoot->mainConfigTable.eventLogOption = mainCfg.eventLogOption; saRoot->mainConfigTable.upperIOPeventLogAddress = mainCfg.upperIOPeventLogAddress; saRoot->mainConfigTable.lowerIOPeventLogAddress = mainCfg.lowerIOPeventLogAddress; saRoot->mainConfigTable.IOPeventLogSize = mainCfg.IOPeventLogSize; saRoot->mainConfigTable.IOPeventLogOption = mainCfg.IOPeventLogOption; saRoot->mainConfigTable.FatalErrorInterrupt = mainCfg.FatalErrorInterrupt; if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved fields */ } else { saRoot->mainConfigTable.HDAModeFlags = mainCfg.HDAModeFlags; } saRoot->mainConfigTable.analogSetupTblOffset = mainCfg.analogSetupTblOffset; smTrace(hpDBG_VERY_LOUD,"71",mIdx); /* TP:71 71 mIdx */ ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IQNPPD_HPPD_OFFSET), mainCfg.iQNPPD_HPPD_GEvent); SA_DBG3(("mpiInitialize: Offset 0x%08x mainCfg.iQNPPD_HPPD_GEvent 0x%x\n", (bit32)(MSGUCfgTblDWIdx + MAIN_IQNPPD_HPPD_OFFSET), mainCfg.iQNPPD_HPPD_GEvent)); if(smIS_SPC6V(agRoot)) { if(smIsCfgVREV_B(agRoot)) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); SA_DBG1(("mpiInitialize:SPCV - MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE\n" )); } if(smIsCfgVREV_C(agRoot)) { SA_DBG1(("mpiInitialize:SPCV - END_TO_END_CRC On\n" )); } SA_DBG3(("mpiInitialize:SPCV - rest reserved field \n" )); ;/* SPCV - reserved field */ } else if(smIS_SPC(agRoot)) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_HW_EVENT_PID03_OFFSET), mainCfg.outboundHWEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_HW_EVENT_PID47_OFFSET), mainCfg.outboundHWEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_NCQ_EVENT_PID03_OFFSET), mainCfg.outboundNCQEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_NCQ_EVENT_PID47_OFFSET), mainCfg.outboundNCQEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_TITNX_EVENT_PID03_OFFSET), mainCfg.outboundTargetITNexusEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_TITNX_EVENT_PID47_OFFSET), mainCfg.outboundTargetITNexusEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_SSP_EVENT_PID03_OFFSET), mainCfg.outboundTargetSSPEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_SSP_EVENT_PID47_OFFSET), 
mainCfg.outboundTargetSSPEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_CUSTOMER_SETTING), mainCfg.custset); }else { if(smIsCfgVREV_A(agRoot)) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); /* */ SA_DBG1(("mpiInitialize:SPCV12G - offset MAIN_IO_ABORT_DELAY 0x%x value MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE 0x%x\n",MAIN_IO_ABORT_DELAY ,MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE)); SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC OFF for rev A %d\n",smIsCfgVREV_A(agRoot) )); } else if(smIsCfgVREV_B(agRoot)) { SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC ON rev B %d ****************************\n",smIsCfgVREV_B(agRoot) )); /*ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); */ } else if(smIsCfgVREV_C(agRoot)) { SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC on rev C %d\n",smIsCfgVREV_C(agRoot) )); } else { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC Off unknown rev 0x%x\n", ossaHwRegReadConfig32((agRoot), 8 ))); } } ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_HI), mainCfg.upperEventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_LO), mainCfg.lowerEventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_BUFF_SIZE), mainCfg.eventLogSize); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_OPTION), mainCfg.eventLogOption); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_HI), mainCfg.upperIOPeventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_LO), mainCfg.lowerIOPeventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_BUFF_SIZE), mainCfg.IOPeventLogSize); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_OPTION), mainCfg.IOPeventLogOption); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_INTERRUPT), mainCfg.FatalErrorInterrupt); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_PRECTD_PRESETD), mainCfg.portRecoveryResetTimer); SA_DBG3(("mpiInitialize: Offset 0x%08x upperEventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_HI), mainCfg.upperEventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x lowerEventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_LO), mainCfg.lowerEventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x eventLogSize 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_BUFF_SIZE), mainCfg.eventLogSize )); SA_DBG3(("mpiInitialize: Offset 0x%08x eventLogOption 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_OPTION), mainCfg.eventLogOption )); SA_DBG3(("mpiInitialize: Offset 0x%08x upperIOPeventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_HI), mainCfg.upperIOPeventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x lowerIOPeventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_LO), mainCfg.lowerIOPeventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x IOPeventLogSize 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_BUFF_SIZE), mainCfg.IOPeventLogSize )); SA_DBG3(("mpiInitialize: Offset 0x%08x IOPeventLogOption 
0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_OPTION), mainCfg.IOPeventLogOption )); SA_DBG3(("mpiInitialize: Offset 0x%08x FatalErrorInterrupt 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_INTERRUPT), mainCfg.FatalErrorInterrupt )); SA_DBG3(("mpiInitialize: Offset 0x%08x PortRecoveryResetTimer 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_PRECTD_PRESETD), mainCfg.portRecoveryResetTimer )); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IRAD_RESERVED), mainCfg.interruptReassertionDelay); SA_DBG3(("mpiInitialize: Offset 0x%08x InterruptReassertionDelay 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IRAD_RESERVED), mainCfg.interruptReassertionDelay )); mIdx++; /* skip the ci and pi memory region */ mIdx++; mIdx++; smTrace(hpDBG_VERY_LOUD,"72",mIdx); /* TP:72 mIdx */ smTrace(hpDBG_VERY_LOUD,"Bc",maxinbound); /* TP:Bc maxinbound */ smTrace(hpDBG_VERY_LOUD,"Bd",pcibar); /* TP:Bd pcibar */ /* index offset */ indexoffset = 0; memOffset = 0; /* Memory regions for the inbound queues */ for(qIdx = 0; qIdx < maxinbound; qIdx++) { /* point back to the begin then plus offset to next queue */ smTrace(hpDBG_VERY_LOUD,"Bd",pcibar); /* TP:Bd pcibar */ MSGUCfgTblDWIdx = saveOffset; MSGUCfgTblDWIdx += inboundoffset; MSGUCfgTblDWIdx += (sizeof(spc_inboundQueueDescriptor_t) * qIdx); SA_DBG1(("mpiInitialize: A saveOffset 0x%x MSGUCfgTblDWIdx 0x%x\n",saveOffset ,MSGUCfgTblDWIdx)); /* if the MPI configuration says that this queue is disabled ... */ if(0 == config->inboundQueues[qIdx].numElements) { /* ... Clears the configuration table for this queue */ inQueueCfg.elementPriSizeCount= 0; inQueueCfg.upperBaseAddress = 0; inQueueCfg.lowerBaseAddress = 0; inQueueCfg.ciUpperBaseAddress = 0; inQueueCfg.ciLowerBaseAddress = 0; /* skip inQueueCfg.PIPCIBar (PM8000 write access) */ /* skip inQueueCfg.PIOffset (PM8000 write access) */ /* Update the inbound configuration table in SPC GSM */ mpiUpdateIBQueueCfgTable(agRoot, &inQueueCfg, MSGUCfgTblDWIdx, pcibar); } /* If the queue is enabled, then ... */ else { bit32 memSize = config->inboundQueues[qIdx].numElements * config->inboundQueues[qIdx].elementSize; bit32 remainder = memSize & 127; /* Calculate the size of this queue padded to 128 bytes */ if (remainder > 0) { memSize += (128 - remainder); } /* ... 
first checks that the memory region has the right size */
      if( (memoryAllocated->region[mIdx].totalLength - memOffset < memSize) ||
          (NULL == memoryAllocated->region[mIdx].virtPtr) ||
          (0 == memoryAllocated->region[mIdx].totalLength))
      {
        SA_DBG1(("mpiInitialize: ERROR The memory region does not have the right size for this inbound queue"));
        smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "m3");
        return AGSA_RC_FAILURE;
      }
      else
      {
        /* Then, using the MPI configuration argument, initializes the corresponding element on the saRoot */
        saRoot->inboundQueue[qIdx].numElements = config->inboundQueues[qIdx].numElements;
        saRoot->inboundQueue[qIdx].elementSize = config->inboundQueues[qIdx].elementSize;
        saRoot->inboundQueue[qIdx].priority    = config->inboundQueues[qIdx].priority;
        si_memcpy(&saRoot->inboundQueue[qIdx].memoryRegion, &memoryAllocated->region[mIdx], sizeof(mpiMem_t));
        saRoot->inboundQueue[qIdx].memoryRegion.virtPtr =
          (bit8 *)saRoot->inboundQueue[qIdx].memoryRegion.virtPtr + memOffset;
        saRoot->inboundQueue[qIdx].memoryRegion.physAddrLower += memOffset;
        saRoot->inboundQueue[qIdx].memoryRegion.elementSize = memSize;
        saRoot->inboundQueue[qIdx].memoryRegion.totalLength = memSize;
        saRoot->inboundQueue[qIdx].memoryRegion.numElements = 1;

        /* Initialize the local copy of PIs, CIs */
        SA_DBG1(("mpiInitialize: queue %d PI CI zero\n",qIdx));
        saRoot->inboundQueue[qIdx].producerIdx = 0;
        saRoot->inboundQueue[qIdx].consumerIdx = 0;
        saRoot->inboundQueue[qIdx].agRoot = agRoot;

        /* inbound CIs live in MPI memory region MPI_CI_INDEX (region 2) */
        saRoot->inboundQueue[qIdx].ciPointer = (((bit8 *)(memoryAllocated->region[MPI_CI_INDEX].virtPtr)) + qIdx * 4);

        /* ... and in the local structure we will use to copy to the HW configuration table */
        /* CI base address */
        inQueueCfg.elementPriSizeCount= config->inboundQueues[qIdx].numElements |
          (config->inboundQueues[qIdx].elementSize << SHIFT16) |
          (config->inboundQueues[qIdx].priority << SHIFT30);
        inQueueCfg.upperBaseAddress   = saRoot->inboundQueue[qIdx].memoryRegion.physAddrUpper;
        inQueueCfg.lowerBaseAddress   = saRoot->inboundQueue[qIdx].memoryRegion.physAddrLower;
        inQueueCfg.ciUpperBaseAddress = memoryAllocated->region[MPI_CI_INDEX].physAddrUpper;
        inQueueCfg.ciLowerBaseAddress = memoryAllocated->region[MPI_CI_INDEX].physAddrLower + qIdx * 4;

        /* write the configured data of inbound queue to SPC GSM */
        mpiUpdateIBQueueCfgTable(agRoot, &inQueueCfg, MSGUCfgTblDWIdx, pcibar);
        /* get inbound PI PCI Bar and Offset */
        /* get the PI PCI Bar offset and convert it to logical BAR */
        IB_PIPCIBar = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + IB_PIPCI_BAR));
        saRoot->inboundQueue[qIdx].PIPCIBar    = mpiGetPCIBarIndex(agRoot, IB_PIPCIBar);
        saRoot->inboundQueue[qIdx].PIPCIOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + IB_PIPCI_BAR_OFFSET));
        saRoot->inboundQueue[qIdx].qNumber     = qIdx;

        memOffset += memSize;
        if ((0 == ((qIdx + 1) % MAX_QUEUE_EACH_MEM)) || (qIdx == (maxinbound - 1)))
        {
          mIdx++;
          indexoffset += MAX_QUEUE_EACH_MEM;
          memOffset = 0;
        }
      } /* else for memory ok */
    } /* queue enable */
  } /* loop for inbound queue */

  smTrace(hpDBG_VERY_LOUD,"73",0); /* TP:73 outbound queues */

  /* index offset */
  indexoffset = 0;
  memOffset = 0;
  /* Let's process the memory regions for the outbound queues */
  for(qIdx = 0; qIdx < maxoutbound; qIdx++)
  {
    /* point back to the beginning, then add the offset of the next queue */
    MSGUCfgTblDWIdx = saveOffset;
    MSGUCfgTblDWIdx += outboundoffset;
    MSGUCfgTblDWIdx += (sizeof(spc_outboundQueueDescriptor_t) * qIdx);

    /* if the MPI configuration says that this queue is disabled ...
*/ if(0 == config->outboundQueues[qIdx].numElements) { /* ... Clears the configuration table for this queue */ outQueueCfg.upperBaseAddress = 0; outQueueCfg.lowerBaseAddress = 0; outQueueCfg.piUpperBaseAddress = 0; outQueueCfg.piLowerBaseAddress = 0; /* skip outQueueCfg.CIPCIBar = 0; read access only */ /* skip outQueueCfg.CIOffset = 0; read access only */ outQueueCfg.elementSizeCount = 0; outQueueCfg.interruptVecCntDelay = 0; /* Updated the configuration table in SPC GSM */ mpiUpdateOBQueueCfgTable(agRoot, &outQueueCfg, MSGUCfgTblDWIdx, pcibar); } /* If the outbound queue is enabled, then ... */ else { bit32 memSize = config->outboundQueues[qIdx].numElements * config->outboundQueues[qIdx].elementSize; bit32 remainder = memSize & 127; /* Calculate the size of this queue padded to 128 bytes */ if (remainder > 0) { memSize += (128 - remainder); } /* ... first checks that the memory region has the right size */ if((memoryAllocated->region[mIdx].totalLength - memOffset < memSize) || (NULL == memoryAllocated->region[mIdx].virtPtr) || (0 == memoryAllocated->region[mIdx].totalLength)) { SA_DBG1(("ERROR: The memory region does not have the right size for this outbound queue")); smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "m3"); return AGSA_RC_FAILURE; } else { /* Then, using the MPI configuration argument, initializes the corresponding element on the MPI context ... */ saRoot->outboundQueue[qIdx].numElements = config->outboundQueues[qIdx].numElements; saRoot->outboundQueue[qIdx].elementSize = config->outboundQueues[qIdx].elementSize; si_memcpy(&saRoot->outboundQueue[qIdx].memoryRegion, &memoryAllocated->region[mIdx], sizeof(mpiMem_t)); saRoot->outboundQueue[qIdx].memoryRegion.virtPtr = (bit8 *)saRoot->outboundQueue[qIdx].memoryRegion.virtPtr + memOffset; saRoot->outboundQueue[qIdx].memoryRegion.physAddrLower += memOffset; saRoot->outboundQueue[qIdx].memoryRegion.elementSize = memSize; saRoot->outboundQueue[qIdx].memoryRegion.totalLength = memSize; saRoot->outboundQueue[qIdx].memoryRegion.numElements = 1; saRoot->outboundQueue[qIdx].producerIdx = 0; saRoot->outboundQueue[qIdx].consumerIdx = 0; saRoot->outboundQueue[qIdx].agRoot = agRoot; /* MPI memory region for outbound PIs are 3 */ saRoot->outboundQueue[qIdx].piPointer = (((bit8 *)(memoryAllocated->region[MPI_CI_INDEX + 1].virtPtr))+ qIdx * 4); /* ... 
and in the local structure we will use to copy to the HW configuration table */ outQueueCfg.upperBaseAddress = saRoot->outboundQueue[qIdx].memoryRegion.physAddrUpper; outQueueCfg.lowerBaseAddress = saRoot->outboundQueue[qIdx].memoryRegion.physAddrLower; /* PI base address */ outQueueCfg.piUpperBaseAddress = memoryAllocated->region[MPI_CI_INDEX + 1].physAddrUpper; outQueueCfg.piLowerBaseAddress = memoryAllocated->region[MPI_CI_INDEX + 1].physAddrLower + qIdx * 4; outQueueCfg.elementSizeCount = config->outboundQueues[qIdx].numElements | (config->outboundQueues[qIdx].elementSize << SHIFT16); /* enable/disable interrupt - use saSystemInterruptsActive() API */ /* instead of ossaHwRegWrite(agRoot, MSGU_ODMR, 0); */ /* Outbound Doorbell Auto disable */ /* LL does not use ossaHwRegWriteExt(agRoot, PCIBAR1, SPC_ODAR, 0xffffffff); */ if (config->outboundQueues[qIdx].interruptEnable) { /* enable interrupt flag bit30 of outbound table */ outQueueCfg.elementSizeCount |= OB_PROPERTY_INT_ENABLE; } if(smIS_SPCV(agRoot)) { outQueueCfg.interruptVecCntDelay = ((config->outboundQueues[qIdx].interruptVector & INT_VEC_BITS ) << SHIFT24); } else { outQueueCfg.interruptVecCntDelay = (config->outboundQueues[qIdx].interruptDelay & INT_DELAY_BITS) | ((config->outboundQueues[qIdx].interruptThreshold & INT_THR_BITS ) << SHIFT16) | ((config->outboundQueues[qIdx].interruptVector & INT_VEC_BITS ) << SHIFT24); } /* create a VectorIndex Bit Map */ if (qIdx < OQ_NUM_32) { saRoot->interruptVecIndexBitMap[config->outboundQueues[qIdx].interruptVector] |= (1 << qIdx); SA_DBG2(("mpiInitialize:below 32 saRoot->interruptVecIndexBitMap[config->outboundQueues[qIdx].interruptVector] 0x%08x\n",saRoot->interruptVecIndexBitMap[config->outboundQueues[qIdx].interruptVector])); } else { saRoot->interruptVecIndexBitMap1[config->outboundQueues[qIdx].interruptVector] |= (1 << (qIdx - OQ_NUM_32)); SA_DBG2(("mpiInitialize:Above 32 saRoot->interruptVecIndexBitMap1[config->outboundQueues[qIdx].interruptVector] 0x%08x\n",saRoot->interruptVecIndexBitMap1[config->outboundQueues[qIdx].interruptVector])); } /* Update the outbound configuration table */ mpiUpdateOBQueueCfgTable(agRoot, &outQueueCfg, MSGUCfgTblDWIdx, pcibar); /* read the CI PCIBar offset and convert it to logical bar */ OB_CIPCIBar = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + OB_CIPCI_BAR)); saRoot->outboundQueue[qIdx].CIPCIBar = mpiGetPCIBarIndex(agRoot, OB_CIPCIBar); saRoot->outboundQueue[qIdx].CIPCIOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + OB_CIPCI_BAR_OFFSET)); saRoot->outboundQueue[qIdx].DIntTOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + OB_DYNAMIC_COALES_OFFSET)); saRoot->outboundQueue[qIdx].qNumber = qIdx; memOffset += memSize; if ((0 == ((qIdx + 1) % MAX_QUEUE_EACH_MEM)) || (qIdx == (maxoutbound - 1))) { mIdx++; indexoffset += MAX_QUEUE_EACH_MEM; memOffset =0; } } } } /* calculate number of vectors */ saRoot->numInterruptVectors = 0; for (qIdx = 0; qIdx < MAX_NUM_VECTOR; qIdx++) { if ((saRoot->interruptVecIndexBitMap[qIdx]) || (saRoot->interruptVecIndexBitMap1[qIdx])) { (saRoot->numInterruptVectors)++; } } SA_DBG2(("mpiInitialize:(saRoot->numInterruptVectors) 0x%x\n",(saRoot->numInterruptVectors))); if(smIS_SPCV(agRoot)) { /* setup interrupt vector table */ mpiWrIntVecTable(agRoot,config); } if(smIS_SPCV(agRoot)) { mpiWrAnalogSetupTable(agRoot,config); } /* setup phy analog registers */ mpiWriteCALAll(agRoot, &config->phyAnalogConfig); { bit32 pcibar = 0; bit32 TableOffset; pcibar = siGetPciBar(agRoot); TableOffset 
= siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); TableOffset &= SCRATCH_PAD0_OFFSET_MASK; SA_DBG1(("mpiInitialize: mpiContextTable TableOffset 0x%08X contains 0x%08X\n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ))); SA_ASSERT( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) == 0x53434D50), "Config table signiture"); SA_DBG1(("mpiInitialize: AGSA_MPI_MAIN_CONFIGURATION_TABLE 0x%08X\n", 0)); SA_DBG1(("mpiInitialize: AGSA_MPI_GENERAL_STATUS_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_GST_OFFSET) & 0xFFFF ))); SA_DBG1(("mpiInitialize: AGSA_MPI_INBOUND_QUEUE_CONFIGURATION_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_IBQ_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_CONFIGURATION_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_OBQ_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_SAS_PHY_ANALOG_SETUP_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_ANALOG_SETUP_OFFSET) & 0xFFFF ))); SA_DBG1(("mpiInitialize: AGSA_MPI_INTERRUPT_VECTOR_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_INT_VEC_TABLE_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_PER_SAS_PHY_ATTRIBUTE_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_PHY_ATTRIBUTE_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_FAILOVER_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_MOQFOT_MOQFOES) & 0xFFFF))); } if(agNULL != saRoot->swConfig.mpiContextTable ) { agsaMPIContext_t * context = (agsaMPIContext_t * )saRoot->swConfig.mpiContextTable; bit32 length = saRoot->swConfig.mpiContextTablelen; bit32 pcibar = 0; bit32 TableOffset; pcibar = siGetPciBar(agRoot); TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); TableOffset &= SCRATCH_PAD0_OFFSET_MASK; SA_DBG1(("mpiInitialize: mpiContextTable TableOffset 0x%08X contains 0x%08X\n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ))); SA_ASSERT( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) == 0x53434D50), "Config table signiture"); if ( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) != 0x53434D50)) { SA_DBG1(("mpiInitialize: TableOffset 0x%x reads 0x%x expect 0x%x \n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ),0x53434D50)); } if(context ) { SA_DBG1(("mpiInitialize: MPITableType 0x%x context->offset 0x%x context->value 0x%x\n",context->MPITableType,context->offset,context->value)); while( length != 0) { switch(context->MPITableType) { bit32 OffsetInMain; case AGSA_MPI_MAIN_CONFIGURATION_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_MAIN_CONFIGURATION_TABLE %d 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset, context->offset, context->value)); OffsetInMain = TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4) , context->value); break; case AGSA_MPI_GENERAL_STATUS_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_GENERAL_STATUS_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType ,TableOffset+MAIN_GST_OFFSET, context->offset, context->value )); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_GST_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_INBOUND_QUEUE_CONFIGURATION_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_INBOUND_QUEUE_CONFIGURATION_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_IBQ_OFFSET, context->offset, context->value)); OffsetInMain 
= (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_IBQ_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_OUTBOUND_QUEUE_CONFIGURATION_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_CONFIGURATION_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_OBQ_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_OBQ_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_SAS_PHY_ANALOG_SETUP_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_SAS_PHY_ANALOG_SETUP_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_ANALOG_SETUP_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+ MAIN_ANALOG_SETUP_OFFSET) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_INTERRUPT_VECTOR_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_INTERRUPT_VECTOR_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_INT_VEC_TABLE_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+ MAIN_INT_VEC_TABLE_OFFSET) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_PER_SAS_PHY_ATTRIBUTE_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_PER_SAS_PHY_ATTRIBUTE_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_PHY_ATTRIBUTE_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_PHY_ATTRIBUTE_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_OUTBOUND_QUEUE_FAILOVER_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_FAILOVER_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_MOQFOT_MOQFOES, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_MOQFOT_MOQFOES ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; default: SA_DBG1(("mpiInitialize: error MPITableType unknown %d offset 0x%x value 0x%x\n",context->MPITableType, context->offset, context->value)); break; } if(smIS_SPC12V(agRoot)) { if (saRoot->ControllerInfo.fwInterfaceRev > 0x301 ) { SA_DBG1(("mpiInitialize: MAIN_AWT_MIDRANGE 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, TableOffset + MAIN_AWT_MIDRANGE) )); } } if(length >= sizeof(agsaMPIContext_t)) { length -= sizeof(agsaMPIContext_t); context++; } else { length = 0; } } } SA_DBG1(("mpiInitialize: context %p saRoot->swConfig.mpiContextTable %p %d\n",context,saRoot->swConfig.mpiContextTable,context == saRoot->swConfig.mpiContextTable ? 1 : 0)); if ( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) != 0x53434D50)) { SA_DBG1(("mpiInitialize:TableOffset 0x%x reads 0x%x expect 0x%x \n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ),0x53434D50)); } SA_ASSERT( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) == 0x53434D50), "Config table signiture After"); } /* At this point the Message Unit configuration table is set up. 
Now we need to ring the doorbell */ togglevalue = 0; smTrace(hpDBG_VERY_LOUD,"74", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET )); /* TP:74 Doorbell */ /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the table is updated */ siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); if(siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET ) & SPC_MSGU_CFG_TABLE_UPDATE) { SA_DBG1(("mpiInitialize: SPC_MSGU_CFG_TABLE_UPDATE (0x%X) \n", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET))); } else { SA_DBG1(("mpiInitialize: SPC_MSGU_CFG_TABLE_UPDATE not set (0x%X)\n", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET))); ossaStallThread(agRoot, WAIT_INCREMENT); } smTrace(hpDBG_VERY_LOUD,"A5", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET )); /* TP:A5 Doorbell */ /* // ossaHwRegWrite(agRoot, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); MSGU_WRITE_IDR(SPC_MSGU_CFG_TABLE_UPDATE); */ /* wait until Inbound DoorBell Clear Register toggled */ WaitLonger: max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); value = MSGU_READ_IDR; value &= SPC_MSGU_CFG_TABLE_UPDATE; } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT)); smTrace(hpDBG_VERY_LOUD,"80", max_wait_count); /* TP:80 TP max_wait_count */ if (!max_wait_count && mpiStartToggleFailed < 5 ) { SA_DBG1(("mpiInitialize: mpiStartToggleFailed count %d\n", mpiStartToggleFailed)); mpiStartToggleFailed++; goto WaitLonger; } if (!max_wait_count ) { SA_DBG1(("mpiInitialize: TIMEOUT:IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue)); MSGUCfgTblDWIdx = saveOffset; GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)MSGUCfgTblDWIdx + (bit32)(mainCfg.GSTOffset + GST_GSTLEN_MPIS_OFFSET)); SA_DBG1(("mpiInitialize: MPI State = 0x%x\n", GSTLenMPIS)); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"81", mpiStartToggleFailed ); /* TP:81 TP */ /* check the MPI-State for initialization */ MSGUCfgTblDWIdx = saveOffset; GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)MSGUCfgTblDWIdx + (bit32)(mainCfg.GSTOffset + GST_GSTLEN_MPIS_OFFSET)); if ( (GST_MPI_STATE_UNINIT == (GSTLenMPIS & GST_MPI_STATE_MASK)) && ( mpiUnInitFailed < 5 ) ) { SA_DBG1(("mpiInitialize: MPI State = 0x%x mpiUnInitFailed count %d\n", GSTLenMPIS & GST_MPI_STATE_MASK,mpiUnInitFailed)); ossaStallThread(agRoot, (20 * 1000)); mpiUnInitFailed++; goto WaitLonger; } if (GST_MPI_STATE_INIT != (GSTLenMPIS & GST_MPI_STATE_MASK)) { SA_DBG1(("mpiInitialize: Error Not GST_MPI_STATE_INIT MPI State = 0x%x\n", GSTLenMPIS & GST_MPI_STATE_MASK)); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"82", 0); /* TP:82 TP */ /* check MPI Initialization error */ GSTLenMPIS = GSTLenMPIS >> SHIFT16; if (0x0000 != GSTLenMPIS) { SA_DBG1(("mpiInitialize: MPI Error = 0x%x\n", GSTLenMPIS)); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"83", 0); /* TP:83 TP */ /* reread IQ PI offset from SPC if IQ/OQ > 32 */ if ((maxinbound > IQ_NUM_32) || (maxoutbound > OQ_NUM_32)) { for(qIdx = 0; qIdx < maxinbound; qIdx++) { /* point back to the begin then plus offset to next queue */ MSGUCfgTblDWIdx = saveOffset; MSGUCfgTblDWIdx += inboundoffset; MSGUCfgTblDWIdx += (sizeof(spc_inboundQueueDescriptor_t) * qIdx); saRoot->inboundQueue[qIdx].PIPCIOffset = ossaHwRegReadExt(agRoot, pcibar, 
(bit32)(MSGUCfgTblDWIdx + IB_PIPCI_BAR_OFFSET)); } } smTrace(hpDBG_VERY_LOUD,"84", 0); /* TP:84 TP */ /* at least one inbound queue and one outbound queue enabled */ if ((0 == config->inboundQueues[0].numElements) || (0 == config->outboundQueues[0].numElements)) { SA_DBG1(("mpiInitialize: Error,IQ0 or OQ0 have to enable\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"85", 0); /* TP:85 TP */ /* clean the inbound queues */ for (i = 0; i < config->numInboundQueues; i ++) { if(0 != config->inboundQueues[i].numElements) { circularIQ = &saRoot->inboundQueue[i]; si_memset(circularIQ->memoryRegion.virtPtr, 0, circularIQ->memoryRegion.totalLength); si_memset(saRoot->inboundQueue[i].ciPointer, 0, sizeof(bit32)); if(smIS_SPCV(agRoot)) { ossaHwRegWriteExt(circularIQ->agRoot, circularIQ->PIPCIBar, circularIQ->PIPCIOffset, 0); SA_DBG1(("mpiInitialize: SPC V writes IQ %2d offset 0x%x\n",i ,circularIQ->PIPCIOffset)); } } } smTrace(hpDBG_VERY_LOUD,"86", 0); /* TP:86 TP */ /* clean the outbound queues */ for (i = 0; i < config->numOutboundQueues; i ++) { if(0 != config->outboundQueues[i].numElements) { circularOQ = &saRoot->outboundQueue[i]; si_memset(circularOQ->memoryRegion.virtPtr, 0, circularOQ->memoryRegion.totalLength); si_memset(saRoot->outboundQueue[i].piPointer, 0, sizeof(bit32)); if(smIS_SPCV(agRoot)) { ossaHwRegWriteExt(circularOQ->agRoot, circularOQ->CIPCIBar, circularOQ->CIPCIOffset, 0); SA_DBG2(("mpiInitialize: SPC V writes OQ %2d offset 0x%x\n",i ,circularOQ->CIPCIOffset)); } } } smTrace(hpDBG_VERY_LOUD,"75",0); /* TP:75 AAP1 IOP */ /* read back AAP1 and IOP event log address and size */ MSGUCfgTblDWIdx = saveOffset; value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_HI)); saRoot->mainConfigTable.upperEventLogAddress = value; SA_DBG1(("mpiInitialize: upperEventLogAddress 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_LO)); saRoot->mainConfigTable.lowerEventLogAddress = value; SA_DBG1(("mpiInitialize: lowerEventLogAddress 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_BUFF_SIZE)); saRoot->mainConfigTable.eventLogSize = value; SA_DBG1(("mpiInitialize: eventLogSize 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_OPTION)); saRoot->mainConfigTable.eventLogOption = value; SA_DBG1(("mpiInitialize: eventLogOption 0x%x\n", value)); SA_DBG1(("mpiInitialize: EventLog dd /p %08X`%08X L %x\n",saRoot->mainConfigTable.upperEventLogAddress,saRoot->mainConfigTable.lowerEventLogAddress,saRoot->mainConfigTable.eventLogSize/4 )); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_HI)); saRoot->mainConfigTable.upperIOPeventLogAddress = value; SA_DBG1(("mpiInitialize: upperIOPLogAddress 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_LO)); saRoot->mainConfigTable.lowerIOPeventLogAddress = value; SA_DBG1(("mpiInitialize: lowerIOPLogAddress 0x%x\n", value)); SA_DBG1(("mpiInitialize: IOPLog dd /p %08X`%08X L %x\n",saRoot->mainConfigTable.upperIOPeventLogAddress,saRoot->mainConfigTable.lowerIOPeventLogAddress,saRoot->mainConfigTable.IOPeventLogSize/4 )); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_BUFF_SIZE)); saRoot->mainConfigTable.IOPeventLogSize = value; SA_DBG1(("mpiInitialize: IOPeventLogSize 0x%x\n", value)); value = 
ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_OPTION)); saRoot->mainConfigTable.IOPeventLogOption = value; SA_DBG1(("mpiInitialize: IOPeventLogOption 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_INTERRUPT)); #ifdef SA_PRINTOUT_IN_WINDBG #ifndef DBG DbgPrint("mpiInitialize: EventLog (%d) dd /p %08X`%08X L %x\n", saRoot->mainConfigTable.eventLogOption, saRoot->mainConfigTable.upperEventLogAddress, saRoot->mainConfigTable.lowerEventLogAddress, saRoot->mainConfigTable.eventLogSize/4 ); DbgPrint("mpiInitialize: IOPLog (%d) dd /p %08X`%08X L %x\n", saRoot->mainConfigTable.IOPeventLogOption, saRoot->mainConfigTable.upperIOPeventLogAddress, saRoot->mainConfigTable.lowerIOPeventLogAddress, saRoot->mainConfigTable.IOPeventLogSize/4 ); #endif /* DBG */ #endif /* SA_PRINTOUT_IN_WINDBG */ saRoot->mainConfigTable.FatalErrorInterrupt = value; smTrace(hpDBG_VERY_LOUD,"76",value); /* TP:76 FatalErrorInterrupt */ SA_DBG1(("mpiInitialize: hwConfig->hwOption %X\n", saRoot->hwConfig.hwOption )); SA_DBG1(("mpiInitialize: FatalErrorInterrupt 0x%x\n", value)); /* read back Register Dump offset and length */ value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_OFFSET)); saRoot->mainConfigTable.FatalErrorDumpOffset0 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpOffset0 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_LENGTH)); saRoot->mainConfigTable.FatalErrorDumpLength0 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpLength0 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_OFFSET)); saRoot->mainConfigTable.FatalErrorDumpOffset1 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpOffset1 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_LENGTH)); saRoot->mainConfigTable.FatalErrorDumpLength1 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpLength1 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_PRECTD_PRESETD)); saRoot->mainConfigTable.PortRecoveryTimerPortResetTimer = value; SA_DBG1(("mpiInitialize: PortRecoveryTimerPortResetTimer 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IRAD_RESERVED)); saRoot->mainConfigTable.InterruptReassertionDelay = value; SA_DBG1(("mpiInitialize: InterruptReassertionDelay 0x%x\n", value)); if(smIS_SPCV(agRoot)) { bit32 sp1; sp1= ossaHwRegRead(agRoot,V_Scratchpad_1_Register ); if(SCRATCH_PAD1_V_ERROR_STATE(sp1)) { SA_DBG1(("mpiInitialize: SCRATCH_PAD1_V_ERROR_STAT 0x%x\n",sp1 )); ret = AGSA_RC_FAILURE; } } smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "m3"); return ret; } /*******************************************************************************/ /** \fn mpiWaitForConfigTable(agsaRoot_t *agRoot, spc_configMainDescriptor_t *config) * \brief Reading and Writing the Configuration Table * \param agsaRoot Pointer to a data structure containing LL layer context handles * \param config Pointer to Configuration Table * * Return: * AGSA_RC_SUCCESS if read the configuration table from SPC sucessful * AGSA_RC_FAILURE if read the configuration table from SPC failed */ /*******************************************************************************/ GLOBAL bit32 mpiWaitForConfigTable(agsaRoot_t *agRoot, spc_configMainDescriptor_t *config) { agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); bit32 MSGUCfgTblBase, 
ret = AGSA_RC_SUCCESS; bit32 CfgTblDWIdx; bit32 value, value1; bit32 max_wait_time; bit32 max_wait_count; bit32 Signature, ExpSignature; bit8 pcibar; SA_DBG2(("mpiWaitForConfigTable: Entering\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); smTraceFuncEnter(hpDBG_VERY_LOUD,"m4"); /* check error state */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value1 = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); if( smIS_SPC(agRoot) ) { SA_DBG1(("mpiWaitForConfigTable: Waiting for SPC FW becoming ready.P1 0x%X P2 0x%X\n",value,value1)); /* check AAP error */ if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) { /* error state */ SA_DBG1(("mpiWaitForConfigTable: AAP error state and code 0x%x, ScratchPad2=0x%x\n", value, value1)); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m4"); return AGSA_RC_FAILURE; } /* check IOP error */ if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) { /* error state */ SA_DBG1(("mpiWaitForConfigTable: IOP error state and code 0x%x, ScratchPad1=0x%x\n", value1, value)); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m4"); return AGSA_RC_FAILURE; } /* bit 4-31 of scratch pad1 should be zeros if it is not in error state */ #ifdef DONT_DO /* */ if (value & SCRATCH_PAD1_STATE_MASK) { /* error case */ SA_DBG1(("mpiWaitForConfigTable: wrong state failure, scratchPad1 0x%x\n", value)); SA_DBG1(("mpiWaitForConfigTable: ScratchPad0 AAP error code 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD2 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m4"); return AGSA_RC_FAILURE; } /* bit 4-31 of scratch pad2 should be zeros if it is not in error state */ if (value1 & SCRATCH_PAD2_STATE_MASK) { /* error case */ SA_DBG1(("mpiWaitForConfigTable: wrong state failure, scratchPad2 0x%x\n", value1)); SA_DBG1(("mpiWaitForConfigTable: ScratchPad3 IOP error code 0x%x\n",siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3) )); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD1 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "m4"); return AGSA_RC_FAILURE; } #endif /* DONT_DO */ /* checking the fw and IOP in ready state */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec timeout */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); /* wait until scratch pad 1 and 2 registers in ready state */ do { ossaStallThread(agRoot, WAIT_INCREMENT); value =siHalRegReadExt(agRoot, 
GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1) & SCRATCH_PAD1_RDY; value1 =siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2) & SCRATCH_PAD2_RDY; if(smIS_SPCV(agRoot)) { SA_DBG1(("mpiWaitForConfigTable:VEN_DEV_SPCV force SCRATCH_PAD2 RDY 1 %08X 2 %08X\n" ,value,value1)); value1 =3; } if ((max_wait_count -= WAIT_INCREMENT) == 0) { SA_DBG1(("mpiWaitForConfigTable: Timeout!! SCRATCH_PAD1/2 value = 0x%x 0x%x\n", value, value1)); break; } } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY)); if (!max_wait_count) { SA_DBG1(("mpiWaitForConfigTable: timeout failure\n")); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "m4"); return AGSA_RC_FAILURE; } }else { if(((value & SCRATCH_PAD1_V_BOOTSTATE_HDA_SEEPROM ) == SCRATCH_PAD1_V_BOOTSTATE_HDA_SEEPROM)) { SA_DBG1(("mpiWaitForConfigTable: HDA mode set in SEEPROM SP1 0x%X\n",value)); } if(((value & SCRATCH_PAD1_V_READY) != SCRATCH_PAD1_V_READY) || (value == 0xffffffff)) { SA_DBG1(("mpiWaitForConfigTable: Waiting for _V_ FW becoming ready.P1 0x%X P2 0x%X\n",value,value1)); /* checking the fw and IOP in ready state */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec timeout */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); /* wait until scratch pad 1 and 2 registers in ready state */ do { ossaStallThread(agRoot, WAIT_INCREMENT); value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value1 = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); if ((max_wait_count -= WAIT_INCREMENT) == 0) { SA_DBG1(("mpiWaitForConfigTable: Timeout!! 
SCRATCH_PAD1/2 value = 0x%x 0x%x\n", value, value1)); return AGSA_RC_FAILURE; } } while (((value & SCRATCH_PAD1_V_READY) != SCRATCH_PAD1_V_READY) || (value == 0xffffffff)); } } SA_DBG1(("mpiWaitForConfigTable: FW Ready, SCRATCH_PAD1/2 value = 0x%x 0x%x\n", value, value1)); /* read scratch pad0 to get PCI BAR and offset of configuration table */ MSGUCfgTblBase = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); /* get offset */ CfgTblDWIdx = MSGUCfgTblBase & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ MSGUCfgTblBase = (MSGUCfgTblBase & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; if(smIS_SPC(agRoot)) { if( smIS_spc8081(agRoot)) { if (BAR4 != MSGUCfgTblBase) { SA_DBG1(("mpiWaitForConfigTable: smIS_spc8081 PCI BAR is not BAR4, bar=0x%x - failure\n", MSGUCfgTblBase)); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "m4"); return AGSA_RC_FAILURE; } } else { if (BAR5 != MSGUCfgTblBase) { SA_DBG1(("mpiWaitForConfigTable: PCI BAR is not BAR5, bar=0x%x - failure\n", MSGUCfgTblBase)); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "m4"); return AGSA_RC_FAILURE; } } } /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, MSGUCfgTblBase); /* read signature from the configuration table */ Signature = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx); /* Error return if the signature is not "PMCS" */ ExpSignature = ('P') | ('M' << SHIFT8) | ('C' << SHIFT16) | ('S' << SHIFT24); if (Signature != ExpSignature) { SA_DBG1(("mpiWaitForConfigTable: Signature value = 0x%x\n", Signature)); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "m4"); return AGSA_RC_FAILURE; } /* save Signature */ si_memcpy(&config->Signature, &Signature, sizeof(Signature)); /* read Interface Revsion from the configuration table */ config->InterfaceRev = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_INTERFACE_REVISION); /* read FW Revsion from the configuration table */ config->FWRevision = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FW_REVISION); /* read Max Outstanding IO from the configuration table */ config->MaxOutstandingIO = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_MAX_OUTSTANDING_IO_OFFSET); /* read Max SGL and Max Devices from the configuration table */ config->MDevMaxSGL = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_MAX_SGL_OFFSET); /* read Controller Cap Flags from the configuration table */ config->ContrlCapFlag = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_CNTRL_CAP_OFFSET); /* read GST Table Offset from the configuration table */ config->GSTOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_GST_OFFSET); /* read Inbound Queue Offset from the configuration table */ config->inboundQueueOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_IBQ_OFFSET); /* read Outbound Queue Offset from the configuration table */ config->outboundQueueOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_OBQ_OFFSET); if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ } else { /* read HDA Flags from the configuration table */ config->HDAModeFlags = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_HDA_FLAGS_OFFSET); } /* read analog Setting offset from the configuration table */ config->analogSetupTblOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_ANALOG_SETUP_OFFSET); if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ /* read interrupt vector table offset */ config->InterruptVecTblOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_INT_VEC_TABLE_OFFSET); /* 
read phy attribute table offset */ config->phyAttributeTblOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_PHY_ATTRIBUTE_OFFSET); SA_DBG1(("mpiWaitForConfigTable: INT Vector Tble Offset = 0x%x\n", config->InterruptVecTblOffset)); SA_DBG1(("mpiWaitForConfigTable: Phy Attribute Tble Offset = 0x%x\n", config->phyAttributeTblOffset)); } else { ;/* SPC - Not used */ } /* read Error Dump Offset and Length */ config->FatalErrorDumpOffset0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_OFFSET); config->FatalErrorDumpLength0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_LENGTH); config->FatalErrorDumpOffset1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_OFFSET); config->FatalErrorDumpLength1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_LENGTH); SA_DBG1(("mpiWaitForConfigTable: Interface Revision value = 0x%08x\n", config->InterfaceRev)); SA_DBG1(("mpiWaitForConfigTable: FW Revision value = 0x%08x\n", config->FWRevision)); if(smIS_SPC(agRoot)) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%08x\n", STSDK_LL_SPC_VERSION)); } if(smIS_SPC6V(agRoot)) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%08x\n",STSDK_LL_VERSION )); } if(smIS_SPC12V(agRoot)) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%08x\n",STSDK_LL_12G_VERSION )); } SA_DBG1(("mpiWaitForConfigTable: MaxOutstandingIO value = 0x%08x\n", config->MaxOutstandingIO)); SA_DBG1(("mpiWaitForConfigTable: MDevMaxSGL value = 0x%08x\n", config->MDevMaxSGL)); SA_DBG1(("mpiWaitForConfigTable: ContrlCapFlag value = 0x%08x\n", config->ContrlCapFlag)); SA_DBG1(("mpiWaitForConfigTable: GSTOffset value = 0x%08x\n", config->GSTOffset)); SA_DBG1(("mpiWaitForConfigTable: inboundQueueOffset value = 0x%08x\n", config->inboundQueueOffset)); SA_DBG1(("mpiWaitForConfigTable: outboundQueueOffset value = 0x%08x\n", config->outboundQueueOffset)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpOffset0 value = 0x%08x\n", config->FatalErrorDumpOffset0)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpLength0 value = 0x%08x\n", config->FatalErrorDumpLength0)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpOffset1 value = 0x%08x\n", config->FatalErrorDumpOffset1)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpLength1 value = 0x%08x\n", config->FatalErrorDumpLength1)); SA_DBG1(("mpiWaitForConfigTable: HDAModeFlags value = 0x%08x\n", config->HDAModeFlags)); SA_DBG1(("mpiWaitForConfigTable: analogSetupTblOffset value = 0x%08x\n", config->analogSetupTblOffset)); /* check interface version */ if(smIS_SPC6V(agRoot)) { if (config->InterfaceRev != STSDK_LL_INTERFACE_VERSION) { SA_DBG1(("mpiWaitForConfigTable: V sTSDK interface ver. 0x%x does not match InterfaceRev 0x%x warning!\n", STSDK_LL_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_UNTESTED; if ((config->InterfaceRev & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK) != (STSDK_LL_INTERFACE_VERSION & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK)) { SA_DBG1(("mpiWaitForConfigTable: V sTSDK interface ver. 0x%x incompatible with InterfaceRev 0x%x warning!\n", STSDK_LL_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_INCOMPATIBLE; smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "m4"); return ret; } } } else if(smIS_SPC12V(agRoot)) { if (config->InterfaceRev != STSDK_LL_12G_INTERFACE_VERSION) { SA_DBG1(("mpiWaitForConfigTable: 12g V sTSDK interface ver. 
0x%x does not match InterfaceRev 0x%x warning!\n", STSDK_LL_12G_INTERFACE_VERSION, config->InterfaceRev));
      ret = AGSA_RC_VERSION_UNTESTED;
      if ((config->InterfaceRev & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK) != (STSDK_LL_12G_INTERFACE_VERSION & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK))
      {
        SA_DBG1(("mpiWaitForConfigTable: V sTSDK interface ver. 0x%x incompatible with InterfaceRev 0x%x warning!\n", STSDK_LL_12G_INTERFACE_VERSION, config->InterfaceRev));
        ret = AGSA_RC_VERSION_INCOMPATIBLE;
        smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "m4");
        return ret;
      }
    }
  }
  else
  {
    if (config->InterfaceRev != STSDK_LL_OLD_INTERFACE_VERSION)
    {
      SA_DBG1(("mpiWaitForConfigTable: SPC sTSDK interface ver. 0x%08x not compatible with InterfaceRev 0x%x warning!\n", STSDK_LL_OLD_INTERFACE_VERSION, config->InterfaceRev));
      ret = AGSA_RC_VERSION_INCOMPATIBLE;
      smTraceFuncExit(hpDBG_VERY_LOUD, 'k', "m4");
      return ret;
    }
  }

  /* Check FW versions */
  if(smIS_SPC6V(agRoot))
  {
    SA_DBG1(("mpiWaitForConfigTable:6 sTSDK ver. sa.h 0x%08x config 0x%08x\n", STSDK_LL_VERSION, config->FWRevision));
    /* check FW and LL sTSDK version */
    if (config->FWRevision != MATCHING_V_FW_VERSION )
    {
      if (config->FWRevision > MATCHING_V_FW_VERSION)
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x has not been tested with FW ver. 0x%08x warning!\n", STSDK_LL_VERSION, config->FWRevision));
        ret = AGSA_RC_VERSION_UNTESTED;
      }
      else if (config->FWRevision < MIN_FW_SPCVE_VERSION_SUPPORTED)
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x not compatible with FW ver. 0x%08x warning!\n", STSDK_LL_VERSION, config->FWRevision));
        ret = AGSA_RC_VERSION_INCOMPATIBLE;
        smTraceFuncExit(hpDBG_VERY_LOUD, 'l', "m4");
        return ret;
      }
      else
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x mismatch with FW ver. 0x%08x warning!\n",STSDK_LL_VERSION , config->FWRevision));
        ret = AGSA_RC_VERSION_UNTESTED;
      }
    }
  }
  else if(smIS_SPC12V(agRoot))
  {
    SA_DBG1(("mpiWaitForConfigTable:12 sTSDK ver. sa.h 0x%08x config 0x%08x\n", STSDK_LL_12G_VERSION, config->FWRevision));
    /* check FW and LL sTSDK version */
    if (config->FWRevision != MATCHING_12G_V_FW_VERSION )
    {
      if (config->FWRevision > MATCHING_12G_V_FW_VERSION)
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x has not been tested with FW ver. 0x%08x warning!\n", STSDK_LL_12G_VERSION, config->FWRevision));
        ret = AGSA_RC_VERSION_UNTESTED;
      }
      else if (config->FWRevision < MIN_FW_12G_SPCVE_VERSION_SUPPORTED)
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x not compatible with FW ver. 0x%08x warning!\n", STSDK_LL_12G_VERSION, config->FWRevision));
        ret = AGSA_RC_VERSION_INCOMPATIBLE;
        smTraceFuncExit(hpDBG_VERY_LOUD, 'm', "m4");
        return ret;
      }
      else
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x mismatch with FW ver. 0x%08x warning!\n",STSDK_LL_12G_VERSION , config->FWRevision));
        ret = AGSA_RC_VERSION_UNTESTED;
      }
    }
  }
  else
  {
    if (config->FWRevision != MATCHING_SPC_FW_VERSION )
    {
      if (config->FWRevision > MATCHING_SPC_FW_VERSION)
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x has not been tested with FW ver. 0x%08x warning!\n", STSDK_LL_SPC_VERSION, config->FWRevision));
        ret = AGSA_RC_VERSION_UNTESTED;
      }
      else if (config->FWRevision < MIN_FW_SPC_VERSION_SUPPORTED)
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x not compatible with FW ver. 0x%08x warning!\n", STSDK_LL_SPC_VERSION, config->FWRevision));
        ret = AGSA_RC_VERSION_INCOMPATIBLE;
        smTraceFuncExit(hpDBG_VERY_LOUD, 'n', "m4");
        return ret;
      }
      else
      {
        SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x mismatch with FW ver.
0x%08x warning!\n",STSDK_LL_SPC_VERSION , config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } } } SA_DBG1(("mpiWaitForConfigTable: ILA version 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_ILAT_ILAV_ILASMRN_ILAMRN_ILAMJN) )); if(smIS_SPC12V(agRoot)) { if (config->InterfaceRev > 0x301 ) { SA_DBG1(("mpiWaitForConfigTable: MAIN_INACTIVE_ILA_REVSION 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_INACTIVE_ILA_REVSION) )); SA_DBG1(("mpiWaitForConfigTable: MAIN_SEEPROM_REVSION 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_SEEPROM_REVSION) )); } } if(smIS_SPC12V(agRoot)) { if (config->InterfaceRev > 0x301 ) { SA_DBG1(("mpiWaitForConfigTable: MAIN_AWT_MIDRANGE 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_AWT_MIDRANGE) )); } } if(smIS_SFC(agRoot)) { /* always success for SFC*/ ret = AGSA_RC_SUCCESS; } if (agNULL != saRoot) { /* save the information */ saRoot->ControllerInfo.signature = Signature; saRoot->ControllerInfo.fwInterfaceRev = config->InterfaceRev; if(smIS_SPCV(agRoot)) { saRoot->ControllerInfo.hwRevision = (ossaHwRegReadConfig32(agRoot,8) & 0xFF); SA_DBG1(("mpiWaitForConfigTable: hwRevision 0x%x\n",saRoot->ControllerInfo.hwRevision )); } else { saRoot->ControllerInfo.hwRevision = SPC_READ_DEV_REV; } saRoot->ControllerInfo.fwRevision = config->FWRevision; saRoot->ControllerInfo.ilaRevision = config->ilaRevision; saRoot->ControllerInfo.maxPendingIO = config->MaxOutstandingIO; saRoot->ControllerInfo.maxSgElements = config->MDevMaxSGL & 0xFFFF; saRoot->ControllerInfo.maxDevices = (config->MDevMaxSGL & MAX_DEV_BITS) >> SHIFT16; saRoot->ControllerInfo.queueSupport = config->ContrlCapFlag & Q_SUPPORT_BITS; saRoot->ControllerInfo.phyCount = (bit8)((config->ContrlCapFlag & PHY_COUNT_BITS) >> SHIFT19); saRoot->ControllerInfo.sasSpecsSupport = (config->ContrlCapFlag & SAS_SPEC_BITS) >> SHIFT25; SA_DBG1(("mpiWaitForConfigTable: MaxOutstandingIO 0x%x swConfig->maxActiveIOs 0x%x\n", config->MaxOutstandingIO,saRoot->swConfig.maxActiveIOs )); if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ } else { saRoot->ControllerInfo.controllerSetting = (bit8)config->HDAModeFlags; } saRoot->ControllerInfo.sdkInterfaceRev = STSDK_LL_INTERFACE_VERSION; saRoot->ControllerInfo.sdkRevision = STSDK_LL_VERSION; saRoot->mainConfigTable.regDumpPCIBAR = pcibar; saRoot->mainConfigTable.FatalErrorDumpOffset0 = config->FatalErrorDumpOffset0; saRoot->mainConfigTable.FatalErrorDumpLength0 = config->FatalErrorDumpLength0; saRoot->mainConfigTable.FatalErrorDumpOffset1 = config->FatalErrorDumpOffset1; saRoot->mainConfigTable.FatalErrorDumpLength1 = config->FatalErrorDumpLength1; if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ } else { saRoot->mainConfigTable.HDAModeFlags = config->HDAModeFlags; } saRoot->mainConfigTable.analogSetupTblOffset = config->analogSetupTblOffset; if(smIS_SPCV(agRoot)) { saRoot->mainConfigTable.InterruptVecTblOffset = config->InterruptVecTblOffset; saRoot->mainConfigTable.phyAttributeTblOffset = config->phyAttributeTblOffset; saRoot->mainConfigTable.PortRecoveryTimerPortResetTimer = config->portRecoveryResetTimer; } SA_DBG1(("mpiWaitForConfigTable: Signature = 0x%x\n", Signature)); SA_DBG1(("mpiWaitForConfigTable: hwRevision = 0x%x\n", saRoot->ControllerInfo.hwRevision)); SA_DBG1(("mpiWaitForConfigTable: FW Revision = 0x%x\n", config->FWRevision)); SA_DBG1(("mpiWaitForConfigTable: Max Sgl = 0x%x\n", saRoot->ControllerInfo.maxSgElements)); SA_DBG1(("mpiWaitForConfigTable: Max Device = 0x%x\n", 
saRoot->ControllerInfo.maxDevices));
    SA_DBG1(("mpiWaitForConfigTable: Queue Support = 0x%x\n", saRoot->ControllerInfo.queueSupport));
    SA_DBG1(("mpiWaitForConfigTable: Phy Count = 0x%x\n", saRoot->ControllerInfo.phyCount));
    SA_DBG1(("mpiWaitForConfigTable: sas Specs Support = 0x%x\n", saRoot->ControllerInfo.sasSpecsSupport));
  }

  if(ret != AGSA_RC_SUCCESS )
  {
    SA_DBG1(("mpiWaitForConfigTable: return 0x%x not AGSA_RC_SUCCESS warning!\n", ret));
  }

  smTraceFuncExit(hpDBG_VERY_LOUD, 'o', "m4");
  return ret;
}

/*******************************************************************************/
/** \fn bit32 mpiUnInitConfigTable(agsaRoot_t *agRoot)
 *  \brief Un-initialize the Configuration Table
 *  \param agsaRoot Pointer to a data structure containing LL layer context handles
 *
 *  Return:
 *         AGSA_RC_SUCCESS if the configuration table was un-initialized successfully
 *         AGSA_RC_FAILURE if un-initializing the configuration table failed
 */
/*******************************************************************************/
GLOBAL bit32 mpiUnInitConfigTable(agsaRoot_t *agRoot)
{
  bit32 MSGUCfgTblBase;
  bit32 CfgTblDWIdx, GSTOffset, GSTLenMPIS;
  bit32 value, togglevalue;
  bit32 max_wait_time;
  bit32 max_wait_count;
  bit8 pcibar;

  smTraceFuncEnter(hpDBG_VERY_LOUD,"m7");
  SA_DBG1(("mpiUnInitConfigTable: agRoot %p\n",agRoot));
  SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null");
  togglevalue = 0;

  /* read scratch pad0 to get PCI BAR and offset of configuration table */
  MSGUCfgTblBase = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  if(MSGUCfgTblBase == 0xFFFFFFFF)
  {
    SA_DBG1(("mpiUnInitConfigTable: MSGUCfgTblBase = 0x%x AGSA_RC_FAILURE\n",MSGUCfgTblBase));
    return AGSA_RC_FAILURE;
  }
  /* get offset */
  CfgTblDWIdx = MSGUCfgTblBase & SCRATCH_PAD0_OFFSET_MASK;
  /* get PCI BAR */
  MSGUCfgTblBase = (MSGUCfgTblBase & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* convert the PCI BAR to logical bar number */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, MSGUCfgTblBase);

  /* Write bit 1 to Inbound DoorBell Register */
  siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);

  /* wait until Inbound DoorBell Clear Register toggled */
  max_wait_time = WAIT_SECONDS(gWait_2);  /* 2 sec */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    value = MSGU_READ_IDR;
    value &= SPC_MSGU_CFG_TABLE_RESET;
  } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT));

  if (!max_wait_count)
  {
    SA_DBG1(("mpiUnInitConfigTable: TIMEOUT:IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m7");
    if(smIS_SPC(agRoot) )
    {
      return AGSA_RC_FAILURE;
    }
  }

  /* check the MPI-State for termination in progress */
  /* wait until Inbound DoorBell Clear Register toggled */
  max_wait_time = WAIT_SECONDS(gWait_2);  /* 2 sec */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  GSTOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_GST_OFFSET);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    if(GSTOffset == 0xFFFFFFFF)
    {
      SA_DBG1(("mpiUnInitConfigTable:AGSA_RC_FAILURE GSTOffset = 0x%x\n",GSTOffset));
      return AGSA_RC_FAILURE;
    }
    GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + (bit32)(GSTOffset + GST_GSTLEN_MPIS_OFFSET));
    if (GST_MPI_STATE_UNINIT == (GSTLenMPIS & GST_MPI_STATE_MASK))
    {
      break;
    }
  } while (max_wait_count -= WAIT_INCREMENT);

  if (!max_wait_count)
  {
    SA_DBG1(("mpiUnInitConfigTable: TIMEOUT, MPI State = 0x%x\n", GSTLenMPIS & GST_MPI_STATE_MASK));
#if defined(SALLSDK_DEBUG)
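    /* Timed out waiting for the firmware to reach GST_MPI_STATE_UNINIT;
     * dump all four MSGU scratchpad registers, since scratchpads 1 and 2
     * carry the AAP/IOP state words used elsewhere in this file to
     * diagnose firmware error states. */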
SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD0 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD1 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1))); SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD2 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_2))); SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD3 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m7"); return AGSA_RC_FAILURE; } smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m7"); return AGSA_RC_SUCCESS; } /*******************************************************************************/ /** \fn void mpiUpdateIBQueueCfgTable(agsaRoot_t *agRoot, spc_inboundQueueDescriptor_t *outQueueCfg, * bit32 QueueTableOffset,bit8 pcibar) * \brief Writing to the inbound queue of the Configuration Table * \param agsaRoot Pointer to a data structure containing both application and LL layer context handles * \param outQueueCfg Pointer to inbuond configuration area * \param QueueTableOffset Queue configuration table offset * \param pcibar PCI BAR * * Return: * None */ /*******************************************************************************/ GLOBAL void mpiUpdateIBQueueCfgTable(agsaRoot_t *agRoot, spc_inboundQueueDescriptor_t *inQueueCfg, bit32 QueueTableOffset, bit8 pcibar) { smTraceFuncEnter(hpDBG_VERY_LOUD,"m5"); smTrace(hpDBG_VERY_LOUD,"Ba",QueueTableOffset); /* TP:Ba QueueTableOffset */ smTrace(hpDBG_VERY_LOUD,"Bb",pcibar); /* TP:Bb pcibar */ ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_PROPERITY_OFFSET), inQueueCfg->elementPriSizeCount); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_BASE_ADDR_HI_OFFSET), inQueueCfg->upperBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_BASE_ADDR_LO_OFFSET), inQueueCfg->lowerBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_CI_BASE_ADDR_HI_OFFSET), inQueueCfg->ciUpperBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_CI_BASE_ADDR_LO_OFFSET), inQueueCfg->ciLowerBaseAddress); SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x elementPriSizeCount 0x%x\n",(bit32)(QueueTableOffset + IB_PROPERITY_OFFSET), inQueueCfg->elementPriSizeCount)); SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x upperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_BASE_ADDR_HI_OFFSET), inQueueCfg->upperBaseAddress)); SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x lowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_BASE_ADDR_LO_OFFSET), inQueueCfg->lowerBaseAddress)); SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x ciUpperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_CI_BASE_ADDR_HI_OFFSET), inQueueCfg->ciUpperBaseAddress)); SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x ciLowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_CI_BASE_ADDR_LO_OFFSET), inQueueCfg->ciLowerBaseAddress)); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m5"); } /*******************************************************************************/ /** \fn void mpiUpdateOBQueueCfgTable(agsaRoot_t *agRoot, spc_outboundQueueDescriptor_t *outQueueCfg, * bit32 QueueTableOffset,bit8 pcibar) * \brief Writing to the inbound queue of the Configuration Table * \param agsaRoot Pointer to a data structure containing both application * and LL layer context handles * \param outQueueCfg Pointer to outbuond configuration area * \param QueueTableOffset Queue configuration table offset * \param pcibar 
PCI BAR * * Return: * None */ /*******************************************************************************/ GLOBAL void mpiUpdateOBQueueCfgTable(agsaRoot_t *agRoot, spc_outboundQueueDescriptor_t *outQueueCfg, bit32 QueueTableOffset, bit8 pcibar) { smTraceFuncEnter(hpDBG_VERY_LOUD,"m8"); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_PROPERITY_OFFSET), outQueueCfg->elementSizeCount); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_BASE_ADDR_HI_OFFSET), outQueueCfg->upperBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_BASE_ADDR_LO_OFFSET), outQueueCfg->lowerBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_PI_BASE_ADDR_HI_OFFSET), outQueueCfg->piUpperBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_PI_BASE_ADDR_LO_OFFSET), outQueueCfg->piLowerBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_INTERRUPT_COALES_OFFSET), outQueueCfg->interruptVecCntDelay); SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x elementSizeCount 0x%x\n",(bit32)(QueueTableOffset + OB_PROPERITY_OFFSET), outQueueCfg->elementSizeCount)); SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x upperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_BASE_ADDR_HI_OFFSET), outQueueCfg->upperBaseAddress)); SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x lowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_BASE_ADDR_LO_OFFSET), outQueueCfg->lowerBaseAddress)); SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x piUpperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_PI_BASE_ADDR_HI_OFFSET), outQueueCfg->piUpperBaseAddress)); SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x piLowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_PI_BASE_ADDR_LO_OFFSET), outQueueCfg->piLowerBaseAddress)); SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x interruptVecCntDelay 0x%x\n",(bit32)(QueueTableOffset + OB_INTERRUPT_COALES_OFFSET), outQueueCfg->interruptVecCntDelay)); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m8"); } /*******************************************************************************/ /** \fn void mpiUpdateOBQueueCfgTable(agsaRoot_t *agRoot, spc_outboundQueueDescriptor_t *outQueueCfg, * bit32 QueueTableOffset,bit8 pcibar) * \brief Writing to the inbound queue of the Configuration Table * \param agsaRoot Pointer to a data structure containing both application * and LL layer context handles * \param outQueueCfg Pointer to outbuond configuration area * \param QueueTableOffset Queue configuration table offset * \param pcibar PCI BAR * * Return: * None */ /*******************************************************************************/ GLOBAL void mpiUpdateFatalErrorTable(agsaRoot_t *agRoot, bit32 FerrTableOffset, bit32 lowerBaseAddress, bit32 upperBaseAddress, bit32 length, bit8 pcibar) { smTraceFuncEnter(hpDBG_VERY_LOUD,"2U"); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LO_OFFSET), lowerBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HI_OFFSET), upperBaseAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LENGTH), length); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HANDSHAKE), 0); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_STATUS), 0); SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LO_OFFSET, 
lowerBaseAddress));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HI_OFFSET,upperBaseAddress ));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_LENGTH 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LENGTH, length));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HANDSHAKE,0 ));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_STATUS 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_STATUS,0 ));
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "2U");
}

/*******************************************************************************/
/** \fn bit32 mpiGetPCIBarIndex(agsaRoot_t *agRoot, bit32 pciBar)
 *  \brief Get PCI BAR Index from PCI BAR
 *  \param agsaRoot Pointer to a data structure containing both application and LL layer context handles
 *  \param pciBar   PCI BAR
 *
 *  Return:
 *         PCI BAR Index
 */
/*******************************************************************************/
GLOBAL bit32 mpiGetPCIBarIndex(agsaRoot_t *agRoot, bit32 pciBar)
{
  switch(pciBar)
  {
    case BAR0:
    case BAR1:
      pciBar = PCIBAR0;
      break;
    case BAR2:
    case BAR3:
      pciBar = PCIBAR1;
      break;
    case BAR4:
      pciBar = PCIBAR2;
      break;
    case BAR5:
      pciBar = PCIBAR3;
      break;
    default:
      pciBar = PCIBAR0;
      break;
  }
  return pciBar;
}

/*******************************************************************************/
/** \fn void mpiReadGSTable(agsaRoot_t *agRoot, spc_GSTableDescriptor_t *mpiGSTable)
 *  \brief Reading the General Status Table
 *
 *  \param agsaRoot   Handles for this instance of SAS/SATA LLL
 *  \param mpiGSTable Pointer of General Status Table
 *
 *  Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiReadGSTable(agsaRoot_t *agRoot, spc_GSTableDescriptor_t *mpiGSTable)
{
  bit32 CFGTableOffset, TableOffset;
  bit32 GSTableOffset;
  bit8 i, pcibar;

  smTraceFuncEnter(hpDBG_VERY_LOUD,"m9");

  /* get offset of the configuration table */
  TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  if(0xFFFFFFFF == TableOffset)
  {
    SA_ASSERT(0xFFFFFFFF != TableOffset, "Chip PCI dead");
    SA_DBG1(("mpiReadGSTable: Chip PCI dead TableOffset 0x%x\n", TableOffset));
    return;
  }
  // SA_DBG1(("mpiReadGSTable: TableOffset 0x%x\n", TableOffset));
  CFGTableOffset = TableOffset & SCRATCH_PAD0_OFFSET_MASK;
  /* get PCI BAR */
  TableOffset = (TableOffset & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* convert the PCI BAR to logical bar number */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, TableOffset);
  /* read GST Table Offset from the configuration table */
  GSTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_GST_OFFSET);
  // SA_DBG1(("mpiReadGSTable: GSTableOffset 0x%x\n",GSTableOffset ));
  GSTableOffset = CFGTableOffset + GSTableOffset;

  mpiGSTable->GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_GSTLEN_MPIS_OFFSET));
  mpiGSTable->IQFreezeState0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IQ_FREEZE_STATE0_OFFSET));
  mpiGSTable->IQFreezeState1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IQ_FREEZE_STATE1_OFFSET));
  mpiGSTable->MsguTcnt = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_MSGUTCNT_OFFSET));
  mpiGSTable->IopTcnt = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IOPTCNT_OFFSET));
  mpiGSTable->Iop1Tcnt = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IOP1TCNT_OFFSET));
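  /*
   * GSTLenMPIS packs three fields, matching how it is decoded in this
   * driver: bits 0..2 carry the MPI state (GST_MPI_STATE_MASK), bits 3..15
   * carry the GST length (extracted below with the 0xfff8 mask and SHIFT3),
   * and the upper 16 bits carry the MPI initialization error that
   * mpiInitialize() checks after shifting by SHIFT16.
   */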
SA_DBG4(("mpiReadGSTable: GSTLenMPIS 0x%x\n", mpiGSTable->GSTLenMPIS)); SA_DBG4(("mpiReadGSTable: GSTLen 0x%x\n", (mpiGSTable->GSTLenMPIS & 0xfff8) >> SHIFT3)); SA_DBG4(("mpiReadGSTable: IQFreezeState0 0x%x\n", mpiGSTable->IQFreezeState0)); SA_DBG4(("mpiReadGSTable: IQFreezeState1 0x%x\n", mpiGSTable->IQFreezeState1)); SA_DBG4(("mpiReadGSTable: MsguTcnt 0x%x\n", mpiGSTable->MsguTcnt)); SA_DBG4(("mpiReadGSTable: IopTcnt 0x%x\n", mpiGSTable->IopTcnt)); SA_DBG4(("mpiReadGSTable: Iop1Tcnt 0x%x\n", mpiGSTable->Iop1Tcnt)); if(smIS_SPCV(agRoot)) { /***** read Phy State from SAS Phy Attribute Table */ TableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_PHY_ATTRIBUTE_OFFSET); TableOffset &= 0x00FFFFFF; TableOffset = TableOffset + CFGTableOffset; for (i = 0; i < 8; i++) { mpiGSTable->PhyState[i] = ossaHwRegReadExt(agRoot, pcibar, (bit32)(TableOffset + i * sizeof(phyAttrb_t))); SA_DBG4(("mpiReadGSTable: PhyState[0x%x] 0x%x\n", i, mpiGSTable->PhyState[i])); } } else { for (i = 0; i < 8; i++) { mpiGSTable->PhyState[i] = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_PHYSTATE_OFFSET + i * 4)); SA_DBG4(("mpiReadGSTable: PhyState[0x%x] 0x%x\n", i, mpiGSTable->PhyState[i])); } } mpiGSTable->GPIOpins = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_GPIO_PINS_OFFSET)); SA_DBG4(("mpiReadGSTable: GPIOpins 0x%x\n", mpiGSTable->GPIOpins)); for (i = 0; i < 8; i++) { mpiGSTable->recoverErrInfo[i] = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_RERRINFO_OFFSET)); SA_DBG4(("mpiReadGSTable: recoverErrInfo[0x%x] 0x%x\n", i, mpiGSTable->recoverErrInfo[i])); } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m9"); } /*******************************************************************************/ /** \fn void siInitResources(agsaRoot_t *agRoot) * Initialization of LL resources * * \param agsaRoot Handles for this instance of SAS/SATA LLL * \param memoryAllocated Point to the data structure that holds the different * chunks of memory that are required * * Return: * None */ /*******************************************************************************/ GLOBAL void siInitResources(agsaRoot_t *agRoot, agsaMemoryRequirement_t *memoryAllocated, agsaHwConfig_t *hwConfig, agsaSwConfig_t *swConfig, bit32 usecsPerTick) { agsaLLRoot_t *saRoot; agsaDeviceDesc_t *pDeviceDesc; agsaIORequestDesc_t *pRequestDesc; agsaTimerDesc_t *pTimerDesc; agsaPort_t *pPort; agsaPortMap_t *pPortMap; agsaDeviceMap_t *pDeviceMap; agsaIOMap_t *pIOMap; bit32 maxNumIODevices; bit32 i, j; mpiICQueue_t *circularIQ; mpiOCQueue_t *circularOQ; if (agNULL == agRoot) { return; } /* Get the saRoot memory address */ saRoot = (agsaLLRoot_t *) (memoryAllocated->agMemory[LLROOT_MEM_INDEX].virtPtr); agRoot->sdkData = (void *) saRoot; /* Setup Device link */ /* Save the information of allocated device Link memory */ saRoot->deviceLinkMem = memoryAllocated->agMemory[DEVICELINK_MEM_INDEX]; si_memset(saRoot->deviceLinkMem.virtPtr, 0, saRoot->deviceLinkMem.totalLength); SA_DBG2(("siInitResources: [%d] saRoot->deviceLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n" , DEVICELINK_MEM_INDEX, saRoot->deviceLinkMem.virtPtr, saRoot->deviceLinkMem.phyAddrLower, saRoot->deviceLinkMem.numElements, saRoot->deviceLinkMem.totalLength, saRoot->deviceLinkMem.type)); maxNumIODevices = swConfig->numDevHandles; SA_DBG2(("siInitResources: maxNumIODevices=%d, swConfig->numDevHandles=%d \n", maxNumIODevices, swConfig->numDevHandles)); /* Setup free IO Devices link list */ saLlistInitialize(&(saRoot->freeDevicesList)); for ( 
i = 0; i < (bit32) maxNumIODevices; i ++ ) { /* get the pointer to the device descriptor */ pDeviceDesc = (agsaDeviceDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->deviceLinkMem), i); /* Initialize device descriptor */ saLlinkInitialize(&(pDeviceDesc->linkNode)); pDeviceDesc->initiatorDevHandle.osData = agNULL; pDeviceDesc->initiatorDevHandle.sdkData = agNULL; pDeviceDesc->targetDevHandle.osData = agNULL; pDeviceDesc->targetDevHandle.sdkData = agNULL; pDeviceDesc->deviceType = SAS_SATA_UNKNOWN_DEVICE; pDeviceDesc->pPort = agNULL; pDeviceDesc->DeviceMapIndex = 0; saLlistInitialize(&(pDeviceDesc->pendingIORequests)); /* Add the device descriptor to the free IO device link list */ saLlistAdd(&(saRoot->freeDevicesList), &(pDeviceDesc->linkNode)); } /* Setup IO Request link */ /* Save the information of allocated IO Request Link memory */ saRoot->IORequestMem = memoryAllocated->agMemory[IOREQLINK_MEM_INDEX]; si_memset(saRoot->IORequestMem.virtPtr, 0, saRoot->IORequestMem.totalLength); SA_DBG2(("siInitResources: [%d] saRoot->IORequestMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", IOREQLINK_MEM_INDEX, saRoot->IORequestMem.virtPtr, saRoot->IORequestMem.phyAddrLower, saRoot->IORequestMem.numElements, saRoot->IORequestMem.totalLength, saRoot->IORequestMem.type)); /* Setup free IO Request link list */ saLlistIOInitialize(&(saRoot->freeIORequests)); saLlistIOInitialize(&(saRoot->freeReservedRequests)); for ( i = 0; i < swConfig->maxActiveIOs; i ++ ) { /* get the pointer to the request descriptor */ pRequestDesc = (agsaIORequestDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->IORequestMem), i); /* Initialize request descriptor */ saLlinkIOInitialize(&(pRequestDesc->linkNode)); pRequestDesc->valid = agFALSE; pRequestDesc->requestType = AGSA_REQ_TYPE_UNKNOWN; pRequestDesc->pIORequestContext = agNULL; pRequestDesc->HTag = i; pRequestDesc->pDevice = agNULL; pRequestDesc->pPort = agNULL; /* Add the request descriptor to the free IO Request link list */ /* Add the request descriptor to the free Reserved Request link list */ /* SMP request must get service so reserve one request when first SMP completes */ if(saLlistIOGetCount(&(saRoot->freeReservedRequests)) < SA_RESERVED_REQUEST_COUNT) { saLlistIOAdd(&(saRoot->freeReservedRequests), &(pRequestDesc->linkNode)); } else { saLlistIOAdd(&(saRoot->freeIORequests), &(pRequestDesc->linkNode)); } } /* Setup timer link */ /* Save the information of allocated timer Link memory */ saRoot->timerLinkMem = memoryAllocated->agMemory[TIMERLINK_MEM_INDEX]; si_memset(saRoot->timerLinkMem.virtPtr, 0, saRoot->timerLinkMem.totalLength); SA_DBG2(("siInitResources: [%d] saRoot->timerLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", TIMERLINK_MEM_INDEX, saRoot->timerLinkMem.virtPtr, saRoot->timerLinkMem.phyAddrLower, saRoot->timerLinkMem.numElements, saRoot->timerLinkMem.totalLength, saRoot->timerLinkMem.type)); /* Setup free timer link list */ saLlistInitialize(&(saRoot->freeTimers)); for ( i = 0; i < NUM_TIMERS; i ++ ) { /* get the pointer to the timer descriptor */ pTimerDesc = (agsaTimerDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->timerLinkMem), i); /* Initialize timer descriptor */ saLlinkInitialize(&(pTimerDesc->linkNode)); pTimerDesc->valid = agFALSE; pTimerDesc->timeoutTick = 0; pTimerDesc->pfnTimeout = agNULL; pTimerDesc->Event = 0; pTimerDesc->pParm = agNULL; /* Add the timer descriptor to the free timer link list */ saLlistAdd(&(saRoot->freeTimers), &(pTimerDesc->linkNode)); } /* Setup valid timer link list */ saLlistInitialize(&(saRoot->validTimers)); /* Setup 
Phys */ /* Setup PhyCount */ saRoot->phyCount = (bit8) hwConfig->phyCount; /* Init Phy data structure */ for ( i = 0; i < saRoot->phyCount; i ++ ) { saRoot->phys[i].pPort = agNULL; saRoot->phys[i].phyId = (bit8) i; /* setup phy status is PHY_STOPPED */ PHY_STATUS_SET(&(saRoot->phys[i]), PHY_STOPPED); } /* Setup Ports */ /* Setup PortCount */ saRoot->portCount = saRoot->phyCount; /* Setup free port link list */ saLlistInitialize(&(saRoot->freePorts)); for ( i = 0; i < saRoot->portCount; i ++ ) { /* get the pointer to the port */ pPort = &(saRoot->ports[i]); /* Initialize port */ saLlinkInitialize(&(pPort->linkNode)); pPort->portContext.osData = agNULL; pPort->portContext.sdkData = pPort; pPort->portId = 0; pPort->portIdx = (bit8) i; pPort->status = PORT_NORMAL; for ( j = 0; j < saRoot->phyCount; j ++ ) { pPort->phyMap[j] = agFALSE; } saLlistInitialize(&(pPort->listSASATADevices)); /* Add the port to the free port link list */ saLlistAdd(&(saRoot->freePorts), &(pPort->linkNode)); } /* Setup valid port link list */ saLlistInitialize(&(saRoot->validPorts)); /* Init sysIntsActive */ saRoot->sysIntsActive = agFALSE; /* setup timer tick granularity */ saRoot->usecsPerTick = usecsPerTick; /* initialize LL timer tick */ saRoot->timeTick = 0; /* initialize device (de)registration callback fns */ saRoot->DeviceRegistrationCB = agNULL; saRoot->DeviceDeregistrationCB = agNULL; /* Initialize the PortMap for port context */ for ( i = 0; i < saRoot->portCount; i ++ ) { pPortMap = &(saRoot->PortMap[i]); pPortMap->PortContext = agNULL; pPortMap->PortID = PORT_MARK_OFF; pPortMap->PortStatus = PORT_NORMAL; saRoot->autoDeregDeviceflag[i] = 0; } /* Initialize the DeviceMap for device handle */ for ( i = 0; i < MAX_IO_DEVICE_ENTRIES; i ++ ) { pDeviceMap = &(saRoot->DeviceMap[i]); pDeviceMap->DeviceHandle = agNULL; pDeviceMap->DeviceIdFromFW = i; } /* Initialize the IOMap for IOrequest */ for ( i = 0; i < MAX_ACTIVE_IO_REQUESTS; i ++ ) { pIOMap = &(saRoot->IOMap[i]); pIOMap->IORequest = agNULL; pIOMap->Tag = MARK_OFF; } /* clean the inbound queues */ for (i = 0; i < saRoot->QueueConfig.numInboundQueues; i ++) { if(0 != saRoot->inboundQueue[i].numElements) { circularIQ = &saRoot->inboundQueue[i]; si_memset(circularIQ->memoryRegion.virtPtr, 0, circularIQ->memoryRegion.totalLength); si_memset(saRoot->inboundQueue[i].ciPointer, 0, sizeof(bit32)); } } /* clean the outbound queues */ for (i = 0; i < saRoot->QueueConfig.numOutboundQueues; i ++) { if(0 != saRoot->outboundQueue[i].numElements) { circularOQ = &saRoot->outboundQueue[i]; si_memset(circularOQ->memoryRegion.virtPtr, 0, circularOQ->memoryRegion.totalLength); si_memset(saRoot->outboundQueue[i].piPointer, 0, sizeof(bit32)); circularOQ->producerIdx = 0; circularOQ->consumerIdx = 0; SA_DBG3(("siInitResources: Q %d Clean PI 0x%03x CI 0x%03x\n", i,circularOQ->producerIdx, circularOQ->consumerIdx)); } } return; } /*******************************************************************************/ /** \fn void mpiReadCALTable(agsaRoot_t *agRoot, * spc_SPASTable_t *mpiCALTable, bit32 index) * \brief Reading the Phy Analog Setup Register Table * \param agsaRoot Handles for this instance of SAS/SATA LLL * \param mpiCALTable Pointer to the Phy Calibration Table * \param index Index of the analog setup table entry to read * * Return: * None */ /*******************************************************************************/ GLOBAL void mpiReadCALTable(agsaRoot_t *agRoot, spc_SPASTable_t *mpiCALTable, bit32 index) { bit32 CFGTableOffset, TableOffset; bit32 CALTableOffset; bit8 pcibar; /* get offset of the configuration table */ TableOffset =
siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); CFGTableOffset = TableOffset & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ TableOffset = (TableOffset & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, TableOffset); /* read Calibration Table Offset from the configuration table */ CALTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_ANALOG_SETUP_OFFSET); if(smIS_SPCV(agRoot)) { CALTableOffset &= 0x00FFFFFF; } CALTableOffset = CFGTableOffset + CALTableOffset + (index * ANALOG_SETUP_ENTRY_SIZE * 4); mpiCALTable->spaReg0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG1_OFFSET)); mpiCALTable->spaReg1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG2_OFFSET)); mpiCALTable->spaReg2 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG3_OFFSET)); mpiCALTable->spaReg3 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_CFG_OFFSET)); mpiCALTable->spaReg4 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG1_OFFSET)); mpiCALTable->spaReg5 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG2_OFFSET)); mpiCALTable->spaReg6 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG1_OFFSET)); mpiCALTable->spaReg7 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG2_OFFSET)); SA_DBG3(("mpiReadCALTable: spaReg0 0x%x\n", mpiCALTable->spaReg0)); SA_DBG3(("mpiReadCALTable: spaReg1 0x%x\n", mpiCALTable->spaReg1)); SA_DBG3(("mpiReadCALTable: spaReg2 0x%x\n", mpiCALTable->spaReg2)); SA_DBG3(("mpiReadCALTable: spaReg3 0x%x\n", mpiCALTable->spaReg3)); SA_DBG3(("mpiReadCALTable: spaReg4 0x%x\n", mpiCALTable->spaReg4)); SA_DBG3(("mpiReadCALTable: spaReg5 0x%x\n", mpiCALTable->spaReg5)); SA_DBG3(("mpiReadCALTable: spaReg6 0x%x\n", mpiCALTable->spaReg6)); SA_DBG3(("mpiReadCALTable: spaReg7 0x%x\n", mpiCALTable->spaReg7)); } /*******************************************************************************/ /** \fn void mpiWriteCALTable(agsaRoot_t *agRoot, * spc_SPASTable_t *mpiCALTable, bit32 index) * \brief Writing the Phy Analog Setup Register Table * \param agsaRoot Handles for this instance of SAS/SATA LLL * \param mpiCALTable Pointer to the Phy Calibration Table * \param index Index of the analog setup table entry to write * * Return: * None */ /*******************************************************************************/ GLOBAL void mpiWriteCALTable(agsaRoot_t *agRoot, spc_SPASTable_t *mpiCALTable, bit32 index) { bit32 CFGTableOffset, TableOffset; bit32 CALTableOffset; bit8 pcibar; smTraceFuncEnter(hpDBG_VERY_LOUD,"m6"); /* get offset of the configuration table */ TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); CFGTableOffset = TableOffset & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ TableOffset = (TableOffset & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, TableOffset); /* read Calibration Table Offset from the configuration table */ CALTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_ANALOG_SETUP_OFFSET); if(smIS_SPCV(agRoot)) { CALTableOffset &= 0x00FFFFFF; } CALTableOffset = CFGTableOffset + CALTableOffset + (index * ANALOG_SETUP_ENTRY_SIZE * 4); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG1_OFFSET), mpiCALTable->spaReg0); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG2_OFFSET), mpiCALTable->spaReg1); ossaHwRegWriteExt(agRoot, pcibar,
(bit32)(CALTableOffset + TX_PORT_CFG3_OFFSET), mpiCALTable->spaReg2); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_CFG_OFFSET), mpiCALTable->spaReg3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG1_OFFSET), mpiCALTable->spaReg4); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG2_OFFSET), mpiCALTable->spaReg5); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG1_OFFSET), mpiCALTable->spaReg6); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG2_OFFSET), mpiCALTable->spaReg7); SA_DBG4(("mpiWriteCALTable: Offset 0x%08x spaReg0 0x%x 0x%x 0x%x 0x%x\n",(bit32)(CALTableOffset + TX_PORT_CFG1_OFFSET), mpiCALTable->spaReg0, mpiCALTable->spaReg1, mpiCALTable->spaReg2, mpiCALTable->spaReg3)); SA_DBG4(("mpiWriteCALTable: Offset 0x%08x spaReg4 0x%x 0x%x 0x%x 0x%x\n",(bit32)(CALTableOffset + RV_PORT_CFG1_OFFSET), mpiCALTable->spaReg4, mpiCALTable->spaReg5, mpiCALTable->spaReg6, mpiCALTable->spaReg7)); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m6"); } /*******************************************************************************/ /** \fn void mpiWriteCALAll(agsaRoot_t *agRoot, * agsaPhyAnalogSetupTable_t *mpiCALTable) * \brief Writing all entries of the Phy Analog Setup Register Table * \param agsaRoot Handles for this instance of SAS/SATA LLL * \param mpiCALTable Pointer to the Phy Calibration Table * * Return: * None */ /*******************************************************************************/ GLOBAL void mpiWriteCALAll(agsaRoot_t *agRoot, agsaPhyAnalogSetupTable_t *mpiCALTable) { bit8 i; smTraceFuncEnter(hpDBG_VERY_LOUD,"mz"); if(smIS_SPCV(agRoot)) { smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "mz"); return; } for (i = 0; i < MAX_INDEX; i++) { mpiWriteCALTable(agRoot, (spc_SPASTable_t *)&mpiCALTable->phyAnalogSetupRegisters[i], i); } smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "mz"); } GLOBAL void mpiWrAnalogSetupTable(agsaRoot_t *agRoot, mpiConfig_t *config ) { bit32 AnalogTableBase,CFGTableOffset, value,phy; bit32 AnalogtableSize; bit8 pcibar; value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); pcibar = (bit8)mpiGetPCIBarIndex(agRoot, value); CFGTableOffset = value & SCRATCH_PAD0_OFFSET_MASK; AnalogtableSize = AnalogTableBase = ossaHwRegReadExt(agRoot,pcibar , (bit32)CFGTableOffset + MAIN_ANALOG_SETUP_OFFSET); AnalogtableSize &= 0xFF000000; AnalogtableSize >>= SHIFT24; AnalogTableBase &= 0x00FFFFFF; AnalogTableBase = CFGTableOffset + AnalogTableBase; // config->phyAnalogConfig.phyAnalogSetupRegisters[0].spaRegister0 = 0; SA_DBG1(("mpiWrAnalogSetupTable:Analogtable Base Offset %08X pcibar %d\n",AnalogTableBase, pcibar )); SA_DBG1(("mpiWrAnalogSetupTable:%d %d\n",(int)sizeof(agsaPhyAnalogSetupRegisters_t), AnalogtableSize)); for(phy = 0; phy < 10; phy++) /* up to 10 phys; see PM */ { ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 0 ),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister0 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 4 ),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister1 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 8 ),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister2 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 12),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister3 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+
16),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister4 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 20),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister5 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 24),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister6 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 28),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister7 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 32),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister8 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 36),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister9 ); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister0 0x%x 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) + 0,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister0 ,ossaHwRegReadExt(agRoot, pcibar,AnalogTableBase + ( AnalogtableSize * phy)+ 0 ))); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister1 0x%x 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) + 4,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister1 ,ossaHwRegReadExt(agRoot, pcibar,AnalogTableBase + ( AnalogtableSize * phy)+ 4 ))); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister2 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) + 8,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister2 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister3 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +12,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister3 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister4 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +16,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister4 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister5 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +20,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister5 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister6 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +24,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister6 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister7 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +28,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister7 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister8 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +32,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister8 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister9 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +36,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister9 )); } } GLOBAL void mpiWrIntVecTable(agsaRoot_t *agRoot, mpiConfig_t* config ) { bit32 CFGTableOffset, value; bit32 INTVTableOffset; bit32 ValuetoWrite; bit8 pcibar, i,obq; /* get offset of the configuration table */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); CFGTableOffset = value & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ value = (value & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, value); /* read Interrupt Table 
Offset from the main configuration table */ INTVTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_INT_VEC_TABLE_OFFSET); INTVTableOffset &= 0x00FFFFFF; INTVTableOffset = CFGTableOffset + INTVTableOffset; SA_DBG1(("mpiWrIntVecTable: Base Offset %08X\n",(bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO ) )); for (i = 0; i < MAX_NUM_VECTOR; i ++) { bit32 found=0; for (obq = 0; obq < MAX_NUM_VECTOR; obq++) { /* find OBQ for vector i */ if( config->outboundQueues[obq].interruptVector == i ) { found=1; break; } } if(!found ) { continue; } ValuetoWrite = (( config->outboundQueues[obq].interruptDelay << SHIFT15) | config->outboundQueues[obq].interruptThreshold ); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t)), ValuetoWrite ); SA_DBG3(("mpiWrIntVecTable: Q %d interruptDelay 0x%X interruptThreshold 0x%X \n",i, config->outboundQueues[obq].interruptDelay, config->outboundQueues[obq].interruptThreshold )); SA_DBG3(("mpiWrIntVecTable: %d INT_VT_Coal_CNT_TO Bar %d Offset %3X Writing 0x%08x\n",i, pcibar, (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t)), ValuetoWrite)); } for (i = 0; i < MAX_NUM_VECTOR; i++) { /* read interrupt coalescing control and timer */ value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t))); SA_DBG4(("mpiWrIntVecTable: Offset 0x%08x Interrupt Coalescing iccict[%02d] 0x%x\n", (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t)), i, value)); } } GLOBAL void mpiWrPhyAttrbTable(agsaRoot_t *agRoot, sasPhyAttribute_t *phyAttrib) { bit32 CFGTableOffset, value; bit32 PHYTableOffset; bit8 pcibar, i; /* get offset of the configuration table */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); CFGTableOffset = value & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ value = (value & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, value); /* read Phy Attribute Table Offset from the configuration table */ PHYTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_PHY_ATTRIBUTE_OFFSET); PHYTableOffset &= 0x00FFFFFF; PHYTableOffset = CFGTableOffset + PHYTableOffset + PHY_EVENT_OQ; SA_DBG1(("mpiWrPhyAttrbTable: PHYTableOffset 0x%08x\n", PHYTableOffset)); /* write OQ event per phy */ for (i = 0; i < MAX_VALID_PHYS; i ++) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(PHYTableOffset + i * sizeof(phyAttrb_t)), phyAttrib->phyAttribute[i].phyEventOQ); SA_DBG3(("mpiWrPhyAttrbTable:%d Offset 0x%08x phyAttribute 0x%x\n",i,(bit32)(PHYTableOffset + i * sizeof(phyAttrb_t)), phyAttrib->phyAttribute[i].phyEventOQ )); } for (i = 0; i < MAX_VALID_PHYS; i ++) { value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(PHYTableOffset + i * sizeof(phyAttrb_t))); SA_DBG1(("mpiWrPhyAttrbTable: OQ Event per phy[%x] 0x%x\n", i, value)); } } #ifdef TEST /******************************************************************/ /*******************************************************************************/ /** \fn mpiFreezeInboundQueue(agsaRoot_t *agRoot, bit32 bitMapQueueNum0, bit32 bitMapQueueNum1) * \brief Freeze the inbound queue * * \param agRoot Handles for this instance of SAS/SATA hardware * \param bitMapQueueNum0 bit map for inbound queue number 0 - 31 to freeze * \param bitMapQueueNum1 bit map for inbound queue number 32 - 63 to freeze * * Return: * AGSA_RC_SUCCESS if the inbound queues were frozen successfully * AGSA_RC_FAILURE if freezing the inbound queues failed
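* * Usage sketch (illustrative, not from the original driver source): each bit selects one inbound queue, bit n of bitMapQueueNum0 for queue n (0 - 31) and bit n of bitMapQueueNum1 for queue 32 + n; e.g. to freeze queues 0 and 2 on a controller with fewer than 32 inbound queues: * * if (mpiFreezeInboundQueue(agRoot, (1U << 0) | (1U << 2), 0) != AGSA_RC_SUCCESS) * { * SA_DBG1(("caller: inbound queue freeze timed out\n")); * } *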
*/ /*******************************************************************************/ GLOBAL bit32 mpiFreezeInboundQueue(agsaRoot_t *agRoot, bit32 bitMapQueueNum0, bit32 bitMapQueueNum1) { bit32 value, togglevalue; bit32 max_wait_time; bit32 max_wait_count; SA_DBG2(("Entering function:mpiFreezeInboundQueue\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); togglevalue = 0; if (bitMapQueueNum0) { /* update the inbound queue number to HOST_SCRATCH_PAD1 register for queue 0 to 31 */ SA_DBG1(("mpiFreezeInboundQueue: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiFreezeInboundQueue: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value |= bitMapQueueNum0; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_1, MSGU_HOST_SCRATCH_PAD_1, value); } if (bitMapQueueNum1) { /* update the inbound queue number to HOST_SCRATCH_PAD2 register for queue 32 to 63 */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); value |= bitMapQueueNum1; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_2, MSGU_HOST_SCRATCH_PAD_2, value); } /* Write bit 2 to Inbound DoorBell Register */ siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, IBDB_IBQ_FREEZE); /* wait until Inbound DoorBell Clear Register toggled */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); /* Read Inbound DoorBell Register - for RevB */ // value = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_IBDB_SET); value = MSGU_READ_IDR; value &= IBDB_IBQ_FREEZE; } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("mpiFreezeInboundQueue: IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue)); return AGSA_RC_FAILURE; } return AGSA_RC_SUCCESS; } /******************************************************************************/ /** \fn mpiUnFreezeInboundQueue(agsaRoot_t *agRoot, bit32 bitMapQueueNum0, bit32 bitMapQueueNum1) * \brief Un-freeze the inbound queue * * \param agRoot Handles for this instance of SAS/SATA hardware * \param bitMapQueueNum0 bit map for inbound queue number 0 - 31 to un-freeze * \param bitMapQueueNum1 bit map for inbound queue number 32 - 63 to un-freeze * * Return: * AGSA_RC_SUCCESS if the inbound queues were un-frozen successfully * AGSA_RC_FAILURE if un-freezing the inbound queues failed */ /******************************************************************************/ GLOBAL bit32 mpiUnFreezeInboundQueue(agsaRoot_t *agRoot, bit32 bitMapQueueNum0, bit32 bitMapQueueNum1) { bit32 value, togglevalue; bit32 max_wait_time; bit32 max_wait_count; SA_DBG2(("Entering function:mpiUnFreezeInboundQueue\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); togglevalue = 0; if (bitMapQueueNum0) { /* update the inbound queue number to HOST_SCRATCH_PAD1 register - for queue 0 to 31 */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value |= bitMapQueueNum0; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_1, MSGU_HOST_SCRATCH_PAD_1, value); } if (bitMapQueueNum1) { /* update the inbound queue number to HOST_SCRATCH_PAD2 register - for queue 32 to 63 */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); value |= bitMapQueueNum1; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_2, MSGU_HOST_SCRATCH_PAD_2, value); } /* Write the un-freeze bit to Inbound DoorBell
Register */ siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, IBDB_IBQ_UNFREEZE); /* wait until Inbound DoorBell Clear Register toggled */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); /* Read Inbound DoorBell Register - for RevB */ value = MSGU_READ_IDR; value &= IBDB_IBQ_UNFREEZE; } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("mpiUnFreezeInboundQueue: IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue)); return AGSA_RC_FAILURE; } return AGSA_RC_SUCCESS; } #endif /* TEST ****************************************************************/ GLOBAL bit32 si_check_V_HDA(agsaRoot_t *agRoot) { bit32 ret = AGSA_RC_SUCCESS; bit32 hda_status = 0; hda_status = (ossaHwRegReadExt(agRoot, PCIBAR0, SPC_V_HDA_RESPONSE_OFFSET+28)); SA_DBG1(("si_check_V_HDA: hda_status 0x%08X\n",hda_status )); if((hda_status & SPC_V_HDAR_RSPCODE_MASK) == SPC_V_HDAR_IDLE) { /* HDA mode */ SA_DBG1(("si_check_V_HDA: HDA mode, value = 0x%x\n", hda_status)); ret = AGSA_RC_HDA_NO_FW_RUNNING; } return(ret); } GLOBAL bit32 si_check_V_Ready(agsaRoot_t *agRoot) { bit32 ret = AGSA_RC_SUCCESS; bit32 SCRATCH_PAD1; bit32 max_wait_time; bit32 max_wait_count; /* ILA */ max_wait_time = (200 * 1000); /* wait 200 milliseconds */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1); } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_ILA_MASK) != SCRATCH_PAD1_V_ILA_MASK) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_ILA_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_ILA_MASK, SCRATCH_PAD1)); return( AGSA_RC_FAILURE); } /* RAAE */ max_wait_time = (200 * 1000); /* wait 200 milliseconds */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1); } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_RAAE_MASK) != SCRATCH_PAD1_V_RAAE_MASK) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_RAAE_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_RAAE_MASK, SCRATCH_PAD1)); return( AGSA_RC_FAILURE); } /* IOP0 */ max_wait_time = (200 * 1000); /* wait 200 milliseconds */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1); } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP0_MASK) != SCRATCH_PAD1_V_IOP0_MASK) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_IOP0_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_IOP0_MASK ,SCRATCH_PAD1)); return( AGSA_RC_FAILURE); } /* IOP1 */ max_wait_time = (200 * 1000); /* wait 200 milliseconds */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1); } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP1_MASK) != SCRATCH_PAD1_V_IOP1_MASK) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_IOP1_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_IOP1_MASK, SCRATCH_PAD1)); // return( AGSA_RC_FAILURE); } return(ret); } GLOBAL bit32 
siScratchDump(agsaRoot_t *agRoot) { bit32 SCRATCH_PAD1; bit32 ret =0; #ifdef SALLSDK_DEBUG bit32 SCRATCH_PAD2; bit32 SCRATCH_PAD3; bit32 SCRATCH_PAD0; SCRATCH_PAD0 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_0); SCRATCH_PAD2 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_2); SCRATCH_PAD3 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3); #endif /* SALLSDK_DEBUG */ SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1); SA_DBG1(("siScratchDump: SCRATCH_PAD 0 0x%08x 1 0x%08x 2 0x%08x 3 0x%08x\n",SCRATCH_PAD0,SCRATCH_PAD1,SCRATCH_PAD2,SCRATCH_PAD3 )); if((SCRATCH_PAD1 & SCRATCH_PAD1_V_RESERVED) == SCRATCH_PAD1_V_RESERVED ) { SA_DBG1(("siScratchDump: SCRATCH_PAD1 SCRATCH_PAD1_V_RESERVED 0x%08x\n", SCRATCH_PAD1_V_RESERVED)); } else { if((SCRATCH_PAD1 & SCRATCH_PAD1_V_RAAE_MASK) == SCRATCH_PAD1_V_RAAE_MASK ) { SA_DBG1(("siScratchDump: SCRATCH_PAD1 valid 0x%08x\n",SCRATCH_PAD0 )); SA_DBG1(("siScratchDump: RAAE ready 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_RAAE_MASK)); } if((SCRATCH_PAD1 & SCRATCH_PAD1_V_ILA_MASK) == SCRATCH_PAD1_V_ILA_MASK) { SA_DBG1(("siScratchDump: ILA ready 0x%08x\n", SCRATCH_PAD1 & SCRATCH_PAD1_V_ILA_MASK)); } if(SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK) { SA_DBG1(("siScratchDump: BOOTSTATE not success 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK)); } if((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP0_MASK) == SCRATCH_PAD1_V_IOP0_MASK) { SA_DBG1(("siScratchDump: IOP0 ready 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP0_MASK)); } if((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP1_MASK) == SCRATCH_PAD1_V_IOP1_MASK) { SA_DBG1(("siScratchDump: IOP1 ready 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP1_MASK )); } if((SCRATCH_PAD1 & SCRATCH_PAD1_V_READY) == SCRATCH_PAD1_V_READY) { SA_DBG1(("siScratchDump: SCRATCH_PAD1_V_READY 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_READY )); } if((SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK) == SCRATCH_PAD1_V_BOOTSTATE_MASK) { SA_DBG1(("siScratchDump: SCRATCH_PAD1_V_BOOTSTATE_MASK 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK )); } } return(ret); } void si_macro_check(agsaRoot_t *agRoot) { SA_DBG1(("si_macro_check:smIS_SPC %d\n",smIS_SPC(agRoot) )); SA_DBG1(("si_macro_check:smIS_HIL %d\n",smIS_HIL(agRoot) )); SA_DBG1(("si_macro_check:smIS_SFC %d\n",smIS_SFC(agRoot) )); SA_DBG1(("si_macro_check:smIS_spc8001 %d\n",smIS_spc8001(agRoot) )); SA_DBG1(("si_macro_check:smIS_spc8081 %d\n",smIS_spc8081(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8008 %d\n",smIS_SPCV8008(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8009 %d\n",smIS_SPCV8009(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8018 %d\n",smIS_SPCV8018(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8019 %d\n",smIS_SPCV8019(agRoot) )); SA_DBG1(("si_macro_check:smIS_ADAP8088 %d\n",smIS_ADAP8088(agRoot) )); SA_DBG1(("si_macro_check:smIS_ADAP8089 %d\n",smIS_ADAP8089(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8070 %d\n",smIS_SPCV8070(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8071 %d\n",smIS_SPCV8071(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8072 %d\n",smIS_SPCV8072(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8073 %d\n",smIS_SPCV8073(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8074 %d\n",smIS_SPCV8074(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8075 %d\n",smIS_SPCV8075(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8076 %d\n",smIS_SPCV8076(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV8077 %d\n",smIS_SPCV8077(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV9015 %d\n",smIS_SPCV9015(agRoot) )); SA_DBG1(("si_macro_check:smIS_SPCV9060 %d\n",smIS_SPCV9060(agRoot) )); 
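/* NOTE (illustrative, not from the original driver source): the chip-specific macros above all roll up into the family-wide smIS_SPCV() test printed below; code in this file branches once on that family test, as mpiReadGSTable and mpiReadCALTable do when they apply the 0x00FFFFFF table-offset mask only on V-series parts. */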
SA_DBG1(("si_macro_check:smIS_SPCV %d\n",smIS_SPCV(agRoot) )); SA_DBG1(("si_macro_check:smIS64bInt %d\n", smIS64bInt(agRoot) )); } Index: head/sys/dev/pms/RefTisa/tisa/sassata/common/tdioctl.c =================================================================== --- head/sys/dev/pms/RefTisa/tisa/sassata/common/tdioctl.c (revision 313981) +++ head/sys/dev/pms/RefTisa/tisa/sassata/common/tdioctl.c (revision 313982) @@ -1,3682 +1,3682 @@ /******************************************************************************* *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED *WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ********************************************************************************/ /*******************************************************************************/ /** \file * * * This file contains Management IOCTL APIs * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef FDS_SM #include #include #include #endif #ifdef FDS_DM #include #include #include #endif #include #include #include #include #ifdef INITIATOR_DRIVER #include #include #include #endif #ifdef TARGET_DRIVER #include #include #include #endif #include #include #include #include #include #include #include #include #define agFieldOffset(baseType,fieldName) \ /*lint -e545 */ \ ((bit32)((bitptr)(&(((baseType *)0)->fieldName)))) \ #ifdef SA_LL_API_TEST osGLOBAL bit32 tdLlApiTestIoctl(tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3); #endif /* SA_LL_API_TEST */ extern bit32 volatile sgpioResponseSet; #ifdef SPC_ENABLE_PROFILE /***************************************************************************** * * tdipFWProfileIoctl * * Purpose: This routine is called to process the FW Profile IOCTL function. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. 
* Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. * * *****************************************************************************/ osGLOBAL bit32 tdipFWProfileIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { bit32 status = IOCTL_CALL_SUCCESS; bit32 bufAddrUpper = 0; bit32 bufAddrLower = 0; tdFWProfile_t *fwProfile; void *osMemHandle = agNULL; void *buffer = agNULL; agsaFwProfile_t fwProfileInfo = {0}; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &tdsaAllShared->agRootInt; fwProfile = (tdFWProfile_t *)&agIOCTLPayload->FunctionSpecificArea[0]; fwProfileInfo.processor = fwProfile->processor; fwProfileInfo.cmd = fwProfile->cmd; fwProfileInfo.len = fwProfile->len; fwProfileInfo.tcid = fwProfile->tcid; if(fwProfile->cmd == START_CODE_PROFILE) { fwProfileInfo.codeStartAdd = fwProfile->codeStartAdd; fwProfileInfo.codeEndAdd = fwProfile->codeEndAdd; } if((fwProfile->cmd == STOP_TIMER_PROFILE) || (fwProfile->cmd == STOP_CODE_PROFILE)) { if(fwProfile->len != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &bufAddrUpper, &bufAddrLower, 8, fwProfile->len, agFALSE)) { return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, fwProfile->len); } fwProfileInfo.agSgl.sgLower = bufAddrLower; fwProfileInfo.agSgl.sgUpper = bufAddrUpper; fwProfileInfo.agSgl.len = fwProfile->len; fwProfileInfo.agSgl.extReserved = 0; tdsaAllShared->tdFWProfileEx.buffer = osMemHandle; tdsaAllShared->tdFWProfileEx.virtAddr = buffer; tdsaAllShared->tdFWProfileEx.len = fwProfile->len; } tdsaAllShared->tdFWProfileEx.tdFWProfile = fwProfile; tdsaAllShared->tdFWProfileEx.param1 = agParam1; tdsaAllShared->tdFWProfileEx.param2 = agParam2; tdsaAllShared->tdFWProfileEx.payload = agIOCTLPayload; tdsaAllShared->tdFWProfileEx.inProgress = 1; status = saFwProfile(agRoot, agNULL, 0, &fwProfileInfo ); if(status) { if((fwProfile->cmd == STOP_TIMER_PROFILE) || (fwProfile->cmd == STOP_CODE_PROFILE)) ostiFreeMemory(tiRoot, osMemHandle, fwProfile->len); status = IOCTL_CALL_FAIL; } else status = IOCTL_CALL_PENDING; return status; } #endif /***************************************************************************** * * tdipFWControlIoctl * * Purpose: This routine is called to process the FW control IOCTL function. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. 
* * *****************************************************************************/ osGLOBAL bit32 tdipFWControlIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { bit32 status = IOCTL_CALL_PENDING; bit32 bufAddrUpper = 0; bit32 bufAddrLower = 0; tdFWControl_t *fwControl; void *osMemHandle = agNULL; void *buffer = agNULL; agsaUpdateFwFlash_t flashUpdateInfo; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &tdsaAllShared->agRootInt; if( agIOCTLPayload->Length < ( agFieldOffset(tiIOCTLPayload_t, FunctionSpecificArea) + sizeof(tdFWControl_t) ) ) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; status = IOCTL_CALL_FAIL; return status; } fwControl = (tdFWControl_t *)&agIOCTLPayload->FunctionSpecificArea[0]; if(fwControl->len != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &bufAddrUpper, &bufAddrLower, 8, fwControl->len, agFALSE) ) return IOCTL_CALL_FAIL; } osti_memset( (void *)buffer, 0, fwControl->len ); osti_memcpy( (void *)buffer, fwControl->buffer, fwControl->len ); flashUpdateInfo.agSgl.sgLower = bufAddrLower; flashUpdateInfo.agSgl.sgUpper = bufAddrUpper; flashUpdateInfo.agSgl.len = fwControl->len; flashUpdateInfo.agSgl.extReserved = 0; flashUpdateInfo.currentImageOffset = fwControl->offset; flashUpdateInfo.currentImageLen = fwControl->len; flashUpdateInfo.totalImageLen = fwControl->size; switch (agIOCTLPayload->MinorFunction) { case IOCTL_MN_FW_DOWNLOAD_DATA: { TI_DBG6(("tdipFWControlIoctl: calling saFwFlashUpdate\n")); tdsaAllShared->tdFWControlEx.tdFWControl = fwControl; tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = saFwFlashUpdate( agRoot, agNULL, 0, &flashUpdateInfo ); if(status) { status = IOCTL_CALL_FAIL; fwControl->retcode = IOCTL_CALL_TIMEOUT; } else { status = IOCTL_CALL_PENDING; } break; } default: status = IOCTL_CALL_INVALID_CODE; TI_DBG1( ("tdipFWControlIoctl: ERROR: Wrong IOCTL code %d\n", agIOCTLPayload->MinorFunction) ); ostiFreeMemory(tiRoot, osMemHandle, fwControl->len); return status; } /* end IOCTL switch */ return status; } /* tdipFWControlIoctl */ /***************************************************************************** * * tiCOMMgntIOCTL * * Purpose: This routine is a TISA API for processing the PMC specific * IOCTL function. * * Each IOCTL function is identified by the IOCTL header * specified in the data payload as the following: * Field Description * ----- ----------- * Signature PMC IOCTL signature. * #define PMC_IOCTL_SIGNATURE 0x1234 * MajorFunction Major function number. * MinorFunction Minor function number. * Length Length of this structure in bytes. * Status Return status for this IOCTL function. * FunctionSpecificArea Variable length function specific area. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. 
* Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. * IOCTL_CALL_INVALID_DEVICE Invalid target or destination device. * * Note: * Use the ostiAllocMemory() OS layer callback function to allocate memory * for the DMA operation. Then use ostiFreeMemory() to deallocate the memory. * *****************************************************************************/ osGLOBAL bit32 tiCOMMgntIOCTL( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { bit32 status = IOCTL_CALL_INVALID_CODE; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootNonInt); bit32 EventLogLength = 0; bit32 EventLogOption; bit32 ReadLength = 0; bit32 Offset = 0; bit32 RequestLength = 0; /* user request on how much data to pass to application */ agsaContext_t *agContext = NULL; - bit8 *loc = 0; + bit8 *loc = NULL; TI_DBG3(("tiCOMMgntIOCTL: start\n")); TI_DBG3(("tiCOMMgntIOCTL: tiRoot %p agIOCTLPayload %p agParam1 %p agParam2 %p agParam3 %p\n", tiRoot,agIOCTLPayload,agParam1,agParam2,agParam3 )); TI_DBG3(("tiCOMMgntIOCTL: Signature %X\ntiCOMMgntIOCTL: MajorFunction 0x%X\ntiCOMMgntIOCTL: MinorFunction 0x%X\ntiCOMMgntIOCTL: Length 0x%X\ntiCOMMgntIOCTL: Status 0x%X\ntiCOMMgntIOCTL: Reserved 0x%X\ntiCOMMgntIOCTL: FunctionSpecificArea 0x%X\n", agIOCTLPayload->Signature, agIOCTLPayload->MajorFunction, agIOCTLPayload->MinorFunction, agIOCTLPayload->Length, agIOCTLPayload->Status, agIOCTLPayload->Reserved, agIOCTLPayload->FunctionSpecificArea[0] )); /* PMC IOCTL signature matched? */ if(agIOCTLPayload->Signature != PMC_IOCTL_SIGNATURE) { TI_DBG1(("tiCOMMgntIOCTL:agIOCTLPayload->Signature %x IOCTL_CALL_INVALID_CODE\n",agIOCTLPayload->Signature )); status = IOCTL_CALL_INVALID_CODE; return (status); } switch (agIOCTLPayload->MajorFunction) { //TODO: make the card identification more robust.
For now - just to keep going with FW download #ifdef IOCTL_INTERRUPT_TIME_CONFIG case IOCTL_MJ_CARD_PARAMETER: { switch( agIOCTLPayload->MinorFunction ) { case IOCTL_MN_CARD_GET_INTERRUPT_CONFIG: { agsaInterruptConfigPage_t *pInterruptConfig = (agsaInterruptConfigPage_t *)&agIOCTLPayload->FunctionSpecificArea[0]; status = saGetControllerConfig(agRoot, 0, AGSA_INTERRUPT_CONFIGURATION_PAGE, pInterruptConfig->vectorMask0, pInterruptConfig->vectorMask1, agParam2); if(status == AGSA_RC_SUCCESS) { status = IOCTL_CALL_PENDING; agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } else { agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; } break; } case IOCTL_MN_CARD_GET_TIMER_CONFIG: status = saGetControllerConfig(agRoot, 0, AGSA_SAS_PROTOCOL_TIMER_CONFIG_PAGE, 0, 0, agParam2); if(status == AGSA_RC_SUCCESS) { status = IOCTL_CALL_PENDING; agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } else { agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; } break; } break; } #endif /* IOCTL_INTERRUPT_TIME_CONFIG */ case IOCTL_MJ_INI_DRIVER_IDENTIFY: { status=IOCTL_CALL_SUCCESS; break; } case IOCTL_MJ_GET_DEVICE_LUN: status = tdsaGetNumOfLUNIOCTL(tiRoot,agIOCTLPayload, agParam1, agParam2, agParam3); if(status == IOCTL_CALL_SUCCESS) { status = IOCTL_CALL_PENDING; } break; case IOCTL_MJ_SMP_REQUEST: status = tdsaSendSMPIoctl(tiRoot, agIOCTLPayload, agParam1,agParam2,agParam3); break; case IOCTL_MJ_FW_CONTROL: { //ostiIOCTLClearSignal (tiRoot, &agParam1, &agParam2, &agParam3); status = tdipFWControlIoctl( tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } //#ifdef EVENT_LOG_INFO_TESTING /* Reserved field in tiIOCTLPayload_t is used as offset */ case IOCTL_MJ_GET_EVENT_LOG1: { switch (agIOCTLPayload->MinorFunction) { case IOCTL_MN_FW_GET_TRACE_BUFFER: { agsaControllerEventLog_t EventLog; saGetControllerEventLogInfo(agRoot, &EventLog); TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_GET_EVENT_LOG1 Length %d\n", agIOCTLPayload->Length)); RequestLength = agIOCTLPayload->Length; Offset = agIOCTLPayload->Reserved; EventLogLength = EventLog.eventLog1.totalLength; EventLogOption = EventLog.eventLog1Option; if (EventLogLength <= Offset) { TI_DBG1(("tiCOMMgntIOCTL: 1 out of range Requestlength %d Offset %d event log length %d\n", RequestLength, Offset, EventLogLength)); // out of range agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; if(EventLogOption == 0) { agIOCTLPayload->Status = IOCTL_ERR_FW_EVENTLOG_DISABLED; } status=IOCTL_CALL_SUCCESS; return status; } ReadLength = MIN(EventLogLength - Offset, RequestLength); loc = (bit8 *)EventLog.eventLog1.virtPtr + Offset; osti_memcpy(&(agIOCTLPayload->FunctionSpecificArea), loc, ReadLength); // tdhexdump("IOCTL_MJ_GET_EVENT_LOG1 first 32bytes", (bit8 *)&(agIOCTLPayload->FunctionSpecificArea), 32); agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; agIOCTLPayload->Length = (bit16)ReadLength; status=IOCTL_CALL_SUCCESS; break; } case IOCTL_MN_FW_GET_EVENT_FLASH_LOG1: { TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MN_FW_GET_EVENT_FLASH_LOG1\n")); status = tdsaRegDumpGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } } break; } case IOCTL_MJ_GET_EVENT_LOG2: { switch (agIOCTLPayload->MinorFunction) { case IOCTL_MN_FW_GET_TRACE_BUFFER: { agsaControllerEventLog_t EventLog; saGetControllerEventLogInfo(agRoot, &EventLog); TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_GET_EVENT_LOG2 Length %d\n", agIOCTLPayload->Length)); RequestLength = agIOCTLPayload->Length; Offset = agIOCTLPayload->Reserved; EventLogLength = EventLog.eventLog2.totalLength; 
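/* NOTE (illustrative, not from the original driver source): the MIN() clamp below windows the log so a caller can page through it via the Reserved (offset) field; e.g. with EventLogLength 0x10000, Offset 0xF000 and RequestLength 0x2000, ReadLength = MIN(0x10000 - 0xF000, 0x2000) = 0x1000, and the caller keeps advancing Offset by the returned Length until Status comes back as IOCTL_ERR_STATUS_NO_MORE_DATA. */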
EventLogOption = EventLog.eventLog2Option; if (EventLogLength <= Offset) { TI_DBG1(("tiCOMMgntIOCTL: 2 out of range Requestlength %d Offset %d event log length %d\n", RequestLength, Offset, EventLogLength)); /* out of range */ agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; if(EventLogOption == 0) { agIOCTLPayload->Status = IOCTL_ERR_FW_EVENTLOG_DISABLED; } status=IOCTL_CALL_SUCCESS; return status; } ReadLength = MIN(EventLogLength - Offset, RequestLength); loc = (bit8 *)EventLog.eventLog2.virtPtr + Offset; osti_memcpy(&(agIOCTLPayload->FunctionSpecificArea), loc, ReadLength); // tdhexdump("IOCTL_MJ_GET_EVENT_LOG2 first 32bytes", (bit8 *)&(agIOCTLPayload->FunctionSpecificArea), 32); agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; agIOCTLPayload->Length = (bit16)ReadLength; status=IOCTL_CALL_SUCCESS; break; } case IOCTL_MN_FW_GET_EVENT_FLASH_LOG2: { TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MN_FW_GET_EVENT_FLASH_LOG2\n")); status = tdsaRegDumpGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } } break; } case IOCTL_MJ_FW_INFO: { agsaControllerInfo_t ControllerInfo; saGetControllerInfo(agRoot, &ControllerInfo); TI_DBG1(("tiCOMMgntIOCTL: IOCTL_MJ_FW_INFO Length %d\n", agIOCTLPayload->Length)); RequestLength = agIOCTLPayload->Length; Offset = agIOCTLPayload->Reserved; if (RequestLength == 0) { TI_DBG1(("tiCOMMgntIOCTL: IOCTL_MJ_FW_INFO: No more Data!\n")); /* out of range */ agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; status=IOCTL_CALL_SUCCESS; return status; } osti_memcpy((bit8*)&(agIOCTLPayload->FunctionSpecificArea), (bit8*)&ControllerInfo, sizeof(agsaControllerInfo_t)); TI_DBG1(("tiCOMMgntIOCTL:IOCTL_MJ_FW_INFO ControllerInfo signature 0x%X\n",ControllerInfo.signature)); TI_DBG1(("tiCOMMgntIOCTL:IOCTL_MJ_FW_INFO ControllerInfo PCILinkRate 0x%X\n",ControllerInfo.PCILinkRate)); TI_DBG1(("tiCOMMgntIOCTL:IOCTL_MJ_FW_INFO ControllerInfo PCIWidth 0x%X\n",ControllerInfo.PCIWidth)); agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; status=IOCTL_CALL_SUCCESS; break; } case IOCTL_MJ_GET_FW_REV: { agsaControllerInfo_t ControllerInfo; saGetControllerInfo(agRoot, &ControllerInfo); TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_GET_FW_REV Length %d\n", agIOCTLPayload->Length)); RequestLength = agIOCTLPayload->Length; Offset = agIOCTLPayload->Reserved; if (RequestLength == 0) { TI_DBG1(("tiCOMMgntIOCTL: IOCTL_MJ_GET_FW_REV: No more Data!\n")); /* out of range */ agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; status=IOCTL_CALL_SUCCESS; return status; } osti_memcpy((bit8*)&(agIOCTLPayload->FunctionSpecificArea), (bit8*)&ControllerInfo.fwRevision, sizeof(bit32)); loc = (bit8 *)&(agIOCTLPayload->FunctionSpecificArea)+ sizeof(bit32); osti_memcpy(loc, (bit8*)&ControllerInfo.sdkRevision, sizeof(bit32)); agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; status=IOCTL_CALL_SUCCESS; break; } #ifdef SPC_ENABLE_PROFILE case IOCTL_MJ_FW_PROFILE: { TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_FW_PROFILE\n")); status = tdipFWProfileIoctl( tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } #endif /* SPC_ENABLE_PROFILE */ case IOCTL_MJ_GET_CORE_DUMP: { TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_GET_CORE_DUMP\n")); if (tiIS_SPC(agRoot)) { status = tdsaRegDumpGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); } else { agIOCTLPayload->Status = IOCTL_ERR_STATUS_NOT_SUPPORTED; status = IOCTL_CALL_SUCCESS; } break; } //#endif case IOCTL_MJ_NVMD_SET: { bit8 nvmDev; TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_NVMD_SET\n")); nvmDev 
= (bit8) agIOCTLPayload->Status; agIOCTLPayload->Status = 0; status = tdsaNVMDSetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, &nvmDev); break; } #if 0 case IOCTL_MJ_GPIO: { bit32 sVid =0; TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_GPIO\n")); /* Get Subsystem vendor */ sVid = ostiChipConfigReadBit32(tiRoot,0x2C); sVid = sVid & 0xFFFF; /* GPIO is only intended for chip down design * therefore it only applies to 8H/SPCv product family */ if(sVid == 0x9005) return IOCTL_CALL_INVALID_DEVICE; status = tdsaGpioSetup(tiRoot, agContext, agIOCTLPayload, agParam1, agParam2); if(status == IOCTL_CALL_SUCCESS) status = IOCTL_CALL_PENDING; /* Wait for response from the Controller */ else return status; break; } #endif case IOCTL_MJ_SGPIO: { TI_DBG6(("tiCOMMgntIOCTL: IOCTL_MJ_SGPIO\n")); status = tdsaSGpioIoctlSetup(tiRoot, agContext, agIOCTLPayload, agParam1, agParam2); break; } case IOCTL_MJ_NVMD_GET: { bit8 nvmDev; TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_NVMD_GET\n")); nvmDev = (bit8) agIOCTLPayload->Status; agIOCTLPayload->Status = 0; status = tdsaNVMDGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, &nvmDev); break; } case IOCTL_MJ_GET_FORENSIC_DATA: { TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_GET_FORENSIC_DATA\n")); status = tdsaForensicDataGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } case IOCTL_MJ_GET_DEVICE_INFO: { TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_GET_DEVICE_INFO\n")); status = tdsaDeviceInfoGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } case IOCTL_MJ_GET_IO_ERROR_STATISTIC: { TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_GET_IO_ERROR_STATISTIC\n")); status = tdsaIoErrorStatisticGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } case IOCTL_MJ_GET_IO_EVENT_STATISTIC: { TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_GET_IO_EVENT_STATISTIC\n")); status = tdsaIoEventStatisticGetIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } case IOCTL_MJ_SEND_BIST: { TI_DBG1(("tiCOMMgntIOCTL: IOCTL_MJ_SEND_BIST\n")); status = tdsaSendBISTIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } #if 0 case IOCTL_MJ_SET_OR_GET_REGISTER: { TI_DBG3(("tiCOMMgntIOCTL: IOCTL_MJ_SET_OR_GET_REGISTER\n")); status = tdsaRegisterIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } #endif case IOCTL_MJ_PHY_DETAILS: { PhyDetails_t *PhyDetails = (PhyDetails_t*)&agIOCTLPayload->FunctionSpecificArea; agsaRoot_t *agRoot = &(tdsaAllShared->agRootNonInt); agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); bit8 *sasAddressHi; bit8 *sasAddressLo; bit8 sas_dev_type; int i = 0; tiIniGetDirectSataSasAddr(tiRoot, i , &sasAddressHi, &sasAddressLo); for( i = 0; i < saRoot->phyCount ; i++) { PhyDetails[i].attached_phy = saRoot->phys[i].sasIdentify.phyIdentifier; /* device types * SAS * 0x01 - Sas end device * 0x02 - Expander device * SATA * 0x11 - Sata * NO DEVICE 0x00 */ sas_dev_type = (saRoot->phys[i].sasIdentify.deviceType_addressFrameType & 0x70 ) >> 4 ; if ((saRoot->phys[i].status == 1) && (sas_dev_type == 0)){ //status 1 - Phy Up //Sata phy PhyDetails[i].attached_dev_type = SAS_PHY_SATA_DEVICE;//0x11 for sata end device osti_memcpy(&PhyDetails[i].attached_sasAddressHi, tdsaAllShared->Ports[i].SASID.sasAddressHi, sizeof(bit32)); osti_memcpy(&PhyDetails[i].attached_sasAddressLo, tdsaAllShared->Ports[i].SASID.sasAddressLo, sizeof(bit32)); PhyDetails[i].attached_sasAddressLo[3] += i + 16; } else { PhyDetails[i].attached_dev_type = sas_dev_type; osti_memcpy(&PhyDetails[i].attached_sasAddressHi, saRoot->phys[i].sasIdentify.sasAddressHi,
sizeof(bit32)); osti_memcpy(&PhyDetails[i].attached_sasAddressLo, saRoot->phys[i].sasIdentify.sasAddressLo, sizeof(bit32)); } osti_memcpy(&PhyDetails[i].sasAddressLo,&(tdsaAllShared->Ports[i].SASID.sasAddressLo), sizeof(bit32)); osti_memcpy(&PhyDetails[i].sasAddressHi,&(tdsaAllShared->Ports[i].SASID.sasAddressHi), sizeof(bit32)); } // osti_memcpy(&agIoctlPayload->FunctionSpecificArea,&PhyInfo, sizeof(agsaSGpioReqResponse_t)); // printk("Ioctl success\n"); return IOCTL_CALL_SUCCESS; } case IOCTL_MJ_PHY_GENERAL_STATUS: { agsaPhyGeneralState_t *PhyData=NULL; bit32 ret = AGSA_RC_FAILURE; PhyData = (agsaPhyGeneralState_t*) &agIOCTLPayload->FunctionSpecificArea[0]; PhyData->Reserved2 = 0; /* Validate the length */ if (agIOCTLPayload->Length < sizeof(agsaPhyGeneralState_t)) { status = IOCTL_CALL_FAIL; break; } tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; //tdsaAllShared->tdFWControlEx.usrAddr = PhyData; ret = tdsaGetPhyGeneralStatusIoctl(tiRoot,PhyData); if(ret == AGSA_RC_FAILURE) { status = IOCTL_CALL_FAIL; tdsaAllShared->tdFWControlEx.payload = NULL; tdsaAllShared->tdFWControlEx.inProgress = 0; break; } else if(ret == IOCTL_ERR_STATUS_NOT_SUPPORTED) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_NOT_SUPPORTED; status = IOCTL_CALL_SUCCESS; break; } //status = IOCTL_CALL_PENDING; status = IOCTL_CALL_PENDING; } break; #if 1 case IOCTL_MJ_GET_PHY_PROFILE: { TI_DBG1(("tiCOMMgntIOCTL: IOCTL_MJ_GET_PHY_PROFILE %p %p %p\n",agParam1,agParam2,agParam3)); status = tdsaPhyProfileIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, agParam3); break; } #endif case IOCTL_MJ_LL_TRACING: { void * stu = &agIOCTLPayload->FunctionSpecificArea[0]; switch(agIOCTLPayload->MinorFunction) { case IOCTL_MN_LL_RESET_TRACE_INDEX: { #ifdef SA_ENABLE_TRACE_FUNCTIONS TSTMTID_TRACE_BUFFER_RESET *llist = (TSTMTID_TRACE_BUFFER_RESET *)stu; hpTraceBufferParms_t BufferParms; TI_DBG5(("tdReturnIOCTL_Info: hpIOCTL_ResetTraceIndex\n")); BufferParms.TraceCompiled = 0; BufferParms.TraceWrap = 0; BufferParms.CurrentTraceIndexWrapCount = 0; BufferParms.BufferSize = 0; BufferParms.CurrentIndex = 0; BufferParms.pTrace = NULL; BufferParms.pTraceIndexWrapCount = NULL; BufferParms.pTraceMask = NULL; BufferParms.pCurrentTraceIndex = NULL; smTraceGetInfo(agRoot,&BufferParms); TI_DBG5(("tdReturnIOCTL_Info: pTrace %p\n",BufferParms.pTrace)); TI_DBG5(("tdReturnIOCTL_Info: pCurrentTraceIndex %p %X\n",BufferParms.pCurrentTraceIndex,*BufferParms.pCurrentTraceIndex)); TI_DBG5(("tdReturnIOCTL_Info: pTraceIndexWrapCount %p %X\n",BufferParms.pTraceIndexWrapCount,*BufferParms.pTraceIndexWrapCount)); TI_DBG5(("tdReturnIOCTL_Info: pTraceMask %p %X\n",BufferParms.pTraceMask,*BufferParms.pTraceMask)); if( llist->Flag != 0) { if( llist->TraceMask != *BufferParms.pTraceMask) { smTraceSetMask(agRoot, llist->TraceMask ); } } if( llist->Reset) { *BufferParms.pCurrentTraceIndex = 0; smResetTraceBuffer(agRoot); *BufferParms.pCurrentTraceIndex = 0; *BufferParms.pTraceIndexWrapCount =0; llist->TraceMask = *BufferParms.pTraceMask; } #endif /* SA_ENABLE_TRACE_FUNCTIONS */ agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; status = IOCTL_CALL_SUCCESS; } break; case IOCTL_MN_LL_GET_TRACE_BUFFER_INFO: { hpTraceBufferParms_t BufferParms; TSTMTID_TRACE_BUFFER_INFO *llist = (TSTMTID_TRACE_BUFFER_INFO *)stu; TI_DBG5(("tdReturnIOCTL_Info: hpIOCTL_GetTraceBufferInfo\n")); BufferParms.TraceCompiled = 0; BufferParms.TraceWrap = 0; 
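/* NOTE (illustrative, not from the original driver source): BufferParms is pre-zeroed below because the smTraceGetInfo() call is compiled in only under SA_ENABLE_TRACE_FUNCTIONS; in a build without tracing the zeroed defaults (TraceCompiled == 0, BufferSize == 0) are what gets copied back to the caller, so the IOCTL still returns a well-defined answer. */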
BufferParms.CurrentTraceIndexWrapCount = 0; BufferParms.BufferSize = 0; BufferParms.CurrentIndex = 0; BufferParms.pTrace = NULL; BufferParms.pTraceMask = NULL; #ifdef SA_ENABLE_TRACE_FUNCTIONS smTraceGetInfo(agRoot,&BufferParms); #endif /* SA_ENABLE_TRACE_FUNCTIONS not enabled */ llist->TraceCompiled = BufferParms.TraceCompiled; llist->BufferSize = BufferParms.BufferSize; llist->CurrentIndex = BufferParms.CurrentIndex ; llist->CurrentTraceIndexWrapCount = BufferParms.CurrentTraceIndexWrapCount; llist->TraceWrap = BufferParms.TraceWrap; if(BufferParms.pTraceMask != NULL) { llist->TraceMask = *BufferParms.pTraceMask; } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; status = IOCTL_CALL_SUCCESS; } break; case IOCTL_MN_LL_GET_TRACE_BUFFER: { #ifdef SA_ENABLE_TRACE_FUNCTIONS TSTMTID_TRACE_BUFFER_FETCH *llist = (TSTMTID_TRACE_BUFFER_FETCH *)stu; hpTraceBufferParms_t BufferParms; bit32 c= 0; BufferParms.TraceCompiled = 0; BufferParms.TraceWrap = 0; BufferParms.CurrentTraceIndexWrapCount = 0; BufferParms.BufferSize = 0; BufferParms.CurrentIndex = 0; BufferParms.pTrace = NULL; smTraceGetInfo(agRoot,&BufferParms); TI_DBG6(("tdReturnIOCTL_Info: hpIOCTL_GetTraceBuffer\n")); if(llist->LowFence != LowFence32Bits) { break; } if(llist->HighFence != HighFence32Bits) { break; } if(llist->BufferOffsetBegin + FetchBufferSIZE > BufferParms.BufferSize ) { } for ( c=0; c < FetchBufferSIZE;c++) { llist->Data[c] = *(BufferParms.pTrace+( c + llist->BufferOffsetBegin)); } #endif /* SA_ENABLE_TRACE_FUNCTIONS not enabled */ } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; status = IOCTL_CALL_SUCCESS; break; } break; } #ifdef SA_LL_API_TEST case IOCTL_MJ_LL_API_TEST: { status = tdLlApiTestIoctl( tiRoot, agIOCTLPayload, agParam1,agParam2,agParam3 ); break; } #endif /* SA_LL_API_TEST */ case IOCTL_MJ_MODE_CTL_PAGE: { /* The SPCv controller has some options accessed via mode pages */ tiEncryptDekConfigPage_t *pModePage= (tiEncryptDekConfigPage_t *) &agIOCTLPayload->FunctionSpecificArea[0]; bit32 pageLength = 0; bit32 pageCode; bit32 modeOperation; pageCode = pModePage->pageCode & 0xFF; modeOperation = *(bit32 *) agParam2; switch(modeOperation) { case tiModePageSet: switch (pageCode) { case TI_ENCRYPTION_DEK_CONFIG_PAGE: pageLength = sizeof(tiEncryptDekConfigPage_t); break; case TI_ENCRYPTION_CONTROL_PARM_PAGE: pageLength = sizeof(tiEncryptControlParamPage_t); break; case TI_ENCRYPTION_GENERAL_CONFIG_PAGE: /* Pages are currently unsupported */ pageLength = 0; break; } status = saSetControllerConfig(agRoot, 0, pageCode, pageLength, pModePage, (agsaContext_t *)agIOCTLPayload); break; case tiModePageGet: status = saGetControllerConfig(agRoot, 0, pageCode, 0, 0, (agsaContext_t *)agIOCTLPayload); break; default: agIOCTLPayload->Status = IOCTL_ERR_STATUS_NOT_SUPPORTED; } } break; #ifdef PHY_RESTART_TEST case IOCTL_MJ_PORT_START: { bit32 portID, tiStatus; bit32 *data = (bit32*) &agIOCTLPayload->FunctionSpecificArea[0]; portID = *data; tiStatus = tiCOMPortStart(tiRoot, portID, tdsaAllShared->Ports[portID].tiPortalContext, 0); if (tiStatus == tiSuccess) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } else { agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; } status = IOCTL_CALL_SUCCESS; break; } case IOCTL_MJ_PORT_STOP: { bit32 portID, tiStatus; bit32 *data = (bit32*) &agIOCTLPayload->FunctionSpecificArea[0]; portID = *data; tiStatus = tiCOMPortStop(tiRoot, tdsaAllShared->Ports[portID].tiPortalContext); if (tiStatus == tiSuccess) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } else { agIOCTLPayload->Status = 
IOCTL_ERR_STATUS_INTERNAL_ERROR; } status = IOCTL_CALL_SUCCESS; break; } #endif case IOCTL_MJ_SEND_TMF: switch(agIOCTLPayload->MinorFunction) { case IOCTL_MN_TMF_DEVICE_RESET: status = tdsaSendTMFIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, AG_TARGET_WARM_RESET); break; case IOCTL_MN_TMF_LUN_RESET: status = tdsaSendTMFIoctl(tiRoot, agIOCTLPayload, agParam1, agParam2, AG_LOGICAL_UNIT_RESET); break; } break; case IOCTL_MJ_GET_DRIVER_VERSION: osti_sprintf(agIOCTLPayload->FunctionSpecificArea, "%s", AGTIAPI_DRIVER_VERSION); agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; status=IOCTL_CALL_SUCCESS; break; default: agIOCTLPayload->Status = IOCTL_ERR_STATUS_NOT_SUPPORTED; break; } return status; } #if 0 /***************************************************************************** * * tdsaGpioSetup * * Purpose: This routine is called to set Gpio parameters to the controller. * * Parameters: * tiRoot: Pointer to driver instance * agsaContext_t : * tiIOCTLPayload_t : ioctl header with payload gpio info * agParam1,agParam2 : Generic parameters * * Return: status * * *****************************************************************************/ osGLOBAL bit32 tdsaGpioSetup( tiRoot_t *tiRoot, agsaContext_t *agContext, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2 ) { tdsaTimerRequest_t *osIoctlTimer; agsaGpioEventSetupInfo_t *gpioEventSetupInfo; agsaGpioWriteSetupInfo_t *gpioWriteSetupInfo; agsaGpioPinSetupInfo_t *gpioPinSetupInfo; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); bit32 status = IOCTL_CALL_SUCCESS; TI_DBG3(("tdsaGpioSetup: start\n")); if(tiRoot == agNULL || agIOCTLPayload == agNULL ) return IOCTL_CALL_FAIL; osIoctlTimer = &tdsaAllShared->osIoctlTimer; tdsaInitTimerRequest(tiRoot, osIoctlTimer); tdIoctlStartTimer(tiRoot, osIoctlTimer); /* Start the timeout handler for both ioctl and controller response */ tdsaAllShared->tdFWControlEx.virtAddr = (bit8 *)osIoctlTimer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8 *)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; switch (agIOCTLPayload->MinorFunction) { case IOCTL_MN_GPIO_PINSETUP: { TI_DBG3(("tdsaGpioSetup: IOCTL_MN_GPIO_PINSETUP\n")); gpioPinSetupInfo =(agsaGpioPinSetupInfo_t *)&agIOCTLPayload->FunctionSpecificArea[0]; status = saGpioPinSetup(agRoot, agContext, 0, gpioPinSetupInfo); break; } case IOCTL_MN_GPIO_EVENTSETUP: { TI_DBG3(("tdsaGpioSetup: IOCTL_MN_GPIO_EVENTSETUP\n")); gpioEventSetupInfo = (agsaGpioEventSetupInfo_t *)&agIOCTLPayload->FunctionSpecificArea[0]; status = saGpioEventSetup(agRoot, agContext, 0, gpioEventSetupInfo); break; } case IOCTL_MN_GPIO_READ: { TI_DBG3(("tdsaGpioSetup: IOCTL_MN_GPIO_READ\n")); status = saGpioRead(agRoot, agContext, 0); break; } case IOCTL_MN_GPIO_WRITE: { TI_DBG3(("tdsaGpioSetup: IOCTL_MN_GPIO_WRITE\n")); gpioWriteSetupInfo = (agsaGpioWriteSetupInfo_t *)&agIOCTLPayload->FunctionSpecificArea[0]; status = saGpioWrite(agRoot, agContext, 0, gpioWriteSetupInfo->gpioWritemask, gpioWriteSetupInfo->gpioWriteVal); break; } default : return status; } if(status != AGSA_RC_SUCCESS) { status = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; tdsaSingleThreadedEnter(tiRoot, TD_TIMER_LOCK); if (osIoctlTimer->timerRunning == agTRUE) {
tdsaSingleThreadedLeave(tiRoot, TD_TIMER_LOCK); tdsaKillTimer(tiRoot, osIoctlTimer); }else{ tdsaSingleThreadedLeave(tiRoot, TD_TIMER_LOCK); } } TI_DBG3(("tdsaGpioSetup: End\n")); return status; } #endif /***************************************************************************** * * ostiGetGpioIOCTLRsp * * Purpose: This routine is called when the Get GPIO IOCTL response has been received. * * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the FW download IOMB's payload. * * Return: none * * *****************************************************************************/ osGLOBAL void ostiGetGpioIOCTLRsp( tiRoot_t *tiRoot, bit32 status, bit32 gpioReadValue, agsaGpioPinSetupInfo_t *gpioPinSetupInfo, agsaGpioEventSetupInfo_t *gpioEventSetupInfo ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIoctlPayload ; agsaGpioReadInfo_t *gpioReadInfo; tdsaTimerRequest_t *osIoctlTimer; osIoctlTimer = (tdsaTimerRequest_t *)tdsaAllShared->tdFWControlEx.virtAddr; TI_DBG2(("ostiGetGpioIOCTLRsp: start, status = %d \n", status)); agIoctlPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); if(agIoctlPayload == agNULL){ return; } agIoctlPayload->Status =(bit16) status; if( (status != IOCTL_CALL_TIMEOUT) && (osIoctlTimer != NULL)) { tdsaSingleThreadedEnter(tiRoot, TD_TIMER_LOCK); if (osIoctlTimer->timerRunning == agTRUE) { tdsaSingleThreadedLeave(tiRoot, TD_TIMER_LOCK); tdsaKillTimer(tiRoot, osIoctlTimer); }else{ tdsaSingleThreadedLeave(tiRoot, TD_TIMER_LOCK); } }else { tdsaAllShared->tdFWControlEx.inProgress = 0; agIoctlPayload->Status = (bit16)status; ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); return; } if(status == SUCCESS) TI_DBG3((" ostiGetGpioIOCTLRsp:Got GPIO response from OUTBuf")); else { tdsaAllShared->tdFWControlEx.inProgress = 0; ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); return; } switch (agIoctlPayload->MinorFunction) { case IOCTL_MN_GPIO_PINSETUP: { TI_DBG3((" ostiGetGpioIOCTLRsp:Got GPIO response for IOCTL_MN_GPIO_PINSETUP")); break; } case IOCTL_MN_GPIO_EVENTSETUP: { TI_DBG3((" ostiGetGpioIOCTLRsp:Got GPIO response for IOCTL_MN_GPIO_EVENTSETUP")); break; } case IOCTL_MN_GPIO_WRITE: { TI_DBG3((" ostiGetGpioIOCTLRsp:Got GPIO response for IOCTL_MN_GPIO_WRITE")); break; } case IOCTL_MN_GPIO_READ: { gpioReadInfo = ( agsaGpioReadInfo_t *)tdsaAllShared->tdFWControlEx.usrAddr; gpioReadInfo->gpioReadValue = gpioReadValue; gpioReadInfo->gpioInputEnabled = gpioPinSetupInfo->gpioInputEnabled ; /* GPIOIE */ gpioReadInfo->gpioEventLevelChangePart1 = gpioPinSetupInfo->gpioTypePart1; /* GPIEVCHANGE (pins 11-0) */ gpioReadInfo->gpioEventLevelChangePart2 = gpioPinSetupInfo->gpioTypePart2; /* GPIEVCHANGE (pins 23-20) */ gpioReadInfo->gpioEventRisingEdgePart1 = 0xFFF & gpioEventSetupInfo->gpioEventRisingEdge; /* GPIEVRISE (pins 11-0) */ gpioReadInfo->gpioEventRisingEdgePart2 = 0x00F00000 & (gpioEventSetupInfo->gpioEventRisingEdge); /* GPIEVRISE (pins 23-20) */ gpioReadInfo->gpioEventFallingEdgePart1 = 0xFFF & gpioEventSetupInfo->gpioEventFallingEdge; /* GPIEVALL (pins 11-0) */ gpioReadInfo->gpioEventFallingEdgePart2 = 0x00F00000 & gpioEventSetupInfo->gpioEventFallingEdge; /* GPIEVALL (pins 23-20) */ break; } default : break; } if(tdsaAllShared->tdFWControlEx.inProgress) { tdsaAllShared->tdFWControlEx.inProgress = 0; ostiIOCTLSetSignal(tiRoot,
tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } TI_DBG2(("ostiGetGpioIOCTLRsp: end \n")); return ; } /***************************************************************************** * * tdsaSGpioIoctlSetup * * Purpose: This routine is called to send SGPIO request to the controller. * * Parameters: * tiRoot: Pointer to driver instance * agsaContext_t: Context for this request * tiIOCTLPayload_t: ioctl header with payload sgpio info * agParam1,agParam2: Generic parameters * * Return: status * * *****************************************************************************/ osGLOBAL bit32 tdsaSGpioIoctlSetup( tiRoot_t *tiRoot, agsaContext_t *agContext, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); bit32 status = IOCTL_CALL_FAIL; agsaSGpioReqResponse_t *pSGpioReq = (agsaSGpioReqResponse_t *)&agIOCTLPayload->FunctionSpecificArea[0]; TI_DBG3(("tdsaSGpioIoctlSetup: start\n")); agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; do { if (tiRoot == agNULL || agIOCTLPayload == agNULL) { break; } /* Validate the length */ if (agIOCTLPayload->Length < sizeof(agsaSGpioReqResponse_t)) { TI_DBG3(("Invalid length\n")); break; } /* Validate the SMP Frame Type, Function and Register Type fields */ if ((pSGpioReq->smpFrameType != SMP_REQUEST) || \ ((pSGpioReq->function != SMP_READ_GPIO_REGISTER) && (pSGpioReq->function != SMP_WRITE_GPIO_REGISTER)) || \ (pSGpioReq->registerType > AGSA_SGPIO_GENERAL_PURPOSE_TRANSMIT_REG)) { TI_DBG4(("Invalid Parameter\n")); break; } /* Specific validation for configuration register type */ if (AGSA_SGPIO_CONFIG_REG == pSGpioReq->registerType) { if ((pSGpioReq->registerIndex > 0x01) || \ ((0x00 == pSGpioReq->registerIndex) && (pSGpioReq->registerCount > 0x02)) || \ ((0x01 == pSGpioReq->registerIndex) && (pSGpioReq->registerCount > 0x01))) { break; } } /* Use FW control place in shared structure to keep the necessary information */ tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = saSgpio(agRoot, agContext, 0, pSGpioReq); if (status != AGSA_RC_SUCCESS) { break; } status = IOCTL_CALL_PENDING; } while (0); TI_DBG3(("tdsaSGpioIoctlSetup: End\n")); return status; } /***************************************************************************** * * ostiSgpioIoctlRsp * * Purpose: This routine is called when an SGPIO IOCTL response is received.
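*
* A minimal request sketch (illustrative only, not driver code): before
* tdsaSGpioIoctlSetup() is invoked, the caller places an SMP frame in the
* payload's FunctionSpecificArea, e.g. to read both configuration
* registers ("payload" is a hypothetical tiIOCTLPayload_t pointer):
*
*   agsaSGpioReqResponse_t *req =
*     (agsaSGpioReqResponse_t *)&payload->FunctionSpecificArea[0];
*   req->smpFrameType  = SMP_REQUEST;
*   req->function      = SMP_READ_GPIO_REGISTER;
*   req->registerType  = AGSA_SGPIO_CONFIG_REG;
*   req->registerIndex = 0x00;
*   req->registerCount = 0x02;  // the maximum tdsaSGpioIoctlSetup() allows at index 0
*
* On completion this routine copies the response frame back over the same
* area and signals the waiter.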
* * Parameters: * tiRoot: Pointer to driver instance * pSgpioResponse: Pointer to the SGPIO response * * Return: none * * *****************************************************************************/ osGLOBAL void ostiSgpioIoctlRsp( tiRoot_t *tiRoot, agsaSGpioReqResponse_t *pSgpioResponse ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIoctlPayload = agNULL; TI_DBG3(("ostiSgpioIoctlRsp: start\n")); if (tdsaAllShared->tdFWControlEx.inProgress) { agIoctlPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); if (agIoctlPayload) { tdsaAllShared->tdFWControlEx.payload = NULL; osti_memcpy(&agIoctlPayload->FunctionSpecificArea[0], pSgpioResponse, sizeof(agsaSGpioReqResponse_t)); agIoctlPayload->Status = IOCTL_ERR_STATUS_OK; sgpioResponseSet = 1; } tdsaAllShared->sgpioResponseSet = 1; //Sunitha:Check if needed? ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, agNULL); tdsaAllShared->tdFWControlEx.inProgress = 0; } TI_DBG3(("ostiSgpioIoctlRsp: end\n")); } /***************************************************************************** * * ostiCOMMgntIOCTLRsp * * Purpose: This routine is called when the FW control IOCTL response has been received. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the FW download IOMB's payload. * * Return: none * * * *****************************************************************************/ osGLOBAL void ostiCOMMgntIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; TI_DBG1(("ostiCOMMgntIOCTLRsp: status 0x%x\n",status)); (tdsaAllShared->tdFWControlEx.tdFWControl)->retcode = status; ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.tdFWControl->len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } /***************************************************************************** * * ostiRegDumpIOCTLRsp * * Purpose: This routine is called when the Register Dump from flash IOCTL response has been received. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the FW download IOMB's payload. * * Return: none * * * *****************************************************************************/ osGLOBAL void ostiRegDumpIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; TI_DBG1(("ostiRegDumpIOCTLRsp: start\n")); // (tdsaAllShared->tdFWControlEx.tdFWControl)->retcode = status; osti_memcpy((void *)(tdsaAllShared->tdFWControlEx.usrAddr), (void *)(tdsaAllShared->tdFWControlEx.virtAddr), tdsaAllShared->tdFWControlEx.len); ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } /***************************************************************************** * * ostiSetNVMDIOCTLRsp * * Purpose: This routine is called when the Set NVMD IOCTL response has been received. * This function is used for both target and initiator.
* * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the FW download IOMB's payload. * * Return: none * * * *****************************************************************************/ osGLOBAL void ostiSetNVMDIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload; if(status) { agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); agIOCTLPayload->Status = (bit16)status; } TI_DBG1(("ostiSetNVMDIOCTLRsp: start, status = %d\n", status)); // (tdsaAllShared->tdFWControlEx.tdFWControl)->retcode = status; ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } #ifdef SPC_ENABLE_PROFILE /***************************************************************************** * * ostiFWProfileIOCTLRsp * * Purpose: This routine is called when the FW Profile IOCTL response has been received. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * status: * * Return: none * * * *****************************************************************************/ osGLOBAL void ostiFWProfileIOCTLRsp( tiRoot_t *tiRoot, bit32 status, bit32 len) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdFWProfile_t *fwProfile; TI_DBG1(("ostiFWProfileIOCTLRsp: start\n")); fwProfile = (tdFWProfile_t *)tdsaAllShared->tdFWProfileEx.tdFWProfile; // (tdsaAllShared->tdFWControlEx.tdFWControl)->retcode = status; if (status == AGSA_RC_SUCCESS) { if((fwProfile->cmd == STOP_TIMER_PROFILE) || (fwProfile->cmd == STOP_CODE_PROFILE)) { osti_memcpy((void *)(fwProfile->buffer), (void *)(tdsaAllShared->tdFWProfileEx.virtAddr), len); ostiFreeMemory(tiRoot, tdsaAllShared->tdFWProfileEx.buffer, tdsaAllShared->tdFWProfileEx.len); } } fwProfile->status = status; fwProfile->len = len; ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWProfileEx.param1, tdsaAllShared->tdFWProfileEx.param2, NULL); } #endif /***************************************************************************** * * ostiGetNVMDIOCTLRsp * * Purpose: This routine is called when the Get NVMD IOCTL response has been received. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the FW download IOMB's payload.
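*
* Note: this completion serves both the asynchronous and the synchronous
* path of tdsaNVMDGetIoctl().  It always sets
* tdsaAllShared->NvmdResponseSet, which the synchronous path (invoked with
* agParam1 == agNULL) polls; the copy-back to the caller's
* FunctionSpecificArea and the signalling below happen only when a
* non-NULL agParam1 context was supplied.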
* * Return: none * * * *****************************************************************************/ osGLOBAL void ostiGetNVMDIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload; if(status) { agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); agIOCTLPayload->Status = (bit16)status; } TI_DBG1(("ostiGetNVMDIOCTLRsp: start, status = %d\n", status)); tdsaAllShared->NvmdResponseSet = 1; if(tdsaAllShared->tdFWControlEx.param1 != agNULL) { osti_memcpy((void *)(tdsaAllShared->tdFWControlEx.usrAddr), (void *)(tdsaAllShared->tdFWControlEx.virtAddr), tdsaAllShared->tdFWControlEx.len); ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } } /***************************************************************************** * * ostiGetPhyProfileIOCTLRsp * * Purpose: This routine is called when a phy profile response has been received. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the IOMB's payload. * * Return: none * * * *****************************************************************************/ osGLOBAL void ostiGetPhyProfileIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload; tdPhyCount_t *PhyBlob = agNULL; if(status) { agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); agIOCTLPayload->Status = (bit16)status; PhyBlob = (tdPhyCount_t*)&agIOCTLPayload->FunctionSpecificArea[0]; if(PhyBlob) { // PhyBlob->Phy |= 0x800; if(PhyBlob->phyResetProblem == 0 ) { PhyBlob->phyResetProblem = -1; } TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->Phy 0x%x\n",PhyBlob->Phy)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->BW_rx 0x%x\n",PhyBlob->BW_rx)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->BW_tx 0x%x\n",PhyBlob->BW_tx)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->InvalidDword 0x%x\n",PhyBlob->InvalidDword)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->runningDisparityError 0x%x\n",PhyBlob->runningDisparityError)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->codeViolation 0x%x\n",PhyBlob->codeViolation)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->phyResetProblem 0x%x\n",PhyBlob->phyResetProblem)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: PhyBlob->inboundCRCError 0x%x\n",PhyBlob->inboundCRCError)); } } TI_DBG1(("ostiGetPhyProfileIOCTLRsp: start, status = %d\n", status)); TI_DBG1(("ostiGetPhyProfileIOCTLRsp: start, len = %d %p %p\n", tdsaAllShared->tdFWControlEx.len,tdsaAllShared->tdFWControlEx.usrAddr,tdsaAllShared->tdFWControlEx.virtAddr)); // osti_memcpy((void *)(tdsaAllShared->tdFWControlEx.usrAddr), // (void *)(tdsaAllShared->tdFWControlEx.virtAddr), // tdsaAllShared->tdFWControlEx.len); ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } /***************************************************************************** * * ostiGenEventIOCTLRsp * * Purpose: This routine is called when a General Event occurs while waiting for an IOCTL response.
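*
* On such an event any pending request is failed rather than completed:
* the payload status and retcode are forced to
* IOCTL_ERR_STATUS_INTERNAL_ERROR, the DMA buffer is returned with
* ostiFreeMemory(), and the waiting thread is released via
* ostiIOCTLSetSignal().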
* This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * payloadRsp: Pointer to the FW download IOMB's payload. * * Return: none * * * *****************************************************************************/ osGLOBAL void ostiGenEventIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload; TI_DBG1(("ostiGenEventIOCTLRsp: start\n")); if(tdsaAllShared->tdFWControlEx.inProgress) /*Free only if our IOCTL is in progress*/ { agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; (tdsaAllShared->tdFWControlEx.tdFWControl)->retcode = IOCTL_ERR_STATUS_INTERNAL_ERROR; ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); tdsaAllShared->tdFWControlEx.inProgress = 0; } #ifdef SPC_ENABLE_PROFILE if(tdsaAllShared->tdFWProfileEx.inProgress) { agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWProfileEx.payload); agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; if(tdsaAllShared->tdFWProfileEx.virtAddr != NULL) /*Free only if our IOCTL is in progress*/ { ostiFreeMemory(tiRoot, tdsaAllShared->tdFWProfileEx.buffer, tdsaAllShared->tdFWProfileEx.len); tdsaAllShared->tdFWProfileEx.virtAddr = NULL; } ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWProfileEx.param1, tdsaAllShared->tdFWProfileEx.param2, NULL); tdsaAllShared->tdFWProfileEx.inProgress = 0; } #endif /*SPC_ENABLE_PROFILE*/ } osGLOBAL void ostiGetDeviceInfoIOCTLRsp( tiRoot_t *tiRoot, bit32 status, void *param ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload = agNULL; tdDeviceInfoPayload_t *pTDDeviceInfo = agNULL; agsaDeviceInfo_t *pSADeviceInfo = agNULL; TI_DBG1(("ostiGetDeviceInfoIOCTLRsp: start\n")); agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); pSADeviceInfo = (agsaDeviceInfo_t*)param; pTDDeviceInfo = (tdDeviceInfoPayload_t*)agIOCTLPayload->FunctionSpecificArea; if (pSADeviceInfo != agNULL) { /* fill the device information in IOCTL payload */ osti_memcpy(&pTDDeviceInfo->devInfo.sasAddressHi, pSADeviceInfo->sasAddressHi, sizeof(bit32)); osti_memcpy(&pTDDeviceInfo->devInfo.sasAddressLo, pSADeviceInfo->sasAddressLo, sizeof(bit32)); pTDDeviceInfo->devInfo.sasAddressHi = DMA_BEBIT32_TO_BIT32(pTDDeviceInfo->devInfo.sasAddressHi); pTDDeviceInfo->devInfo.sasAddressLo = DMA_BEBIT32_TO_BIT32(pTDDeviceInfo->devInfo.sasAddressLo); pTDDeviceInfo->devInfo.deviceType = (pSADeviceInfo->devType_S_Rate & 0x30) >> 4; pTDDeviceInfo->devInfo.linkRate = pSADeviceInfo->devType_S_Rate & 0x0F; agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } else { agIOCTLPayload->Status = IOCTL_ERR_STATUS_INVALID_DEVICE; } if(tdsaAllShared->tdFWControlEx.inProgress) /*Free only if our IOCTL is in progress*/ { ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); tdsaAllShared->tdFWControlEx.inProgress = 0; } } #ifdef INITIATOR_DRIVER osGLOBAL void ostiGetIoErrorStatsIOCTLRsp( tiRoot_t *tiRoot, bit32 status, void *param ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; 
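/*
 * Usage note: pIoErrorPayload->flag doubles as a clear-on-read switch.  A
 * monitoring tool that sets flag on every poll gets per-interval SCSI
 * status and sense-key counts, because the accumulators in the initiator
 * structure are zeroed immediately after the snapshot is copied out below.
 */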
itdsaIni_t *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni; tiIOCTLPayload_t *agIOCTLPayload = agNULL; tdIoErrorStatisticPayload_t *pIoErrorPayload = agNULL; agsaIOErrorEventStats_t *pIoErrorCount = agNULL; OS_ASSERT(sizeof(agsaIOErrorEventStats_t) == sizeof(tdIoErrorEventStatisticIOCTL_t), "agsaIOErrorEventStats_t tdIoErrorEventStatisticIOCTL_t\n"); TI_DBG1(("ostiGetIoErrorStatsIOCTLRsp: start\n")); agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); pIoErrorPayload = (tdIoErrorStatisticPayload_t*)agIOCTLPayload->FunctionSpecificArea; pIoErrorCount = (agsaIOErrorEventStats_t*)param; osti_memcpy(&pIoErrorPayload->IoError, pIoErrorCount, sizeof(agsaIOErrorEventStats_t)); /*copy SCSI status and sense key count from OS layer to TD layer*/ osti_memcpy(&pIoErrorPayload->ScsiStatusCounter, &Initiator->ScsiStatusCounts, sizeof(tdSCSIStatusCount_t)); osti_memcpy(&pIoErrorPayload->SenseKeyCounter, &Initiator->SenseKeyCounter, sizeof(tdSenseKeyCount_t)); if (pIoErrorPayload->flag) { osti_memset(&Initiator->ScsiStatusCounts, 0,sizeof(tdSCSIStatusCount_t) ); osti_memset(&Initiator->SenseKeyCounter, 0,sizeof(tdSenseKeyCount_t) ); } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } #endif /* INITIATOR_DRIVER */ osGLOBAL void ostiGetIoEventStatsIOCTLRsp( tiRoot_t *tiRoot, bit32 status, void *param ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload = agNULL; tdIoEventStatisticPayload_t *pIoEventPayload = agNULL; agsaIOErrorEventStats_t *pIoEventCount = agNULL; TI_DBG1(("ostiGetIoEventStatsIOCTLRsp: start\n")); agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); pIoEventPayload = (tdIoEventStatisticPayload_t*)agIOCTLPayload->FunctionSpecificArea; pIoEventCount = (agsaIOErrorEventStats_t*)param; osti_memcpy(&pIoEventPayload->IoEvent, pIoEventCount, sizeof(agsaIOErrorEventStats_t)); agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; } osGLOBAL void ostiGetForensicDataIOCTLRsp( tiRoot_t *tiRoot, bit32 status, void *param ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload = agNULL; tdForensicDataPayload_t *pForensicDataPayload = agNULL; agsaForensicData_t *pForensicData = agNULL; TI_DBG3(("ostiGetForensicDataIOCTLRsp: start, status = %d\n", status)); agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); if (agNULL == agIOCTLPayload) { return; } pForensicDataPayload = (tdForensicDataPayload_t*)agIOCTLPayload->FunctionSpecificArea; pForensicData = (agsaForensicData_t*)param; if (FORENSIC_DATA_TYPE_CHECK_FATAL == pForensicData->DataType) { agIOCTLPayload->Status = (bit16)status; return; } if (status == AGSA_RC_SUCCESS) { switch (pForensicData->DataType) { case FORENSIC_DATA_TYPE_NON_FATAL: case FORENSIC_DATA_TYPE_FATAL: pForensicDataPayload->dataBuffer.directOffset = pForensicData->BufferType.dataBuf.directOffset; pForensicDataPayload->dataBuffer.readLen = pForensicData->BufferType.dataBuf.readLen; break; case FORENSIC_DATA_TYPE_GSM_SPACE: pForensicDataPayload->gsmBuffer.directOffset = pForensicData->BufferType.gsmBuf.directOffset; pForensicDataPayload->gsmBuffer.readLen = pForensicData->BufferType.gsmBuf.readLen; break; case FORENSIC_DATA_TYPE_QUEUE: break; default: TI_DBG1(("ostiGetForensicDataIOCTLRsp: forensic data type error %d\n", pForensicData->DataType)); break; } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; }
else if(status == IOCTL_ERROR_NO_FATAL_ERROR) { agIOCTLPayload->Status = (bit16)status; } else { agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; } /*Free only if our IOCTL is in progress*/ if(tdsaAllShared->tdFWControlEx.inProgress) { TI_DBG3(("ostiGetForensicDataIOCTLRsp: Waiting for the signal \n")); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); TI_DBG3(("ostiGetForensicDataIOCTLRsp: Signal wait completed \n")); tdsaAllShared->tdFWControlEx.inProgress = 0; } } /***************************************************************************** * * tdsaRegDumpGetIoctl * * Purpose: This routine is called to get Register Dump information. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. * * *****************************************************************************/ osGLOBAL bit32 tdsaRegDumpGetIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); // agsaControllerStatus_t RegDump; bit32 Offset = 0; bit32 RequestLength = 0; /* user request on how much data to pass to application */ agsaRegDumpInfo_t regDumpInfo; void *buffer = agNULL; void *osMemHandle = agNULL; bit32 status = IOCTL_CALL_SUCCESS; bit32 CoreDumpLength = 16384; /* change it once data is available */ bit32 EventLogOffset = 65536; ///saGetControllerStatus(agRoot, &RegDump); /* length of FSA as provided by application */ RequestLength = agIOCTLPayload->Length; /// FunctionSpecificOffset = 0; /* Offset into the FunctionSpecificArea of payload */ /* offset into core dump that was passed from application */ Offset = agIOCTLPayload->Reserved; if((CoreDumpLength <= Offset)&& (agIOCTLPayload->MinorFunction != IOCTL_MN_FW_GET_EVENT_FLASH_LOG1)&& (agIOCTLPayload->MinorFunction != IOCTL_MN_FW_GET_EVENT_FLASH_LOG2)) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; status=IOCTL_CALL_SUCCESS; return status; } regDumpInfo.regDumpOffset = Offset; agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; /* dump either aap1 or iop registers */ switch(agIOCTLPayload->MinorFunction){ /*Coredump*/ case IOCTL_MN_FW_GET_CORE_DUMP_AAP1: //CoreDumpBAROffset = RegDump.fatalErrorInfo.regDumpOffset0; /* get this from mpi config table */ //CoreDumpLength = RegDump.fatalErrorInfo.regDumpLen0; /*changes for added Call back*/ tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; regDumpInfo.regDumpSrc = 0; regDumpInfo.regDumpNum = 0; regDumpInfo.directLen = RequestLength; regDumpInfo.directData = &agIOCTLPayload->FunctionSpecificArea[0]; /*changes for added Call back*/ //status = IOCTL_CALL_SUCCESS; 
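/*
 * The two dump sources take different data paths: the in-memory dumps
 * (regDumpSrc = 0, handled above) are returned directly in the payload's
 * FunctionSpecificArea through directLen/directData, while the
 * flash-resident dumps and event logs below (regDumpSrc = 1) bounce
 * through a DMA buffer obtained from ostiAllocMemory() and are copied back
 * to the caller in ostiRegDumpIOCTLRsp().
 */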
tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; case IOCTL_MN_FW_GET_CORE_DUMP_IOP: //CoreDumpBAROffset = RegDump.fatalErrorInfo.regDumpOffset1; /* get this from mpi config table */ //CoreDumpLength = RegDump.fatalErrorInfo.regDumpLen1; /*changes for added Call back*/ tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; regDumpInfo.regDumpSrc = 0; regDumpInfo.regDumpNum = 1; regDumpInfo.directLen = RequestLength; regDumpInfo.directData = &agIOCTLPayload->FunctionSpecificArea[0]; /*changes for added Call back*/ //status = IOCTL_CALL_SUCCESS; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; case IOCTL_MN_FW_GET_CORE_DUMP_FLASH_AAP1: regDumpInfo.regDumpSrc = 1; regDumpInfo.regDumpNum = 0; if(RequestLength != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &(regDumpInfo.indirectAddrUpper32), &(regDumpInfo.indirectAddrLower32), 8, RequestLength, agFALSE)) return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, RequestLength); regDumpInfo.indirectLen = RequestLength; // use FW control place in shared structure to keep the necessary information tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = RequestLength; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; case IOCTL_MN_FW_GET_CORE_DUMP_FLASH_IOP: regDumpInfo.regDumpSrc = 1; regDumpInfo.regDumpNum = 1; if(RequestLength != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &(regDumpInfo.indirectAddrUpper32), &(regDumpInfo.indirectAddrLower32), 8, RequestLength, agFALSE)) return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, RequestLength); regDumpInfo.indirectLen = RequestLength; // use FW control place in shared structure to keep the necessary information tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = RequestLength; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; /*EventLog from Flash*/ case IOCTL_MN_FW_GET_EVENT_FLASH_LOG1: //aap1 Eventlog if(CoreDumpLength + EventLogOffset <= Offset) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; status=IOCTL_CALL_SUCCESS; return status; } regDumpInfo.regDumpSrc = 1; regDumpInfo.regDumpNum = 0; if(RequestLength != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &(regDumpInfo.indirectAddrUpper32), &(regDumpInfo.indirectAddrLower32), 8, RequestLength, agFALSE)) return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, RequestLength); regDumpInfo.indirectLen = RequestLength; // use FW control place in shared structure to keep the necessary information tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = RequestLength; tdsaAllShared->tdFWControlEx.param1
= agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; case IOCTL_MN_FW_GET_EVENT_FLASH_LOG2: //iop Eventlog if(CoreDumpLength + EventLogOffset <= Offset) { agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; status=IOCTL_CALL_SUCCESS; return status; } regDumpInfo.regDumpSrc = 1; regDumpInfo.regDumpNum = 1; if(RequestLength != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &(regDumpInfo.indirectAddrUpper32), &(regDumpInfo.indirectAddrLower32), 8, RequestLength, agFALSE)) return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, RequestLength); regDumpInfo.indirectLen = RequestLength; // use FW control place in shared structure to keep the necessary information tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = RequestLength; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; default: status = IOCTL_CALL_INVALID_CODE; TI_DBG1(("tdsaRegDumpGetIoctl: ERROR: Wrong IOCTL code %d\n", agIOCTLPayload->MinorFunction)); break; } if(saGetRegisterDump(agRoot, agNULL, 0, &regDumpInfo) != AGSA_RC_SUCCESS) { status = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; } return status; } osGLOBAL void ostiCOMMgntVPDSetIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; // agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); TI_DBG1(("ostiCOMMgntVPDSetIOCTLRsp: start\n")); (tdsaAllShared->tdFWControlEx.tdFWControl)->retcode = status; ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); } /***************************************************************************** * * tdsaNVMDSetIoctl * * Purpose: This routine is called to set Config. SEEPROM information. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized.
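*
* Example (illustrative sketch, not driver code): writing 32 bytes of the
* TWI config SEEPROM at offset 0.  "payload" is a hypothetical
* tiIOCTLPayload_t pointer; nvmDev is dereferenced exactly as this routine
* reads agParam3:
*
*   bit8 nvmDev = 0;                        // NVMDevice 0 = TWI SEEPROM
*   payload->MinorFunction = IOCTL_MN_NVMD_SET_CONFIG;
*   payload->Length        = 32;            // bytes to write
*   payload->Reserved      = 0;             // offset into the device
*   // data to write goes in payload->FunctionSpecificArea[0..31]
*   status = tdsaNVMDSetIoctl(tiRoot, payload, agParam1, agParam2, &nvmDev);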
* * *****************************************************************************/ osGLOBAL bit32 tdsaNVMDSetIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { bit32 RequestLength = 0; bit32 bufAddrUpper = 0; bit32 bufAddrLower = 0; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); void *buffer = agNULL; void *osMemHandle = agNULL; bit32 status = IOCTL_CALL_SUCCESS; agsaNVMDData_t nvmdInfo; TI_DBG2(("tdsaNVMDSetIoctl: start\n")); RequestLength = agIOCTLPayload->Length; osti_memset(&nvmdInfo, 0, sizeof(agsaNVMDData_t)); switch(agIOCTLPayload->MinorFunction) { case IOCTL_MN_NVMD_SET_CONFIG: //nvmdInfo.NVMDevice = 1; nvmdInfo.NVMDevice = *((bit8*)agParam3); nvmdInfo.signature = 0xFEDCBA98; nvmdInfo.dataOffsetAddress = agIOCTLPayload->Reserved; nvmdInfo.indirectPayload = 1; nvmdInfo.indirectLen = RequestLength; if (nvmdInfo.NVMDevice == 0) { nvmdInfo.TWIDeviceAddress = 0xa0; nvmdInfo.TWIBusNumber = 0; nvmdInfo.TWIDevicePageSize = 0; nvmdInfo.TWIDeviceAddressSize = 1; } if(RequestLength != 0) { if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &bufAddrUpper, &bufAddrLower, 8, RequestLength, agFALSE)) return IOCTL_CALL_FAIL; } else { return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, RequestLength); osti_memcpy((void *)buffer, agIOCTLPayload->FunctionSpecificArea, RequestLength); nvmdInfo.indirectAddrLower32 = bufAddrLower; nvmdInfo.indirectAddrUpper32 = bufAddrUpper; // use FW control place in shared structure to keep the necessary information tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = RequestLength; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; default: status = IOCTL_CALL_INVALID_CODE; TI_DBG1(("tdsaNVMDSetIoctl: ERROR: Wrong IOCTL code %d\n", agIOCTLPayload->MinorFunction)); break; } if(saSetNVMDCommand(agRoot, agNULL, 0, &nvmdInfo) != AGSA_RC_SUCCESS) { status = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; } return status; } /***************************************************************************** * * tdsaNVMDGetIoctl * * Purpose: This routine is called to get Config. SEEPROM information. * This function is used for both target and initiator. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized.
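*
* Example (illustrative sketch, not driver code): reading 32 bytes of the
* config SEEPROM at offset 0, with "payload" a hypothetical
* tiIOCTLPayload_t pointer.  With non-NULL agParam1/agParam2 context
* handles the call returns IOCTL_CALL_PENDING and ostiGetNVMDIOCTLRsp()
* copies the data back into the FunctionSpecificArea; with agParam1 ==
* agNULL the routine polls NvmdResponseSet and extracts only the adapter
* WWN (NVMDevice 1/4) or the phy settings (NVMDevice 6) inline.
*
*   bit8 nvmDev = 0;                        // NVMDevice 0 = TWI SEEPROM
*   payload->MinorFunction = IOCTL_MN_NVMD_GET_CONFIG;
*   payload->Length        = 32;            // bytes to read
*   payload->Reserved      = 0;             // offset into the device
*   status = tdsaNVMDGetIoctl(tiRoot, payload, agParam1, agParam2, &nvmDev);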
* * *****************************************************************************/ osGLOBAL bit32 tdsaNVMDGetIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); void *buffer = agNULL; void *osMemHandle = agNULL; bit32 status = IOCTL_CALL_SUCCESS; agsaNVMDData_t nvmdInfo; bit32 Offset = 0; bit32 RequestLength = 0; bit32 ostiMemoryStatus = 0; bit32 i,j; bit8* seepromBuffer; bit8* phySettingsBuffer; TI_DBG2(("tdsaNVMDGetIoctl: start\n")); RequestLength = agIOCTLPayload->Length; Offset = agIOCTLPayload->Reserved; osti_memset(&nvmdInfo, 0, sizeof(agsaNVMDData_t)); /* This condition is not valid for direct read so commenting */ /*if(!tiIS_SPC(agRoot)) { if( RequestLength <= Offset ) //4096-max seeprom size { agIOCTLPayload->Status = IOCTL_ERR_STATUS_NO_MORE_DATA; agIOCTLPayload->Length = 0; status=IOCTL_CALL_SUCCESS; return status; } }*/ agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; switch(agIOCTLPayload->MinorFunction) { case IOCTL_MN_NVMD_GET_CONFIG: // nvmdInfo.NVMDevice = 1; nvmdInfo.NVMDevice = *((bit8*)agParam3); nvmdInfo.signature = 0xFEDCBA98; nvmdInfo.dataOffsetAddress = Offset; nvmdInfo.indirectPayload = 1; nvmdInfo.indirectLen = RequestLength; if (nvmdInfo.NVMDevice == 0) { nvmdInfo.TWIDeviceAddress = 0xa0; nvmdInfo.TWIBusNumber = 0; nvmdInfo.TWIDevicePageSize = 0; nvmdInfo.TWIDeviceAddressSize = 1; } if(RequestLength != 0) { ostiMemoryStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &(nvmdInfo.indirectAddrUpper32), &(nvmdInfo.indirectAddrLower32), 8, RequestLength, agFALSE); if((ostiMemoryStatus != tiSuccess) && (buffer == agNULL)) return IOCTL_CALL_FAIL; } else { return IOCTL_CALL_FAIL; } osti_memset((void *)buffer, 0, RequestLength); // use FW control place in shared structure to keep the necessary information tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = RequestLength; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; status = IOCTL_CALL_PENDING; break; default: status = IOCTL_CALL_INVALID_CODE; TI_DBG1(("tdsaNVMDGetIoctl: ERROR: Wrong IOCTL code %d\n", agIOCTLPayload->MinorFunction)); break; } tdsaAllShared->NvmdResponseSet = 0; if(saGetNVMDCommand(agRoot, agNULL, 0, &nvmdInfo) != AGSA_RC_SUCCESS) { status = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return status; } /* Copy the SAS address */ if(agParam1 == agNULL) { while(!tdsaAllShared->NvmdResponseSet) { // tiCOMDelayedInterruptHandler(tiRoot, 0, 1, tiNonInterruptContext); } if(nvmdInfo.NVMDevice == 4 || nvmdInfo.NVMDevice == 1) { seepromBuffer = buffer; /*Get Initiator SAS address*/ if(tiIS_SPC(agRoot)) { for(j=0,i=ADAPTER_WWN_SPC_START_OFFSET; i<= ADAPTER_WWN_SPC_END_OFFSET; i++,j++) agIOCTLPayload->FunctionSpecificArea[j] = seepromBuffer[i]; } else { for(j=0,i=ADAPTER_WWN_START_OFFSET; i<= ADAPTER_WWN_END_OFFSET; i++,j++) agIOCTLPayload->FunctionSpecificArea[j] = seepromBuffer[i]; } } /* Copy the Phy settings */ else if(nvmdInfo.NVMDevice == 6) { phySettingsBuffer = buffer; for(i=0; i<PHY_SETTINGS_LEN; i++) agIOCTLPayload->FunctionSpecificArea[i] =
phySettingsBuffer[i]; } tdsaAllShared->NvmdResponseSet = 0; ostiFreeMemory(tiRoot, tdsaAllShared->tdFWControlEx.buffer, tdsaAllShared->tdFWControlEx.len); } return status; } /***************************************************************************** * * tdsaDeviceInfoGetIoctl * * Purpose: This routine is called to get the specified device information. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. * * *****************************************************************************/ osGLOBAL bit32 tdsaDeviceInfoGetIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaDeviceData_t *oneDeviceData = agNULL; tiDeviceHandle_t *tiDeviceHandle = agNULL; tdDeviceInfoPayload_t *pDeviceInfo = agNULL; /*agsaDevHandle_t *agDevHandle = agNULL;*/ bit32 status = IOCTL_CALL_SUCCESS; pDeviceInfo = (tdDeviceInfoPayload_t*)agIOCTLPayload->FunctionSpecificArea; TI_DBG3(("tdsaDeviceInfoGetIoctl: %d:%3d:%d %p %p %p\n", (bit8)pDeviceInfo->PathId, (bit8)pDeviceInfo->TargetId, (bit8)pDeviceInfo->Lun, agParam1, agParam2, agParam3)); tiDeviceHandle = ostiMapToDevHandle(tiRoot, (bit8)pDeviceInfo->PathId, (bit8)pDeviceInfo->TargetId, (bit8)pDeviceInfo->Lun ); if (tiDeviceHandle == agNULL) { TI_DBG1(("tdsaDeviceInfoGetIoctl: tiDeviceHandle is NULL !!!! SCSI address = %d:%3d:%d\n", pDeviceInfo->PathId, pDeviceInfo->TargetId, pDeviceInfo->Lun)); agIOCTLPayload->Status = IOCTL_ERR_STATUS_INVALID_DEVICE; status = IOCTL_CALL_FAIL; return status; } oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; if(oneDeviceData == agNULL) { TI_DBG1(("tdsaDeviceInfoGetIoctl: tiDeviceHandle=%p DeviceData is NULL!!! SCSI address = %d:%3d:%d\n", tiDeviceHandle, pDeviceInfo->PathId, pDeviceInfo->TargetId, pDeviceInfo->Lun)); agIOCTLPayload->Status = IOCTL_ERR_STATUS_INVALID_DEVICE; status = IOCTL_CALL_FAIL; return status; } /* for hotplug */ if (oneDeviceData->valid != agTRUE || oneDeviceData->registered != agTRUE || oneDeviceData->tdPortContext == agNULL ) { TI_DBG1(("tdsaDeviceInfoGetIoctl: tiDeviceHandle=%p did %d DeviceData was removed!!! SCSI address = %d:%3d:%d\n", tiDeviceHandle, oneDeviceData->id, pDeviceInfo->PathId, pDeviceInfo->TargetId, pDeviceInfo->Lun)); agIOCTLPayload->Status = IOCTL_ERR_STATUS_INVALID_DEVICE; status = IOCTL_CALL_FAIL; return status; } /* fill the device information in IOCTL payload */ pDeviceInfo->devInfo.phyId = oneDeviceData->phyID; osti_memcpy(&pDeviceInfo->devInfo.sasAddressHi, oneDeviceData->agDeviceInfo.sasAddressHi, sizeof(bit32)); osti_memcpy(&pDeviceInfo->devInfo.sasAddressLo, oneDeviceData->agDeviceInfo.sasAddressLo, sizeof(bit32)); pDeviceInfo->devInfo.sasAddressHi = DMA_BEBIT32_TO_BIT32(pDeviceInfo->devInfo.sasAddressHi); pDeviceInfo->devInfo.sasAddressLo = DMA_BEBIT32_TO_BIT32(pDeviceInfo->devInfo.sasAddressLo); pDeviceInfo->devInfo.deviceType = (oneDeviceData->agDeviceInfo.devType_S_Rate & 0x30) >> 4; pDeviceInfo->devInfo.linkRate = oneDeviceData->agDeviceInfo.devType_S_Rate & 0x0F; agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; TI_DBG3(("tdsaDeviceInfoGetIoctl:IOCTL_CALL_SUCCESS\n")); /*saGetDeviceInfo(agRoot, agNULL, 0, 0, agDevHandle);*/ status = IOCTL_CALL_SUCCESS; return status; } /***************************************************************************** * * tdsaIoErrorStatisticGetIoctl * * Purpose: This routine is called to get the IO error statistic. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload.
* agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. * * *****************************************************************************/ osGLOBAL bit32 tdsaIoEventStatisticGetIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); tdIoEventStatisticPayload_t *pIoEventPayload = agNULL; bit32 status = IOCTL_CALL_SUCCESS; pIoEventPayload = (tdIoEventStatisticPayload_t*)agIOCTLPayload->FunctionSpecificArea; tdsaAllShared->tdFWControlEx.buffer = agNULL; tdsaAllShared->tdFWControlEx.virtAddr = agNULL; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = 0; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 0; saGetIOEventStats(agRoot, agNULL, pIoEventPayload->flag); return status; } /***************************************************************************** * * tdsaRegisterIoctl * * Purpose: This routine is called to read or write a chip register. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. * agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized.
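*
* Example (illustrative sketch, not driver code): pRegisterPayload->flag
* selects the direction, so a read-modify-write of a chip register looks
* like this ("pReg", "payload", "addr" and "mask" are hypothetical):
*
*   pReg->flag    = 0;                      // 0 = read
*   pReg->RegAddr = addr;
*   tdsaRegisterIoctl(tiRoot, payload, agParam1, agParam2, agParam3);
*   pReg->flag      = 1;                    // non-zero = write
*   pReg->RegValue |= mask;                 // modify the value just read
*   tdsaRegisterIoctl(tiRoot, payload, agParam1, agParam2, agParam3);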
* * *****************************************************************************/ osGLOBAL bit32 tdsaRegisterIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; // agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); tdRegisterPayload_t *pRegisterPayload = agNULL; bit32 status = IOCTL_CALL_SUCCESS; pRegisterPayload = (tdRegisterPayload_t*)agIOCTLPayload->FunctionSpecificArea; tdsaAllShared->tdFWControlEx.buffer = agNULL; tdsaAllShared->tdFWControlEx.virtAddr = agNULL; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = 0; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 0; TI_DBG1(("tdsaRegisterIoctl: Flag %d RegAddr 0x%x RegValue 0x%x\n", pRegisterPayload->flag, pRegisterPayload->RegAddr, pRegisterPayload->RegValue)); if (pRegisterPayload->flag) { /* set register */ ostiChipWriteBit32Ext(tiRoot, 0, pRegisterPayload->RegAddr, pRegisterPayload->RegValue); } else { /* get register */ pRegisterPayload->RegValue = ostiChipReadBit32Ext(tiRoot, 0, pRegisterPayload->RegAddr); } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; return status; } osGLOBAL bit32 tdsaGetPhyGeneralStatusIoctl( tiRoot_t *tiRoot, agsaPhyGeneralState_t *PhyData ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootNonInt); // agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); // bit8 totalValidPhys; bit32 status = AGSA_RC_SUCCESS; bit32 i = 0; agsaControllerInfo_t ControllerInfo; saGetControllerInfo(agRoot,&ControllerInfo); TI_DBG3(("tdsaGetPhyGeneralStatusIoctl: start\n")); do { if(tIsSPC(agRoot)||tIsSPCHIL(agRoot)) { status = IOCTL_ERR_STATUS_NOT_SUPPORTED; break; } PhyData->Reserved1 = ControllerInfo.phyCount; for(i=0;i<PhyData->Reserved1;i++) { status = saGetPhyProfile( agRoot,agNULL,tdsaRotateQnumber(tiRoot, agNULL), AGSA_SAS_PHY_GENERAL_STATUS_PAGE,i); if(status == AGSA_RC_FAILURE) { break; } } }while(0); TI_DBG3(("tdsaGetPhyGeneralStatusIoctl: End\n")); return status; } /***************************************************************************** * * ostiGetPhyGeneralStatusRsp * * Purpose: This routine is called when a PhyStatus IOCTL response is received. * * Parameters: * tiRoot: Pointer to driver instance * agsaSASPhyGeneralStatusPage_t: Status of the phy.
* bit32: phyID * * Return: none * * *****************************************************************************/ osGLOBAL void ostiGetPhyGeneralStatusRsp( tiRoot_t *tiRoot, agsaSASPhyGeneralStatusPage_t *GenStatus, bit32 phyID ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIoctlPayload = agNULL; agsaPhyGeneralState_t *pSetPhyStatusRes = agNULL; TI_DBG1(("ostiGetPhyGeneralStatusRsp: start\n")); if (tdsaAllShared->tdFWControlEx.inProgress) { agIoctlPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); if ((agIoctlPayload) && (PMC_IOCTL_SIGNATURE == agIoctlPayload->Signature)&& (IOCTL_MJ_PHY_GENERAL_STATUS == agIoctlPayload->MajorFunction)) { pSetPhyStatusRes = (agsaPhyGeneralState_t*) &agIoctlPayload->FunctionSpecificArea[0]; osti_memcpy(&pSetPhyStatusRes->PhyGenData[phyID], GenStatus, sizeof(agsaSASPhyGeneralStatusPage_t)); pSetPhyStatusRes->Reserved2++; if(pSetPhyStatusRes->Reserved1 == pSetPhyStatusRes->Reserved2) { tdsaAllShared->tdFWControlEx.payload = NULL; ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, agNULL); tdsaAllShared->tdFWControlEx.inProgress = 0; agIoctlPayload->Status = IOCTL_ERR_STATUS_OK; } } } TI_DBG1(("ostiGetPhyGeneralStatusRsp: end\n")); } osGLOBAL bit32 tdsaPhyProfileIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); void *buffer = agNULL; void *osMemHandle = agNULL; bit32 status = IOCTL_CALL_SUCCESS; bit32 retcode = AGSA_RC_FAILURE; bit32 RequestLength= agIOCTLPayload->Length; bit32 bufAddrUpper = 0; bit32 bufAddrLower = 0; tdPhyCount_t *PhyBlob = (tdPhyCount_t*)&agIOCTLPayload->FunctionSpecificArea[0]; if(ostiAllocMemory( tiRoot, &osMemHandle, (void **)&buffer, &bufAddrUpper, &bufAddrLower, RequestLength, RequestLength, agTRUE)) return IOCTL_CALL_FAIL; tdsaAllShared->tdFWControlEx.buffer = osMemHandle; tdsaAllShared->tdFWControlEx.virtAddr = buffer; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = 32; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 1; TI_DBG1(("tdsaPhyProfileIoctl: MinorFunction %d\n",agIOCTLPayload->MinorFunction)); // PhyBlob->Phy |= 0x100; if( tiIS_SPC(agRoot) ) { TI_DBG1(("tdsaPhyProfileIoctl: SPC operation 0x%x PHY %d\n",agIOCTLPayload->MinorFunction,PhyBlob->Phy)); retcode = saLocalPhyControl(agRoot,agNULL,0 ,PhyBlob->Phy ,agIOCTLPayload->MinorFunction , agNULL); if(retcode == AGSA_RC_SUCCESS) { status = IOCTL_CALL_PENDING; } } else { TI_DBG1(("tdsaPhyProfileIoctl: SPCv operation 0x%x PHY %d\n",agIOCTLPayload->MinorFunction,PhyBlob->Phy)); retcode = saGetPhyProfile( agRoot,agNULL,0,agIOCTLPayload->MinorFunction , PhyBlob->Phy); if(retcode == AGSA_RC_SUCCESS) { status = IOCTL_CALL_PENDING; } } TI_DBG2(("tdsaPhyProfileIoctl: after\n")); return status; } /***************************************************************************** * * tdsaForensicDataGetIoctl * * Purpose: This routine is called to get Forensic Data. * * Parameters: * tiRoot: Pointer to driver instance * agIOCTLPayload: Pointer to the IOCTL payload. 
* agParam1: Pointer to pass context handle for IOCTL DMA operation * agParam2: Pointer to pass context handle for IOCTL DMA operation * agParam3: Pointer to pass context handle for IOCTL DMA operation * * Return: * * IOCTL_CALL_SUCCESS The requested operation completed successfully. * IOCTL_CALL_FAIL Fail to complete the IOCTL request. * Detail error code is function specific and * defined by the specific IOCTL function. * IOCTL_CALL_PENDING This request is asynchronous and completed * in some other context. * IOCTL_CALL_INVALID_CODE This IOCTL function is not recognized. * * *****************************************************************************/ osGLOBAL bit32 tdsaForensicDataGetIoctl( tiRoot_t *tiRoot, tiIOCTLPayload_t *agIOCTLPayload, void *agParam1, void *agParam2, void *agParam3 ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = &(tdsaAllShared->agRootInt); tdForensicDataPayload_t *pForensicDataPayload = agNULL; agsaForensicData_t ForensicData; bit32 status = IOCTL_CALL_SUCCESS; pForensicDataPayload = (tdForensicDataPayload_t*)agIOCTLPayload->FunctionSpecificArea; tdsaAllShared->tdFWControlEx.buffer = agNULL; tdsaAllShared->tdFWControlEx.virtAddr = agNULL; tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0]; tdsaAllShared->tdFWControlEx.len = 0; tdsaAllShared->tdFWControlEx.param1 = agParam1; tdsaAllShared->tdFWControlEx.param2 = agParam2; tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload; tdsaAllShared->tdFWControlEx.inProgress = 0; osti_memset(&ForensicData, 0, sizeof(agsaForensicData_t)); ForensicData.DataType = pForensicDataPayload->DataType; switch (ForensicData.DataType) { case FORENSIC_DATA_TYPE_NON_FATAL: case FORENSIC_DATA_TYPE_FATAL: ForensicData.BufferType.dataBuf.directLen = pForensicDataPayload->dataBuffer.directLen; ForensicData.BufferType.dataBuf.directOffset = pForensicDataPayload->dataBuffer.directOffset; ForensicData.BufferType.dataBuf.readLen = pForensicDataPayload->dataBuffer.readLen; ForensicData.BufferType.dataBuf.directData = (void*)pForensicDataPayload->dataBuffer.directData; break; case FORENSIC_DATA_TYPE_GSM_SPACE: ForensicData.BufferType.gsmBuf.directLen = pForensicDataPayload->gsmBuffer.directLen; ForensicData.BufferType.gsmBuf.directOffset = pForensicDataPayload->gsmBuffer.directOffset; ForensicData.BufferType.dataBuf.readLen = pForensicDataPayload->gsmBuffer.readLen; ForensicData.BufferType.gsmBuf.directData = (void*)pForensicDataPayload->gsmBuffer.directData; break; case FORENSIC_DATA_TYPE_IB_QUEUE: ForensicData.BufferType.queueBuf.directLen = pForensicDataPayload->queueBuffer.directLen; //ForensicData.BufferType.queueBuf.queueType = pForensicDataPayload->queueBuffer.queueType; ForensicData.BufferType.queueBuf.queueType = FORENSIC_DATA_TYPE_IB_QUEUE; ForensicData.BufferType.queueBuf.queueIndex = pForensicDataPayload->queueBuffer.queueIndex; ForensicData.BufferType.queueBuf.directData = (void*)pForensicDataPayload->queueBuffer.directData; break; case FORENSIC_DATA_TYPE_OB_QUEUE: ForensicData.BufferType.queueBuf.directLen = pForensicDataPayload->queueBuffer.directLen; ForensicData.BufferType.queueBuf.queueType = FORENSIC_DATA_TYPE_OB_QUEUE; ForensicData.BufferType.queueBuf.queueIndex = pForensicDataPayload->queueBuffer.queueIndex; ForensicData.BufferType.queueBuf.directData = (void*)pForensicDataPayload->queueBuffer.directData; break; default: TI_DBG1(("tdsaGetForensicDataIoctl: forensic data type error %d\n", 
               pForensicDataPayload->DataType));
      status = IOCTL_CALL_INVALID_CODE;
      return status;
  }

  if (saGetForensicData(agRoot, agNULL, &ForensicData) != AGSA_RC_SUCCESS)
  {
    status = IOCTL_CALL_FAIL;
  }
  return status;
}
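/*
 * Illustrative sketch (not part of this driver): every tdsa*Ioctl routine
 * here reports one of the four IOCTL_CALL_* codes documented in its header.
 * A caller treats IOCTL_CALL_PENDING as "wait for the completion callback"
 * and everything else as immediately final.  A minimal dispatcher, assuming
 * a hypothetical wait_for_ioctl_signal() helper, could look like:
 */
#if 0   /* example only, not compiled */
switch (tdsaForensicDataGetIoctl(tiRoot, payload, p1, p2, agNULL))
{
  case IOCTL_CALL_SUCCESS:      /* completed synchronously          */
    break;
  case IOCTL_CALL_PENDING:      /* completes in callback context    */
    wait_for_ioctl_signal(p1, p2);
    break;
  case IOCTL_CALL_INVALID_CODE: /* unrecognized function/data type  */
  case IOCTL_CALL_FAIL:
  default:
    payload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
    break;
}
#endif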
osGLOBAL bit32
tdsaSendSMPIoctl(
  tiRoot_t            *tiRoot,
  tiIOCTLPayload_t    *agIOCTLPayload,
  void                *agParam1,
  void                *agParam2,
  void                *agParam3
  )
{
  tdsaRoot_t              *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t           *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t              *agRoot = &(tdsaAllShared->agRootInt);
  void                    *reqBuffer = agNULL;
  void                    *respBuffer = agNULL;
  void                    *osMemHandle = agNULL;
  bit32                   status = IOCTL_CALL_SUCCESS;
//  bit32                   Offset = 0;
//  bit32                   RequestLength = 0;
  bit32                   ostiMemoryStatus = 0;
  smp_pass_through_req_t  *smp_pass_through_req;
  tiDeviceHandle_t        *devHandle;
  agsaSMPFrame_t          agSMPFrame;
  tdsaDeviceData_t        *oneDeviceData = agNULL;
  bit32                   i;

  TI_DBG2(("tdsaSendSMPIoctl: start\n"));
  smp_pass_through_req = (smp_pass_through_req_t*)agIOCTLPayload->FunctionSpecificArea;

  for (i = 0; i < 8; i++)
    TI_DBG2(("SAS Address[%d]:%x", i, smp_pass_through_req->exp_sas_addr[i]));
  TI_DBG2(("SAS Request Length:%d", smp_pass_through_req->smp_req_len));
  TI_DBG2(("SAS Response Length:%d", smp_pass_through_req->smp_resp_len));
  for (i = 0; i < smp_pass_through_req->smp_req_len; i++)
    TI_DBG2(("SAS request + %d:%x", i, smp_pass_through_req->smp_req_resp[i]));

  devHandle = ostiGetDevHandleFromSasAddr(tiRoot, smp_pass_through_req->exp_sas_addr);
  if (devHandle == NULL)
  {
    status = IOCTL_CALL_FAIL;
    agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
    return status;
  }

  //agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK;
  agIOCTLPayload->Status = IOCTL_ERR_STATUS_NOT_RESPONDING;

  if ((ostiMemoryStatus != tiSuccess) && (reqBuffer == agNULL))
    return IOCTL_CALL_FAIL;

  tdsaAllShared->tdFWControlEx.param3 = osMemHandle;

  agSMPFrame.outFrameBuf = smp_pass_through_req->smp_req_resp;
  agSMPFrame.expectedRespLen = smp_pass_through_req->smp_resp_len;
  agSMPFrame.inFrameLen = smp_pass_through_req->smp_resp_len - 4;

  if (!(smp_pass_through_req->smp_req_len - 8) && !tiIS_SPC(agRoot))
  {
    agSMPFrame.flag = 1; // Direct request Indirect response
    agSMPFrame.outFrameLen = smp_pass_through_req->smp_req_len - 4; //Exclude header
  }
  else
  {
    agSMPFrame.flag = 3; //Indirect request and Indirect response
    ostiMemoryStatus = ostiAllocMemory( tiRoot,
                                        &osMemHandle,
                                        (void **)&reqBuffer,
                                        &(agSMPFrame.outFrameAddrUpper32),
                                        &(agSMPFrame.outFrameAddrLower32),
                                        8,
                                        smp_pass_through_req->smp_req_len,
                                        agFALSE);
    tdsaAllShared->tdFWControlEx.param3 = osMemHandle;
    if (tiIS_SPC(agRoot))
    {
      agSMPFrame.outFrameLen = smp_pass_through_req->smp_req_len - 4; //Exclude crc
      osti_memcpy((void *)reqBuffer,
                  (void *)(smp_pass_through_req->smp_req_resp),
                  smp_pass_through_req->smp_req_len);
    }
    else
    {
      agSMPFrame.outFrameLen = smp_pass_through_req->smp_req_len - 8; //Exclude header and crc
      osti_memcpy((void *)reqBuffer,
                  (void *)(smp_pass_through_req->smp_req_resp + 4),
                  smp_pass_through_req->smp_req_len - 4);
    }
  }

  ostiMemoryStatus = ostiAllocMemory( tiRoot,
                                      &osMemHandle,
                                      (void **)&respBuffer,
                                      &(agSMPFrame.inFrameAddrUpper32),
                                      &(agSMPFrame.inFrameAddrLower32),
                                      8,
                                      smp_pass_through_req->smp_resp_len + 4,
                                      agFALSE);
  if ((ostiMemoryStatus != tiSuccess) && (respBuffer == agNULL))
    return IOCTL_CALL_FAIL;

  osti_memset((void *)respBuffer, 0, smp_pass_through_req->smp_resp_len);

  // use FW control place in shared structure to keep the necessary information
  tdsaAllShared->tdFWControlEx.buffer = osMemHandle;
  tdsaAllShared->tdFWControlEx.virtAddr = respBuffer;
  tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)smp_pass_through_req->smp_req_resp +
                                         smp_pass_through_req->smp_req_len;
  tdsaAllShared->tdFWControlEx.len = smp_pass_through_req->smp_resp_len;
  tdsaAllShared->tdFWControlEx.param1 = agParam1;
  tdsaAllShared->tdFWControlEx.param2 = agParam2;
  tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload;
  tdsaAllShared->tdFWControlEx.inProgress = 1;
  status = IOCTL_CALL_PENDING;

  oneDeviceData = (tdsaDeviceData_t *)devHandle->tdData;
  if (saSendSMPIoctl(agRoot, oneDeviceData->agDevHandle, 0, &agSMPFrame,
                     &ossaSMPIoctlCompleted) != AGSA_RC_SUCCESS)
  {
    status = IOCTL_CALL_FAIL;
    agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
  }
  return status;
}

osGLOBAL void
ostiSendSMPIOCTLRsp(
  tiRoot_t    *tiRoot,
  bit32       status
  )
{
  tdsaRoot_t       *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t    *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tiIOCTLPayload_t *agIOCTLPayload;

  agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload);
  agIOCTLPayload->Status = (bit16)status;

  TI_DBG1(("ostiSendSMPIOCTLRsp: start, status = %d\n", status));

//  if(tdsaAllShared->tdFWControlEx.param1 != agNULL)
//  {
  osti_memcpy((void *)(tdsaAllShared->tdFWControlEx.usrAddr),
              (void *)(tdsaAllShared->tdFWControlEx.virtAddr),
              tdsaAllShared->tdFWControlEx.len);
//  }
  ostiFreeMemory(tiRoot,
                 tdsaAllShared->tdFWControlEx.buffer,
                 tdsaAllShared->tdFWControlEx.len);
  ostiFreeMemory(tiRoot,
                 tdsaAllShared->tdFWControlEx.param3,
                 tdsaAllShared->tdFWControlEx.len);
//  if(tdsaAllShared->tdFWControlEx.param1 != agNULL)
//  {
  ostiIOCTLComplete(tiRoot,
                    tdsaAllShared->tdFWControlEx.param1,
                    tdsaAllShared->tdFWControlEx.param2,
                    NULL);
//  }
}
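/*
 * Illustrative sketch (not part of this driver): a user of the SMP
 * pass-through IOCTL fills in an smp_pass_through_req_t with the target
 * expander's SAS address and a raw SMP frame.  The byte values below are
 * the standard SMP REPORT GENERAL request (frame type 0x40, function 0x00);
 * lengths include the 4-byte header and 4-byte CRC, matching the "- 4" /
 * "- 8" adjustments made above, and an 8-byte request takes the direct
 * path on non-SPC controllers.  expander_sas_addr and the sizes are
 * illustrative values only:
 */
#if 0   /* example only, not compiled */
smp_pass_through_req_t req;

osti_memset(&req, 0, sizeof(req));
osti_memcpy(req.exp_sas_addr, expander_sas_addr, 8); /* target expander      */
req.smp_req_len  = 8;        /* 4-byte header + 4-byte CRC                   */
req.smp_resp_len = 32;       /* room for the REPORT GENERAL response         */
req.smp_req_resp[0] = 0x40;  /* SMP frame type: request                      */
req.smp_req_resp[1] = 0x00;  /* function: REPORT GENERAL                     */
#endif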
/*****************************************************************************
 *
 * tdsaSendBISTIoctl
 *
 * Purpose:  This routine is called to run a built-in self test (BIST) on
 *           the encryption engine.
 *
 * Parameters:
 *   tiRoot:         Pointer to driver instance
 *   agIOCTLPayload: Pointer to the IOCTL payload.
 *   agParam1:       Pointer to pass context handle for IOCTL DMA operation
 *   agParam2:       Pointer to pass context handle for IOCTL DMA operation
 *   agParam3:       Pointer to pass context handle for IOCTL DMA operation
 *
 * Return:
 *
 *   IOCTL_CALL_SUCCESS       The requested operation completed successfully.
 *   IOCTL_CALL_FAIL          Fail to complete the IOCTL request.
 *                            Detail error code is function specific and
 *                            defined by the specific IOCTL function.
 *   IOCTL_CALL_PENDING       This request is asynchronous and completed
 *                            in some other context.
 *   IOCTL_CALL_INVALID_CODE  This IOCTL function is not recognized.
 *
 *****************************************************************************/
osGLOBAL bit32
tdsaSendBISTIoctl(
  tiRoot_t            *tiRoot,
  tiIOCTLPayload_t    *agIOCTLPayload,
  void                *agParam1,
  void                *agParam2,
  void                *agParam3
  )
{
  tdsaRoot_t      *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t   *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t      *agRoot = &(tdsaAllShared->agRootInt);
  tdBistPayload_t *pBistPayload;
//  bit32           length = 0;
//  bit32           status = IOCTL_CALL_SUCCESS;
  bit32           status = IOCTL_CALL_FAIL;

  pBistPayload = (tdBistPayload_t*)agIOCTLPayload->FunctionSpecificArea;

  tdsaAllShared->tdFWControlEx.buffer = agNULL;
  tdsaAllShared->tdFWControlEx.virtAddr = agNULL;
  tdsaAllShared->tdFWControlEx.usrAddr = (bit8*)&agIOCTLPayload->FunctionSpecificArea[0];
  tdsaAllShared->tdFWControlEx.len = 0;
  tdsaAllShared->tdFWControlEx.param1 = agParam1;
  tdsaAllShared->tdFWControlEx.param2 = agParam2;
  tdsaAllShared->tdFWControlEx.payload = agIOCTLPayload;
  tdsaAllShared->tdFWControlEx.inProgress = 0;

  TI_DBG1(("tdsaSendBISTIoctl: Type %d Length %d Data %p\n",
           pBistPayload->testType,
           pBistPayload->testLength,
           pBistPayload->testData));

//  pBistPayload->testtype = AGSA_BIST_TEST;

  if (pBistPayload->testType == AGSA_BIST_TEST)
  {
    if (pBistPayload->testLength != sizeof(agsaEncryptSelfTestBitMap_t))
    {
      return status;
    }
  }
  else if (pBistPayload->testType == AGSA_SHA_TEST)
  {
    if (pBistPayload->testLength != sizeof(agsaEncryptSHATestDescriptor_t))
    {
      return status;
    }
  }
  else if (pBistPayload->testType == AGSA_HMAC_TEST)
  {
    if (pBistPayload->testLength != sizeof(agsaEncryptHMACTestDescriptor_t))
    {
      return status;
    }
  }

/*
GLOBAL bit32 saEncryptSelftestExecute(
                        agsaRoot_t     *agRoot,
                        agsaContext_t  *agContext,
                        bit32           queueNum,
                        bit32           type,
                        bit32           length,
                        void           *TestDescriptor);
*/
  if (saEncryptSelftestExecute(agRoot, agNULL, 0,
                               pBistPayload->testType,
                               pBistPayload->testLength,
                               pBistPayload->testData) != AGSA_RC_SUCCESS)
  {
    status = IOCTL_CALL_FAIL;
  }
  return status;
}

osGLOBAL bit32
tdsaSendTMFIoctl(
  tiRoot_t          *tiRoot,
  tiIOCTLPayload_t  *agIOCTLPayload,
  void              *agParam1,
  void              *agParam2,
  unsigned long     resetType
  )
{
  bit32                  status;
  tmf_pass_through_req_t *tmf_req =
      (tmf_pass_through_req_t*)agIOCTLPayload->FunctionSpecificArea;

#if !(defined(__FreeBSD__))
  status = ostiSendResetDeviceIoctl(tiRoot, agParam2, tmf_req->pathId,
                                    tmf_req->targetId, tmf_req->lun, resetType);
#endif
  TI_DBG3(("Status returned from ostiSendResetDeviceIoctl is %d\n", status));
  if (status != IOCTL_CALL_SUCCESS)
  {
    agIOCTLPayload->Status = status;
    return status;
  }
  status = IOCTL_CALL_SUCCESS;
  return status;
}
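/*
 * Illustrative sketch (not part of this driver): the TMF pass-through above
 * resolves the target by the CAM-style address triple carried in the
 * payload.  A caller would populate it along these lines (field names taken
 * from the usage above; values illustrative):
 */
#if 0   /* example only, not compiled */
tmf_pass_through_req_t tmf;

tmf.pathId   = 0;   /* SCSI bus (path) of the device to reset    */
tmf.targetId = 3;   /* target id on that path                    */
tmf.lun      = 0;   /* logical unit to which the TMF is sent     */
/* resetType is passed separately as the last tdsaSendTMFIoctl() argument */
#endif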
#ifdef VPD_TESTING
/* temporary to test saSetVPDCommand() and saGetVPDCommand */
osGLOBAL bit32
tdsaVPDSet(
  tiRoot_t            *tiRoot
  )
{
  tdsaRoot_t     *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t  *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t     *agRoot = &(tdsaAllShared->agRootInt);
  bit32          status = IOCTL_CALL_SUCCESS;
  agsaVPD_t      VPDInfo;
  bit32          ret = AGSA_RC_SUCCESS;
  bit32          bufAddrUpper = 0;
  bit32          bufAddrLower = 0;
  tdVPDControl_t *VPDControl;
  void           *osMemHandle = agNULL;
  void           *buffer;
  bit32          timeCount = 0;
  bit8           ioctlErr = 0;
  bit8           VPDPayload[32];
  bit8           i;

  TI_DBG2(("tdsaVPDSet: start\n"));
  for (i = 0; i < sizeof(VPDPayload); i++)
  {
    VPDPayload[i] = i;
  }
  if (ostiAllocMemory( tiRoot,
                       &osMemHandle,
                       (void **)&buffer,
                       &bufAddrUpper,
                       &bufAddrLower,
                       8,
                       sizeof(VPDPayload),
                       agFALSE)
      != tiSuccess)
  {
    return tiError;
  }
  osti_memcpy(buffer, VPDPayload, sizeof(VPDPayload));

  osti_memset(&VPDInfo, 0, sizeof(agsaVPD_t));
  VPDInfo.indirectMode = 1;  /* indirect mode */
  VPDInfo.VPDDevice = 1;     /* SEEPROM-1 */
  VPDInfo.VPDOffset = 0;
  VPDInfo.indirectAddrUpper32 = bufAddrUpper;
  VPDInfo.indirectAddrLower32 = bufAddrLower;
  VPDInfo.indirectLen = sizeof(VPDPayload);

  tdsaAllShared->tdFWControlEx.buffer = osMemHandle;
  tdsaAllShared->tdFWControlEx.param1 = agParam1;
  tdsaAllShared->tdFWControlEx.param2 = agParam2;

  /* for testing only */
  tdsaAllShared->addrUpper = bufAddrUpper;
  tdsaAllShared->addrLower = bufAddrLower;

  ret = saSetVPDCommand(agRoot, agNULL, 0, &VPDInfo);
  if (ret == AGSA_RC_SUCCESS)
  {
    status = tiSuccess;
  }
  else
  {
    status = tiError;
  }
  ostiFreeMemory(tiRoot, osMemHandle, sizeof(VPDPayload));
  return status;
}

/* temporary to test saSetVPDCommand() and saGetVPDCommand */
osGLOBAL bit32
tdsaVPDGet(tiRoot_t *tiRoot)
{
  tdsaRoot_t    *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t    *agRoot = &(tdsaAllShared->agRootInt);
  bit32         status = IOCTL_CALL_SUCCESS;
  agsaVPD_t     VPDInfo;
  bit32         ret = AGSA_RC_SUCCESS;

  TI_DBG2(("tdsaVPDGet: start\n"));
  osti_memset(&VPDInfo, 0, sizeof(agsaVPD_t));

  /* direct mode worked */
  VPDInfo.indirectMode = 0; /* direct mode */
  VPDInfo.VPDDevice = 1;    /* SEEPROM-1*/
  VPDInfo.directLen = 32;
  VPDInfo.VPDOffset = 0;
  VPDInfo.directData = agNULL;
  VPDInfo.indirectAddrUpper32 = 0;
  VPDInfo.indirectAddrLower32 = 0;
  VPDInfo.indirectLen = 0;

#ifdef NOT_YET
  /* worked; can't read VPD in ossaGetVPDResponseCB() because of indirect */
  VPDInfo.indirectMode = 1; /* indirect mode */
  VPDInfo.VPDDevice = 1;    /* SEEPROM-1*/
  VPDInfo.directLen = 0;
  VPDInfo.VPDOffset = 0;
  VPDInfo.directData = agNULL;
  VPDInfo.indirectAddrUpper32 = tdsaAllShared->addrUpper;
  VPDInfo.indirectAddrLower32 = tdsaAllShared->addrLower;
  VPDInfo.indirectLen = 32;
#endif

  ret = saGetVPDCommand(agRoot, agNULL, 0, &VPDInfo);

  if (ret == AGSA_RC_SUCCESS)
  {
    status = tiSuccess;
  }
  else
  {
    status = tiError;
  }
  return status;
}
#endif

/*****************************************************************************
 *
 * tdsaGetNumOfLUNIOCTL
 *
 * Purpose:  This routine is called to send a Report LUN SSP command request.
 *
 * Parameters:
 *   tiRoot:         Pointer to driver instance
 *   agIOCTLPayload: Pointer to the IOCTL payload.
 *   agParam1:       Void pointer to device extension
 *   agParam2:       Void pointer to SRB
 *   agParam3:       NULL
 *
 * Return:  status
 *
 *****************************************************************************/
osGLOBAL bit32
tdsaGetNumOfLUNIOCTL(
  tiRoot_t            *tiRoot,
  tiIOCTLPayload_t    *agIOCTLPayload,
  void                *agParam1,
  void                *agParam2,
  void                *agParam3
  )
{
  tdsaRoot_t             *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t          *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t             *agRoot = &(tdsaAllShared->agRootInt);
  tdDeviceLUNInfoIOCTL_t *pDeviceLUNInfo = agNULL;
  tiDeviceHandle_t       *devHandle = agNULL;
  void                   *tiRequestBody = agNULL;
  tiIORequest_t          *tiIORequest = agNULL;
  bit32                  status = IOCTL_CALL_SUCCESS;

  TI_DBG2(("tdsaGetNumOfLUNIOCTL: Start\n"));
  do
  {
    pDeviceLUNInfo = (tdDeviceLUNInfoIOCTL_t*)agIOCTLPayload->FunctionSpecificArea;
    if (agIOCTLPayload->Length < sizeof(tdDeviceLUNInfoIOCTL_t))
    {
      status = IOCTL_CALL_FAIL;
      break;
    }
    if (!pDeviceLUNInfo->tiDeviceHandle)
    {
      status = IOCTL_CALL_FAIL;
      agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
      break;
    }
    devHandle = (tiDeviceHandle_t*)pDeviceLUNInfo->tiDeviceHandle;
    agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK;
    status = ostiNumOfLUNIOCTLreq(tiRoot, agParam1, agParam2,
                                  &tiRequestBody, &tiIORequest);
    if (status != AGSA_RC_SUCCESS)
    {
      agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
      break;
    }
    status = tiNumOfLunIOCTLreq(tiRoot, tiIORequest, devHandle, tiRequestBody,
                                agIOCTLPayload, agParam1, agParam2);
    if (status != AGSA_RC_SUCCESS)
    {
      agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
      break;
    }
//    ostiIOCTLWaitForSignal (tiRoot, agParam1, agParam2, agParam3);
  } while (0);
  TI_DBG2(("tdsaGetNumOfLUNIOCTL: End\n"));
  return status;
}
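/*
 * Illustrative sketch (not part of this driver): the callback below derives
 * the LUN count from a standard SCSI REPORT LUNS response, whose first four
 * bytes hold the LUN list length in bytes, big-endian; each LUN entry is
 * 8 bytes, hence the division by 8.  In isolation:
 */
#if 0   /* example only, not compiled */
static bit32 report_luns_count(const bit8 *rsp)
{
  bit32 listLenBytes = ((bit32)rsp[0] << 24) | ((bit32)rsp[1] << 16) |
                       ((bit32)rsp[2] << 8)  |  (bit32)rsp[3];
  return listLenBytes / 8;   /* 8 bytes per LUN entry */
}
#endif

/*****************************************************************************
 *
 * ostiNumOfLUNIOCTLRsp
 *
 * Purpose:  This routine is called when a Report LUN SSP command response
 *           is received.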
* * Parameters: * tiRoot: Pointer to driver instance * bit32 status * * Return: none * * *****************************************************************************/ osGLOBAL void ostiNumOfLUNIOCTLRsp( tiRoot_t *tiRoot, bit32 status ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tiIOCTLPayload_t *agIOCTLPayload; tdDeviceLUNInfoIOCTL_t *pDeviceLUNInfo = NULL; bit32 count = 0; bit32 numOfLUN =0; TI_DBG1(("ostiNumOfLUNIOCTLRsp: start, status = %d\n", status)); if(tdsaAllShared->tdFWControlEx.inProgress == 1) { agIOCTLPayload = (tiIOCTLPayload_t *)(tdsaAllShared->tdFWControlEx.payload); if ((agIOCTLPayload) && (PMC_IOCTL_SIGNATURE == agIOCTLPayload->Signature)&& (IOCTL_MJ_GET_DEVICE_LUN == agIOCTLPayload->MajorFunction)) { agIOCTLPayload->Status = (bit16)status; pDeviceLUNInfo = (tdDeviceLUNInfoIOCTL_t*)agIOCTLPayload->FunctionSpecificArea; numOfLUN = ((tdsaAllShared->tdFWControlEx.virtAddr[0] << 24)|(tdsaAllShared->tdFWControlEx.virtAddr[1] << 16)|\ (tdsaAllShared->tdFWControlEx.virtAddr[2] << 8)|(tdsaAllShared->tdFWControlEx.virtAddr[3])); numOfLUN = numOfLUN/8; pDeviceLUNInfo->numOfLun = numOfLUN; // ostiFreeMemory(tiRoot, // tdsaAllShared->tdFWControlEx.virtAddr, // tdsaAllShared->tdFWControlEx.len); // if(tdsaAllShared->tdFWControlEx.param1 != agNULL) // { ostiIOCTLSetSignal(tiRoot, tdsaAllShared->tdFWControlEx.param1, tdsaAllShared->tdFWControlEx.param2, NULL); tdsaAllShared->tdFWControlEx.payload = NULL; // } tdsaAllShared->tdFWControlEx.inProgress = 0; } } TI_DBG1(("ostiNumOfLUNIOCTLRsp: End\n")); } Index: head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c =================================================================== --- head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c (revision 313981) +++ head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c (revision 313982) @@ -1,6655 +1,6655 @@ /******************************************************************************* ** *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * *INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE *ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS *OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, *WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF *THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ** *******************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #define MAJOR_REVISION 1 #define MINOR_REVISION 3 #define BUILD_REVISION 10800 #include // defines used in kernel.h #include #include #include #include #include // types used in module initialization #include // cdevsw struct #include // uio struct #include #include #include // structs, prototypes for pci bus stuff #include #include #include #include // 1. for vtophys #include // 2. for vtophys #include // For pci_get macros #include #include #include #include #include #include #include #include #include #include #include #include #include // #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE( M_PMC_MCCB, "CCB List", "CCB List for PMCS driver" ); MALLOC_DEFINE( M_PMC_MSTL, "STLock malloc", "allocated in agtiapi_attach as memory for lock use" ); MALLOC_DEFINE( M_PMC_MDVT, "ag_device_t malloc", "allocated in agtiapi_attach as mem for ag_device_t pDevList" ); MALLOC_DEFINE( M_PMC_MPRT, "ag_portal_data_t malloc", "allocated in agtiapi_attach as mem for *pPortalData" ); MALLOC_DEFINE( M_PMC_MDEV, "tiDeviceHandle_t * malloc", "allocated in agtiapi_GetDevHandle as local mem for **agDev" ); MALLOC_DEFINE( M_PMC_MFLG, "lDevFlags * malloc", "allocated in agtiapi_GetDevHandle as local mem for * flags" ); #ifdef LINUX_PERBI_SUPPORT MALLOC_DEFINE( M_PMC_MSLR, "ag_slr_map_t malloc", "mem allocated in agtiapi_attach for pSLRList" ); MALLOC_DEFINE( M_PMC_MTGT, "ag_tgt_map_t malloc", "mem allocated in agtiapi_attach for pWWNList" ); #endif MALLOC_DEFINE(TEMP,"tempbuff","buffer for payload"); MALLOC_DEFINE(TEMP2, "tempbuff", "buffer for agtiapi_getdevlist"); STATIC U32 agtiapi_intx_mode = 0; STATIC U08 ag_Perbi = 0; STATIC U32 agtiapi_polling_mode = 0; STATIC U32 ag_card_good = 0; // * total card initialized STATIC U32 ag_option_flag = 0; // * adjustable parameter flag STATIC U32 agtiapi_1st_time = 1; STATIC U32 ag_timeout_secs = 10; //Made timeout equivalent to linux U32 gTiDebugLevel = 1; S32 ag_encryption_enable = 0; atomic_t outstanding_encrypted_io_count; #define cache_line_size() CACHE_LINE_SIZE #define PMCoffsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #define CPU_TO_LE32(dst, src) \ dst.lower = htole32(LOW_32_BITS(src)); \ dst.upper = htole32(HIGH_32_BITS(src)) #define CMND_TO_CHANNEL( ccb ) ( ccb->ccb_h.path_id ) #define CMND_TO_TARGET( ccb ) ( ccb->ccb_h.target_id ) #define CMND_TO_LUN( ccb ) ( ccb->ccb_h.target_lun ) STATIC U08 agtiapi_AddrModes[AGTIAPI_MAX_CHANNEL_NUM + 1] = { AGTIAPI_PERIPHERAL }; #ifdef LINUX_PERBI_SUPPORT // Holding area for target-WWN mapping assignments on the boot line static ag_mapping_t *agMappingList = NULL; // modified by agtiapi_Setup() #endif // * For Debugging Purpose #ifdef AGTIAPI_DEBUG #define AGTIAPI_WWN(name, len) wwnprintk(name, len) #else #define AGTIAPI_WWN(name, len) #endif #define AGTIAPI_WWNPRINTK(name, len, format, a...) 
                                                       \
  AGTIAPI_PRINTK(format "name ", a);                   \
  AGTIAPI_WWN((unsigned char*)name, len);

#define AGTIAPI_ERR_WWNPRINTK(name, len, format, a...) \
  printk(KERN_DEBUG format "name ", ## a);             \
  wwnprintk((unsigned char*)name, len);

#define AGTIAPI_CPY_DEV_INFO(root, dev, pDev)          \
  tiINIGetDeviceInfo(root, dev, &pDev->devInfo);       \
  wwncpy(pDev);

#ifdef AGTIAPI_LOCAL_LOCK
#define AG_CARD_LOCAL_LOCK(lock)     ,(lock)
#define AG_SPIN_LOCK_IRQ(lock, flags)
#define AG_SPIN_UNLOCK_IRQ(lock, flags)
#define AG_SPIN_LOCK(lock)
#define AG_SPIN_UNLOCK(lock)
#define AG_GLOBAL_ARG(arg)
#define AG_PERF_SPINLOCK(lock)
#define AG_PERF_SPINLOCK_IRQ(lock, flags)
#define AG_LOCAL_LOCK(lock)     if (lock) \
                                  mtx_lock(lock)
#define AG_LOCAL_UNLOCK(lock)   if (lock) \
                                  mtx_unlock(lock)
#define AG_LOCAL_FLAGS(_flags)  unsigned long _flags = 0
#endif

#define AG_GET_DONE_PCCB(pccb, pmcsc)            \
  {                                              \
    AG_LOCAL_LOCK(&pmcsc->doneLock);             \
    pccb = pmcsc->ccbDoneHead;                   \
    if (pccb != NULL)                            \
    {                                            \
      pmcsc->ccbDoneHead = NULL;                 \
      pmcsc->ccbDoneTail = NULL;                 \
      AG_LOCAL_UNLOCK(&pmcsc->doneLock);         \
      agtiapi_Done(pmcsc, pccb);                 \
    }                                            \
    else                                         \
      AG_LOCAL_UNLOCK(&pmcsc->doneLock);         \
  }

#define AG_GET_DONE_SMP_PCCB(pccb, pmcsc)        \
  {                                              \
    AG_LOCAL_LOCK(&pmcsc->doneSMPLock);          \
    pccb = pmcsc->smpDoneHead;                   \
    if (pccb != NULL)                            \
    {                                            \
      pmcsc->smpDoneHead = NULL;                 \
      pmcsc->smpDoneTail = NULL;                 \
      AG_LOCAL_UNLOCK(&pmcsc->doneSMPLock);      \
      agtiapi_SMPDone(pmcsc, pccb);              \
    }                                            \
    else                                         \
      AG_LOCAL_UNLOCK(&pmcsc->doneSMPLock);      \
  }

#ifdef AGTIAPI_DUMP_IO_DEBUG
#define AG_IO_DUMPCCB(pccb)   agtiapi_DumpCCB(pccb)
#else
#define AG_IO_DUMPCCB(pccb)
#endif

#define SCHED_DELAY_JIFFIES 4 /* in seconds */

#ifdef HOTPLUG_SUPPORT
#define AG_HOTPLUG_LOCK_INIT(lock)  mtx_init(lock)
#define AG_LIST_LOCK(lock)          mtx_lock(lock)
#define AG_LIST_UNLOCK(lock)        mtx_unlock(lock)
#else
#define AG_HOTPLUG_LOCK_INIT(lock)
#define AG_LIST_LOCK(lock)
#define AG_LIST_UNLOCK(lock)
#endif

STATIC void agtiapi_CheckIOTimeout(void *data);

static ag_card_info_t agCardInfoList[ AGTIAPI_MAX_CARDS ]; // card info list
static void agtiapi_cam_action( struct cam_sim *, union ccb * );
static void agtiapi_cam_poll( struct cam_sim * );

// Function prototypes
static d_open_t  agtiapi_open;
static d_close_t agtiapi_close;
static d_read_t  agtiapi_read;
static d_write_t agtiapi_write;
static d_ioctl_t agtiapi_CharIoctl;
static void agtiapi_async(void *callback_arg, u_int32_t code,
                          struct cam_path *path, void *arg);
void agtiapi_adjust_queue_depth(struct cam_path *path, bit32 QueueDepth);

// Character device entry points
static struct cdevsw agtiapi_cdevsw = {
  .d_version = D_VERSION,
  .d_open    = agtiapi_open,
  .d_close   = agtiapi_close,
  .d_read    = agtiapi_read,
  .d_write   = agtiapi_write,
  .d_ioctl   = agtiapi_CharIoctl,
  .d_name    = "pmspcv",
};

U32 maxTargets = 0;
U32 ag_portal_count = 0;

// In the cdevsw routines, we find our softc by using the si_drv1 member
// of struct cdev.  We set this variable to point to our softc in our
// attach routine when we create the /dev entry.

int agtiapi_open( struct cdev *dev, int oflags, int devtype, struct thread *td )
{
  struct agtiapi_softc *sc;
  /* Look up our softc. */
  sc = dev->si_drv1;
  AGTIAPI_PRINTK("agtiapi_open\n");
  AGTIAPI_PRINTK("Opened successfully. sc->my_dev %p\n", sc->my_dev);
  return( 0 );
}

int agtiapi_close( struct cdev *dev, int fflag, int devtype, struct thread *td )
{
  struct agtiapi_softc *sc;
  // Look up our softc
  sc = dev->si_drv1;
  AGTIAPI_PRINTK("agtiapi_close\n");
  AGTIAPI_PRINTK("Closed.
sc->my_dev %p\n", sc->my_dev); return( 0 ); } int agtiapi_read( struct cdev *dev, struct uio *uio, int ioflag ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK( "agtiapi_read\n" ); AGTIAPI_PRINTK( "Asked to read %lu bytes. sc->my_dev %p\n", uio->uio_resid, sc->my_dev ); return( 0 ); } int agtiapi_write( struct cdev *dev, struct uio *uio, int ioflag ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK( "agtiapi_write\n" ); AGTIAPI_PRINTK( "Asked to write %lu bytes. sc->my_dev %p\n", uio->uio_resid, sc->my_dev ); return( 0 ); } int agtiapi_getdevlist( struct agtiapi_softc *pCard, tiIOCTLPayload_t *agIOCTLPayload ) { tdDeviceListPayload_t *pIoctlPayload = (tdDeviceListPayload_t *) agIOCTLPayload->FunctionSpecificArea; tdDeviceInfoIOCTL_t *pDeviceInfo = NULL; bit8 *pDeviceInfoOrg; tdsaDeviceData_t *pDeviceData = NULL; tiDeviceHandle_t **devList = NULL; tiDeviceHandle_t **devHandleArray = NULL; tiDeviceHandle_t *pDeviceHandle = NULL; bit32 x, memNeeded1; bit32 count, total; bit32 MaxDeviceCount; bit32 ret_val=IOCTL_CALL_INVALID_CODE; ag_portal_data_t *pPortalData; bit8 *pDeviceHandleList = NULL; AGTIAPI_PRINTK( "agtiapi_getdevlist: Enter\n" ); pDeviceInfoOrg = pIoctlPayload -> pDeviceInfo; MaxDeviceCount = pCard->devDiscover; if (MaxDeviceCount > pIoctlPayload->deviceLength ) { AGTIAPI_PRINTK( "agtiapi_getdevlist: MaxDeviceCount: %d > Requested device length: %d\n", MaxDeviceCount, pIoctlPayload->deviceLength ); MaxDeviceCount = pIoctlPayload->deviceLength; ret_val = IOCTL_CALL_FAIL; } AGTIAPI_PRINTK( "agtiapi_getdevlist: MaxDeviceCount: %d > Requested device length: %d\n", MaxDeviceCount, pIoctlPayload->deviceLength ); memNeeded1 = AG_ALIGNSIZE( MaxDeviceCount * sizeof(tiDeviceHandle_t *), sizeof(void *) ); AGTIAPI_PRINTK("agtiapi_getdevlist: portCount %d\n", pCard->portCount); devList = malloc(memNeeded1, TEMP2, M_WAITOK); if (devList == NULL) { AGTIAPI_PRINTK("agtiapi_getdevlist: failed to allocate memory\n"); ret_val = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return ret_val; } osti_memset(devList, 0, memNeeded1); pPortalData = &pCard->pPortalData[0]; pDeviceHandleList = (bit8*)devList; for (total = x = 0; x < pCard->portCount; x++, pPortalData++) { count = tiINIGetDeviceHandlesForWinIOCTL(&pCard->tiRoot, &pPortalData->portalInfo.tiPortalContext, ( tiDeviceHandle_t **)pDeviceHandleList ,MaxDeviceCount ); if (count == DISCOVERY_IN_PROGRESS) { AGTIAPI_PRINTK( "agtiapi_getdevlist: DISCOVERY_IN_PROGRESS on " "portal %d\n", x ); free(devList, TEMP2); ret_val = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return ret_val; } total += count; pDeviceHandleList+= count*sizeof(tiDeviceHandle_t *); MaxDeviceCount-= count; } if (total > pIoctlPayload->deviceLength) { total = pIoctlPayload->deviceLength; } // dump device information from device handle list count = 0; devHandleArray = devList; for (x = 0; x < pCard->devDiscover; x++) { pDeviceHandle = (tiDeviceHandle_t*)devHandleArray[x]; if (devList[x] != agNULL) { pDeviceData = devList [x]->tdData; pDeviceInfo = (tdDeviceInfoIOCTL_t*)(pDeviceInfoOrg + sizeof(tdDeviceInfoIOCTL_t) * count); if (pDeviceData != agNULL && pDeviceInfo != agNULL) { osti_memcpy( &pDeviceInfo->sasAddressHi, pDeviceData->agDeviceInfo.sasAddressHi, sizeof(bit32) ); osti_memcpy( &pDeviceInfo->sasAddressLo, pDeviceData->agDeviceInfo.sasAddressLo, sizeof(bit32) ); #if 0 pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressHi ); 
          pDeviceInfo->sasAddressLo =
            DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressLo );
#endif
        pDeviceInfo->deviceType =
          ( pDeviceData->agDeviceInfo.devType_S_Rate & 0x30 ) >> 4;
        pDeviceInfo->linkRate = pDeviceData->agDeviceInfo.devType_S_Rate & 0x0F;
        pDeviceInfo->phyId = pDeviceData->phyID;
        pDeviceInfo->ishost = pDeviceData->target_ssp_stp_smp;
        pDeviceInfo->DeviceHandle = (unsigned long)pDeviceHandle;
        if (pDeviceInfo->deviceType == 0x02)
        {
          bit8 *sasAddressHi;
          bit8 *sasAddressLo;
          tiIniGetDirectSataSasAddr(&pCard->tiRoot, pDeviceData->phyID,
                                    &sasAddressHi, &sasAddressLo);
          pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32(*(bit32*)sasAddressHi);
          pDeviceInfo->sasAddressLo =
            DMA_BEBIT32_TO_BIT32(*(bit32*)sasAddressLo) + pDeviceData->phyID + 16;
        }
        else
        {
          pDeviceInfo->sasAddressHi =
            DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressHi );
          pDeviceInfo->sasAddressLo =
            DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressLo );
        }

        AGTIAPI_PRINTK( "agtiapi_getdevlist: devicetype %x\n",
                        pDeviceInfo->deviceType );
        AGTIAPI_PRINTK( "agtiapi_getdevlist: linkrate %x\n",
                        pDeviceInfo->linkRate );
        AGTIAPI_PRINTK( "agtiapi_getdevlist: phyID %x\n",
                        pDeviceInfo->phyId );
        AGTIAPI_PRINTK( "agtiapi_getdevlist: addresshi %x\n",
                        pDeviceInfo->sasAddressHi );
        AGTIAPI_PRINTK( "agtiapi_getdevlist: addresslo %x\n",
                        pDeviceInfo->sasAddressLo );
      }
      else
      {
        AGTIAPI_PRINTK( "agtiapi_getdevlist: pDeviceData %p or pDeviceInfo "
                        "%p is NULL %d\n", pDeviceData, pDeviceInfo, x );
      }
      count++;
    }
  }
  pIoctlPayload->realDeviceCount = count;
  AGTIAPI_PRINTK( "agtiapi_getdevlist: Exit RealDeviceCount = %d\n", count );
  if (devList)
  {
    free(devList, TEMP2);
  }
  if (ret_val != IOCTL_CALL_FAIL)
  {
    ret_val = IOCTL_CALL_SUCCESS;
  }
  agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK;
  return ret_val;
}

/******************************************************************************
agtiapi_getCardInfo()

Purpose:
  This function retrieves the Card information
Parameters:
Return:
  A number - error
  0 - HBA has been detected
Note:
******************************************************************************/
int agtiapi_getCardInfo ( struct agtiapi_softc *pCard,
                          U32_64                size,
                          void                 *buffer )
{
  CardInfo_t *pCardInfo;

  pCardInfo = (CardInfo_t *)buffer;

  pCardInfo->deviceId = pci_get_device(pCard->my_dev);
  pCardInfo->vendorId = pci_get_vendor(pCard->my_dev);
  memcpy( pCardInfo->pciMemBaseSpc,
          pCard->pCardInfo->pciMemBaseSpc,
          ((sizeof(U32_64))*PCI_NUMBER_BARS) );
  pCardInfo->deviceNum = pci_get_slot(pCard->my_dev);
  pCardInfo->pciMemBase = pCard->pCardInfo->pciMemBase;
  pCardInfo->pciIOAddrLow = pCard->pCardInfo->pciIOAddrLow;
  pCardInfo->pciIOAddrUp = pCard->pCardInfo->pciIOAddrUp;
  pCardInfo->busNum = pci_get_bus(pCard->my_dev);
  return 0;
}

void agtiapi_adjust_queue_depth(struct cam_path *path, bit32 QueueDepth)
{
  struct ccb_relsim crs;

  xpt_setup_ccb(&crs.ccb_h, path, 5);
  crs.ccb_h.func_code = XPT_REL_SIMQ;
  crs.ccb_h.flags = CAM_DEV_QFREEZE;
  crs.release_flags = RELSIM_ADJUST_OPENINGS;
  crs.openings = QueueDepth;
  xpt_action((union ccb *)&crs);
  if (crs.ccb_h.status != CAM_REQ_CMP)
  {
    printf("XPT_REL_SIMQ failed\n");
  }
}

static void agtiapi_async(void *callback_arg, u_int32_t code,
                          struct cam_path *path, void *arg)
{
  struct agtiapi_softc *pmsc;
  U32         TID;
  ag_device_t *targ;

  pmsc = (struct agtiapi_softc*)callback_arg;
  switch (code)
  {
    case AC_FOUND_DEVICE:
    {
      struct ccb_getdev *cgd;
      cgd = (struct ccb_getdev *)arg;
      if (cgd == NULL)
      {
        break;
      }
      TID = cgd->ccb_h.target_id;
      if (TID >= 0 && TID < maxTargets)
      {
        if (pmsc != NULL)
        {
          TID = INDEX(pmsc, TID);
          targ = &pmsc->pDevList[TID];
          agtiapi_adjust_queue_depth(path,
targ->qdepth); } } break; } default: break; } } /****************************************************************************** agtiapi_CharIoctl() Purpose: This function handles the ioctl from application layer Parameters: Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_CharIoctl( struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td ) { struct sema mx; datatosend *load; // structure defined in lxcommon.h tiIOCTLPayload_t *pIoctlPayload; struct agtiapi_softc *pCard; pCard=dev->si_drv1; void *param1 = NULL; void *param2 = NULL; void *param3 = NULL; U32 status = 0; U32 retValue; int err = 0; int error = 0; tdDeviceListPayload_t *pDeviceList = NULL; unsigned long flags; switch (cmd) { case AGTIAPI_IOCTL: load=(datatosend*)data; pIoctlPayload = malloc(load->datasize,TEMP,M_WAITOK); AGTIAPI_PRINTK( "agtiapi_CharIoctl: old load->datasize = %d\n", load->datasize ); //Copy payload to kernel buffer, on success it returns 0 err = copyin(load->data,pIoctlPayload,load->datasize); if (err) { status = IOCTL_CALL_FAIL; return status; } sema_init(&mx,0,"sem"); pCard->pIoctlSem =&mx; pCard->up_count = pCard->down_count = 0; if ( pIoctlPayload->MajorFunction == IOCTL_MJ_GET_DEVICE_LIST ) { retValue = agtiapi_getdevlist(pCard, pIoctlPayload); if (retValue == 0) { pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } //update new device length pDeviceList = (tdDeviceListPayload_t*)pIoctlPayload->FunctionSpecificArea; load->datasize =load->datasize - sizeof(tdDeviceInfoIOCTL_t) * (pDeviceList->deviceLength - pDeviceList->realDeviceCount); AGTIAPI_PRINTK( "agtiapi_CharIoctl: new load->datasize = %d\n", load->datasize ); } else if (pIoctlPayload->MajorFunction == IOCTL_MN_GET_CARD_INFO) { retValue = agtiapi_getCardInfo( pCard, pIoctlPayload->Length, (pIoctlPayload->FunctionSpecificArea) ); if (retValue == 0) { pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } } else if ( pIoctlPayload->MajorFunction == IOCTL_MJ_CHECK_DPMC_EVENT ) { if ( pCard->flags & AGTIAPI_PORT_PANIC ) { strcpy ( pIoctlPayload->FunctionSpecificArea, "DPMC LEAN\n" ); } else { strcpy ( pIoctlPayload->FunctionSpecificArea, "do not dpmc lean\n" ); } pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else if (pIoctlPayload->MajorFunction == IOCTL_MJ_CHECK_FATAL_ERROR ) { AGTIAPI_PRINTK("agtiapi_CharIoctl: IOCTL_MJ_CHECK_FATAL_ERROR call received for card %d\n", pCard->cardNo); //read port status to see if there is a fatal event if(pCard->flags & AGTIAPI_PORT_PANIC) { printf("agtiapi_CharIoctl: Port Panic Status For Card %d is True\n",pCard->cardNo); pIoctlPayload->Status = IOCTL_MJ_FATAL_ERR_CHK_SEND_TRUE; } else { AGTIAPI_PRINTK("agtiapi_CharIoctl: Port Panic Status For Card %d is False\n",pCard->cardNo); pIoctlPayload->Status = IOCTL_MJ_FATAL_ERR_CHK_SEND_FALSE; } status = IOCTL_CALL_SUCCESS; } else if (pIoctlPayload->MajorFunction == IOCTL_MJ_FATAL_ERROR_DUMP_COMPLETE) { AGTIAPI_PRINTK("agtiapi_CharIoctl: IOCTL_MJ_FATAL_ERROR_DUMP_COMPLETE call received for card %d\n", pCard->cardNo); //set flags bit status to be a soft reset pCard->flags |= AGTIAPI_SOFT_RESET; //trigger soft reset for the card retValue = agtiapi_ResetCard (pCard, &flags); if(retValue == AGTIAPI_SUCCESS) { //clear port panic status 
        pCard->flags &= ~AGTIAPI_PORT_PANIC;
        pIoctlPayload->Status = IOCTL_MJ_FATAL_ERROR_SOFT_RESET_TRIG;
        status = IOCTL_CALL_SUCCESS;
      }
      else
      {
        pIoctlPayload->Status = IOCTL_CALL_FAIL;
        status = IOCTL_CALL_FAIL;
      }
    }
    else
    {
      status = tiCOMMgntIOCTL( &pCard->tiRoot,
                               pIoctlPayload,
                               pCard,
                               param2,
                               param3 );
      if (status == IOCTL_CALL_PENDING)
      {
        ostiIOCTLWaitForSignal(&pCard->tiRoot, NULL, NULL, NULL);
        status = IOCTL_CALL_SUCCESS;
      }
    }
    pCard->pIoctlSem = NULL;
    err = 0;

    //copy kernel buffer to userland buffer
    err = copyout(pIoctlPayload, load->data, load->datasize);
    if (err)
    {
      status = IOCTL_CALL_FAIL;
      return status;
    }
    free(pIoctlPayload, TEMP);
    pIoctlPayload = NULL;
    break;
  default:
    error = ENOTTY;
    break;
  }
  return(status);
}

/******************************************************************************
agtiapi_probe()

Purpose:
  This function probes each detected PMC-Sierra HBA; it is the first driver
  entry point called for a card, before agtiapi_attach().
Parameters:
  device_t dev (IN)  - device pointer
Return:
  A number - error
  0 - HBA has been detected
Note:
******************************************************************************/
static int agtiapi_probe( device_t dev )
{
  int retVal;
  int thisCard;
  ag_card_info_t *thisCardInst;

  thisCard = device_get_unit( dev );
  if ( thisCard >= AGTIAPI_MAX_CARDS )
  {
    device_printf( dev, "Too many PMC-Sierra cards detected ERROR!\n" );
    return (ENXIO); // maybe change to different return value?
  }
  thisCardInst = &agCardInfoList[ thisCard ];
  retVal = agtiapi_ProbeCard( dev, thisCardInst, thisCard );
  if ( retVal )
    return (ENXIO); // maybe change to different return value?
  return( BUS_PROBE_DEFAULT );  // successful probe
}

/******************************************************************************
agtiapi_attach()

Purpose:
  This function initializes and registers all detected HBAs.  It is the
  first function called in the driver after agtiapi_probe().
Parameters:
  device_t dev (IN)  - device pointer
Return:
  A number - error
  0 - HBA has been detected
Note:
******************************************************************************/
static int agtiapi_attach( device_t devx )
{
  // keeping get_unit call to once
  int                   thisCard = device_get_unit( devx );
  struct agtiapi_softc *pmsc;
  ag_card_info_t       *thisCardInst = &agCardInfoList[ thisCard ];
  ag_resource_info_t   *pRscInfo;
  int                   idx;
  int                   lenRecv;
  char                  buffer [256], *pLastUsedChar;
  union ccb            *ccb;
  int                   bus, tid, lun;
  struct ccb_setasync   csa;

  AGTIAPI_PRINTK("agtiapi_attach: start dev %p thisCard %d\n", devx, thisCard);
  // AGTIAPI_PRINTK( "agtiapi_attach: entry pointer values A %p / %p\n",
  //                 thisCardInst->pPCIDev, thisCardInst );
  AGTIAPI_PRINTK( "agtiapi_attach: deviceID: 0x%x\n", pci_get_devid( devx ) );

  TUNABLE_INT_FETCH( "DPMC_TIMEOUT_SECS", &ag_timeout_secs );
  TUNABLE_INT_FETCH( "DPMC_TIDEBUG_LEVEL", &gTiDebugLevel );
  // printf( "agtiapi_attach: debugLevel %d, timeout %d\n",
  //         gTiDebugLevel, ag_timeout_secs );
  if ( ag_timeout_secs < 1 )
  {
    ag_timeout_secs = 1; // set minimum timeout value of 1 second
  }
  ag_timeout_secs = (ag_timeout_secs * 1000); // convert to millisecond notation
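  /*
   * Usage note (illustrative, not part of this driver): the two
   * TUNABLE_INT_FETCH() calls above read loader tunables, so both values
   * can be preset from /boot/loader.conf before the module loads, e.g.
   * (illustrative values):
   *
   *   DPMC_TIMEOUT_SECS="30"
   *   DPMC_TIDEBUG_LEVEL="2"
   */

  // Look up our softc and initialize its fields.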
pmsc = device_get_softc( devx ); pmsc->my_dev = devx; /* Get NumberOfPortals */ if ((ostiGetTransportParam( &pmsc->tiRoot, "Global", "CardDefault", agNULL, agNULL, agNULL, agNULL, "NumberOfPortals", buffer, 255, &lenRecv ) == tiSuccess) && (lenRecv != 0)) { if (osti_strncmp(buffer, "0x", 2) == 0) { ag_portal_count = osti_strtoul (buffer, &pLastUsedChar, 0); } else { ag_portal_count = osti_strtoul (buffer, &pLastUsedChar, 10); } if (ag_portal_count > AGTIAPI_MAX_PORTALS) ag_portal_count = AGTIAPI_MAX_PORTALS; } else { ag_portal_count = AGTIAPI_MAX_PORTALS; } AGTIAPI_PRINTK( "agtiapi_attach: ag_portal_count=%d\n", ag_portal_count ); // initialize hostdata structure pmsc->flags |= AGTIAPI_INIT_TIME | AGTIAPI_SCSI_REGISTERED | AGTIAPI_INITIATOR; pmsc->cardNo = thisCard; pmsc->ccbTotal = 0; pmsc->portCount = ag_portal_count; pmsc->pCardInfo = thisCardInst; pmsc->tiRoot.osData = pmsc; pmsc->pCardInfo->pCard = (void *)pmsc; pmsc->VidDid = ( pci_get_vendor(devx) << 16 ) | pci_get_device( devx ); pmsc->SimQFrozen = agFALSE; pmsc->devq_flag = agFALSE; pRscInfo = &thisCardInst->tiRscInfo; osti_memset(buffer, 0, 256); lenRecv = 0; /* Get MaxTargets */ if ((ostiGetTransportParam( &pmsc->tiRoot, "Global", "InitiatorParms", agNULL, agNULL, agNULL, agNULL, "MaxTargets", buffer, sizeof(buffer), &lenRecv ) == tiSuccess) && (lenRecv != 0)) { if (osti_strncmp(buffer, "0x", 2) == 0) { maxTargets = osti_strtoul (buffer, &pLastUsedChar, 0); AGTIAPI_PRINTK( "agtiapi_attach: maxTargets = osti_strtoul 0 \n" ); } else { maxTargets = osti_strtoul (buffer, &pLastUsedChar, 10); AGTIAPI_PRINTK( "agtiapi_attach: maxTargets = osti_strtoul 10\n" ); } } else { if(Is_ADP8H(pmsc)) maxTargets = AGTIAPI_MAX_DEVICE_8H; else if(Is_ADP7H(pmsc)) maxTargets = AGTIAPI_MAX_DEVICE_7H; else maxTargets = AGTIAPI_MAX_DEVICE; } if (maxTargets > AGTIAPI_HW_LIMIT_DEVICE) { AGTIAPI_PRINTK( "agtiapi_attach: maxTargets: %d > AGTIAPI_HW_LIMIT_DEVICE: %d\n", maxTargets, AGTIAPI_HW_LIMIT_DEVICE ); AGTIAPI_PRINTK( "agtiapi_attach: change maxTargets = AGTIAPI_HW_LIMIT_DEVICE\n" ); maxTargets = AGTIAPI_HW_LIMIT_DEVICE; } pmsc->devDiscover = maxTargets ; #ifdef HIALEAH_ENCRYPTION ag_encryption_enable = 1; if(ag_encryption_enable && pci_get_device(pmsc->pCardInfo->pPCIDev) == PCI_DEVICE_ID_HIALEAH_HBA_SPCVE) { pmsc->encrypt = 1; pRscInfo->tiLoLevelResource.loLevelOption.encryption = agTRUE; printf("agtiapi_attach: Encryption Enabled\n" ); } #endif // ## for now, skip calls to ostiGetTransportParam(...) // ## for now, skip references to DIF & EDC // Create a /dev entry for this device. The kernel will assign us // a major number automatically. We use the unit number of this // device as the minor number and name the character device // "agtiapi". 
pmsc->my_cdev = make_dev( &agtiapi_cdevsw, thisCard, UID_ROOT, GID_WHEEL, 0600, "spcv%u", thisCard ); pmsc->my_cdev->si_drv1 = pmsc; mtx_init( &thisCardInst->pmIOLock, "pmc SAS I/O lock", NULL, MTX_DEF|MTX_RECURSE ); struct cam_devq *devq; /* set the maximum number of pending IOs */ devq = cam_simq_alloc( AGTIAPI_MAX_CAM_Q_DEPTH ); if (devq == NULL) { AGTIAPI_PRINTK("agtiapi_attach: cam_simq_alloc is NULL\n" ); return( EIO ); } struct cam_sim *lsim; lsim = cam_sim_alloc( agtiapi_cam_action, agtiapi_cam_poll, "pmspcbsd", pmsc, thisCard, &thisCardInst->pmIOLock, 1, // queued per target AGTIAPI_MAX_CAM_Q_DEPTH, // max tag depth devq ); if ( lsim == NULL ) { cam_simq_free( devq ); AGTIAPI_PRINTK("agtiapi_attach: cam_sim_alloc is NULL\n" ); return( EIO ); } pmsc->dev_scan = agFALSE; //one cam sim per scsi bus mtx_lock( &thisCardInst->pmIOLock ); if ( xpt_bus_register( lsim, devx, 0 ) != CAM_SUCCESS ) { // bus 0 cam_sim_free( lsim, TRUE ); mtx_unlock( &thisCardInst->pmIOLock ); AGTIAPI_PRINTK("agtiapi_attach: xpt_bus_register fails\n" ); return( EIO ); } pmsc->sim = lsim; bus = cam_sim_path(pmsc->sim); tid = CAM_TARGET_WILDCARD; lun = CAM_LUN_WILDCARD; ccb = xpt_alloc_ccb_nowait(); if (ccb == agNULL) { mtx_unlock( &thisCardInst->pmIOLock ); cam_sim_free( lsim, TRUE ); cam_simq_free( devq ); return ( EIO ); } if (xpt_create_path(&ccb->ccb_h.path, agNULL, bus, tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock( &thisCardInst->pmIOLock ); cam_sim_free( lsim, TRUE ); cam_simq_free( devq ); xpt_free_ccb(ccb); return( EIO ); } pmsc->path = ccb->ccb_h.path; xpt_setup_ccb(&csa.ccb_h, pmsc->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = agtiapi_async; csa.callback_arg = pmsc; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { AGTIAPI_PRINTK("agtiapi_attach: Unable to register AC_FOUND_DEVICE\n" ); } lsim->devq = devq; mtx_unlock( &thisCardInst->pmIOLock ); // get TD and lower layer memory requirements tiCOMGetResource( &pmsc->tiRoot, &pRscInfo->tiLoLevelResource, &pRscInfo->tiInitiatorResource, NULL, &pRscInfo->tiSharedMem ); agtiapi_ScopeDMARes( thisCardInst ); AGTIAPI_PRINTK( "agtiapi_attach: size from the call agtiapi_ScopeDMARes" " 0x%x \n", pmsc->typhn ); // initialize card information and get resource ready if( agtiapi_InitResource( thisCardInst ) == AGTIAPI_FAIL ) { AGTIAPI_PRINTK( "agtiapi_attach: Card %d initialize resource ERROR\n", thisCard ); } // begin: allocate and initialize card portal info resource ag_portal_data_t *pPortalData; if (pmsc->portCount == 0) { pmsc->pPortalData = NULL; } else { pmsc->pPortalData = (ag_portal_data_t *) malloc( sizeof(ag_portal_data_t) * pmsc->portCount, M_PMC_MPRT, M_ZERO | M_WAITOK ); if (pmsc->pPortalData == NULL) { AGTIAPI_PRINTK( "agtiapi_attach: Portal memory allocation ERROR\n" ); } } pPortalData = pmsc->pPortalData; for( idx = 0; idx < pmsc->portCount; idx++ ) { pPortalData->pCard = pmsc; pPortalData->portalInfo.portID = idx; pPortalData->portalInfo.tiPortalContext.osData = (void *)pPortalData; pPortalData++; } // end: allocate and initialize card portal info resource // begin: enable msix // setup msix // map to interrupt handler int error = 0; int mesgs = MAX_MSIX_NUM_VECTOR; int i, cnt; void (*intrHandler[MAX_MSIX_NUM_ISR])(void *arg) = { agtiapi_IntrHandler0, agtiapi_IntrHandler1, agtiapi_IntrHandler2, agtiapi_IntrHandler3, agtiapi_IntrHandler4, agtiapi_IntrHandler5, agtiapi_IntrHandler6, agtiapi_IntrHandler7, agtiapi_IntrHandler8, agtiapi_IntrHandler9, agtiapi_IntrHandler10, 
agtiapi_IntrHandler11, agtiapi_IntrHandler12, agtiapi_IntrHandler13, agtiapi_IntrHandler14, agtiapi_IntrHandler15 }; cnt = pci_msix_count(devx); AGTIAPI_PRINTK("supported MSIX %d\n", cnt); //this should be 64 mesgs = MIN(mesgs, cnt); error = pci_alloc_msix(devx, &mesgs); if (error != 0) { printf( "pci_alloc_msix error %d\n", error ); AGTIAPI_PRINTK("error %d\n", error); return( EIO ); } for(i=0; i < mesgs; i++) { pmsc->rscID[i] = i + 1; pmsc->irq[i] = bus_alloc_resource_any( devx, SYS_RES_IRQ, &pmsc->rscID[i], RF_ACTIVE ); if( pmsc->irq[i] == NULL ) { printf( "RES_IRQ went terribly bad at %d\n", i ); return( EIO ); } if ( (error = bus_setup_intr( devx, pmsc->irq[i], INTR_TYPE_CAM | INTR_MPSAFE, NULL, intrHandler[i], pmsc, &pmsc->intrcookie[i] ) ) != 0 ) { device_printf( devx, "Failed to register handler" ); return( EIO ); } } pmsc->flags |= AGTIAPI_IRQ_REQUESTED; pmsc->pCardInfo->maxInterruptVectors = MAX_MSIX_NUM_VECTOR; // end: enable msix int ret = 0; ret = agtiapi_InitCardSW(pmsc); if (ret == AGTIAPI_FAIL || ret == AGTIAPI_UNKNOWN) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_InitCardSW failure %d\n", ret ); return( EIO ); } pmsc->ccbFreeList = NULL; pmsc->ccbChainList = NULL; pmsc->ccbAllocList = NULL; pmsc->flags |= ( AGTIAPI_INSTALLED ); ret = agtiapi_alloc_requests( pmsc ); if( ret != 0 ) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_alloc_requests failure %d\n", ret ); return( EIO ); } ret = agtiapi_alloc_ostimem( pmsc ); if (ret != AGTIAPI_SUCCESS) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_alloc_ostimem failure %d\n", ret ); return( EIO ); } ret = agtiapi_InitCardHW( pmsc ); if (ret != 0) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_InitCardHW failure %d\n", ret ); return( EIO ); } #ifdef HIALEAH_ENCRYPTION if(pmsc->encrypt) { if((agtiapi_SetupEncryption(pmsc)) < 0) AGTIAPI_PRINTK("SetupEncryption returned less than 0\n"); } #endif pmsc->flags &= ~AGTIAPI_INIT_TIME; return( 0 ); } /****************************************************************************** agtiapi_InitCardSW() Purpose: Host Bus Adapter Initialization Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: TBD, need chip register information ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitCardSW( struct agtiapi_softc *pmsc ) { ag_card_info_t *thisCardInst = pmsc->pCardInfo; ag_resource_info_t *pRscInfo = &thisCardInst->tiRscInfo; int initSWIdx; // begin: agtiapi_InitCardSW() // now init some essential locks n agtiapi_InitCardSW mtx_init( &pmsc->sendLock, "local q send lock", NULL, MTX_DEF ); mtx_init( &pmsc->doneLock, "local q done lock", NULL, MTX_DEF ); mtx_init( &pmsc->sendSMPLock, "local q send lock", NULL, MTX_DEF ); mtx_init( &pmsc->doneSMPLock, "local q done lock", NULL, MTX_DEF ); mtx_init( &pmsc->ccbLock, "ccb list lock", NULL, MTX_DEF ); mtx_init( &pmsc->devListLock, "hotP devListLock", NULL, MTX_DEF ); mtx_init( &pmsc->memLock, "dynamic memory lock", NULL, MTX_DEF ); mtx_init( &pmsc->freezeLock, "sim freeze lock", NULL, MTX_DEF | MTX_RECURSE); // initialize lower layer resources //## if (pCard->flags & AGTIAPI_INIT_TIME) { #ifdef HIALEAH_ENCRYPTION /* Enable encryption if chip supports it */ if (pci_get_device(pmsc->pCardInfo->pPCIDev) == PCI_DEVICE_ID_HIALEAH_HBA_SPCVE) pmsc->encrypt = 1; if (pmsc->encrypt) pRscInfo->tiLoLevelResource.loLevelOption.encryption = agTRUE; #endif pmsc->flags &= ~(AGTIAPI_PORT_INITIALIZED | AGTIAPI_SYS_INTR_ON); // For now, up to 16 
MSIX vectors are supported thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption. maxInterruptVectors = pmsc->pCardInfo->maxInterruptVectors; AGTIAPI_PRINTK( "agtiapi_InitCardSW: maxInterruptVectors set to %d", pmsc->pCardInfo->maxInterruptVectors ); thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption.max_MSI_InterruptVectors = 0; thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption.flag = 0; pRscInfo->tiLoLevelResource.loLevelOption.maxNumOSLocks = 0; AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMInit root %p, dev %p, pmsc %p\n", &pmsc->tiRoot, pmsc->my_dev, pmsc ); if( tiCOMInit( &pmsc->tiRoot, &thisCardInst->tiRscInfo.tiLoLevelResource, &thisCardInst->tiRscInfo.tiInitiatorResource, NULL, &thisCardInst->tiRscInfo.tiSharedMem ) != tiSuccess ) { AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMInit ERROR\n" ); return AGTIAPI_FAIL; } int maxLocks; maxLocks = pRscInfo->tiLoLevelResource.loLevelOption.numOfQueuesPerPort; pmsc->STLock = malloc( ( maxLocks * sizeof(struct mtx) ), M_PMC_MSTL, M_ZERO | M_WAITOK ); for( initSWIdx = 0; initSWIdx < maxLocks; initSWIdx++ ) { // init all indexes mtx_init( &pmsc->STLock[initSWIdx], "LL & TD lock", NULL, MTX_DEF ); } if( tiCOMPortInit( &pmsc->tiRoot, agFALSE ) != tiSuccess ) { printf( "agtiapi_InitCardSW: tiCOMPortInit ERROR -- AGTIAPI_FAIL\n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMPortInit" " root %p, dev %p, pmsc %p\n", &pmsc->tiRoot, pmsc->my_dev, pmsc ); pmsc->flags |= AGTIAPI_PORT_INITIALIZED; pmsc->freezeSim = agFALSE; #ifdef HIALEAH_ENCRYPTION atomic_set(&outstanding_encrypted_io_count, 0); /*fix below*/ /*if(pmsc->encrypt && (pmsc->flags & AGTIAPI_INIT_TIME)) if((agtiapi_SetupEncryptionPools(pmsc)) != 0) printf("SetupEncryptionPools failed\n"); */ #endif return AGTIAPI_SUCCESS; // end: agtiapi_InitCardSW() } /****************************************************************************** agtiapi_InitCardHW() Purpose: Host Bus Adapter Initialization Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: TBD, need chip register information ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitCardHW( struct agtiapi_softc *pmsc ) { U32 numVal; U32 count; U32 loop; // begin: agtiapi_InitCardHW() ag_portal_info_t *pPortalInfo = NULL; ag_portal_data_t *pPortalData; // ISR is registered, enable chip interrupt. 
tiCOMSystemInterruptsActive( &pmsc->tiRoot, agTRUE ); pmsc->flags |= AGTIAPI_SYS_INTR_ON; numVal = sizeof(ag_device_t) * pmsc->devDiscover; pmsc->pDevList = (ag_device_t *)malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK ); if( !pmsc->pDevList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d DevList ERROR\n", numVal ); panic( "agtiapi_InitCardHW\n" ); return AGTIAPI_FAIL; } #ifdef LINUX_PERBI_SUPPORT numVal = sizeof(ag_slr_map_t) * pmsc->devDiscover; pmsc->pSLRList = (ag_slr_map_t *)malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK ); if( !pmsc->pSLRList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d SLRList ERROR\n", numVal ); panic( "agtiapi_InitCardHW SLRL\n" ); return AGTIAPI_FAIL; } numVal = sizeof(ag_tgt_map_t) * pmsc->devDiscover; pmsc->pWWNList = (ag_tgt_map_t *)malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK ); if( !pmsc->pWWNList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d WWNList ERROR\n", numVal ); panic( "agtiapi_InitCardHW WWNL\n" ); return AGTIAPI_FAIL; } // Get the WWN_to_target_ID mappings from the // holding area which contains the input of the // system configuration file. if( ag_Perbi ) agtiapi_GetWWNMappings( pmsc, agMappingList ); else { agtiapi_GetWWNMappings( pmsc, 0 ); if( agMappingList ) printf( "agtiapi_InitCardHW: WWN PERBI disabled WARN\n" ); } #endif //agtiapi_DelaySec(5); DELAY( 500000 ); pmsc->tgtCount = 0; pmsc->flags &= ~AGTIAPI_CB_DONE; pPortalData = pmsc->pPortalData; //start port for (count = 0; count < pmsc->portCount; count++) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); pPortalInfo = &pPortalData->portalInfo; pPortalInfo->portStatus &= ~( AGTIAPI_PORT_START | AGTIAPI_PORT_DISC_READY | AGTIAPI_DISC_DONE | AGTIAPI_DISC_COMPLETE ); for (loop = 0; loop < AGTIAPI_LOOP_MAX; loop++) { AGTIAPI_PRINTK( "tiCOMPortStart entry data %p / %d / %p\n", &pmsc->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext ); if( tiCOMPortStart( &pmsc->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext, 0 ) != tiSuccess ) { AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); agtiapi_DelayMSec( AGTIAPI_EXTRA_DELAY ); AG_SPIN_LOCK_IRQ(agtiapi_host_lock, flags); AGTIAPI_PRINTK( "tiCOMPortStart failed -- no loop, portalData %p\n", pPortalData ); } else { AGTIAPI_PRINTK( "tiCOMPortStart success no loop, portalData %p\n", pPortalData ); break; } } // end of for loop /* release lock */ AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); if( loop >= AGTIAPI_LOOP_MAX ) { return AGTIAPI_FAIL; } tiCOMGetPortInfo( &pmsc->tiRoot, &pPortalInfo->tiPortalContext, &pPortalInfo->tiPortInfo ); pPortalData++; } /* discover target device */ #ifndef HOTPLUG_SUPPORT agtiapi_DiscoverTgt( pCard ); #endif pmsc->flags |= AGTIAPI_INSTALLED; if( pmsc->flags & AGTIAPI_INIT_TIME ) { agtiapi_TITimer( (void *)pmsc ); pmsc->flags |= AGTIAPI_TIMER_ON; } return 0; } /****************************************************************************** agtiapi_IntrHandlerx_() Purpose: Interrupt service routine. 
Parameters: void arg (IN) Pointer to the HBA data structure bit32 idx (IN) Vector index ******************************************************************************/ void agtiapi_IntrHandlerx_( void *arg, int index ) { struct agtiapi_softc *pCard; int rv; pCard = (struct agtiapi_softc *)arg; #ifndef AGTIAPI_DPC ccb_t *pccb; #endif AG_LOCAL_LOCK(&(pCard->pCardInfo->pmIOLock)); AG_PERF_SPINLOCK(agtiapi_host_lock); if (pCard->flags & AGTIAPI_SHUT_DOWN) goto ext; rv = tiCOMInterruptHandler(&pCard->tiRoot, index); if (rv == agFALSE) { /* not our irq */ AG_SPIN_UNLOCK(agtiapi_host_lock); AG_LOCAL_UNLOCK(&(pCard->pCardInfo->pmIOLock)); return; } #ifdef AGTIAPI_DPC tasklet_hi_schedule(&pCard->tasklet_dpc[idx]); #else /* consume all completed entries, 100 is random number to be big enough */ tiCOMDelayedInterruptHandler(&pCard->tiRoot, index, 100, tiInterruptContext); AG_GET_DONE_PCCB(pccb, pCard); AG_GET_DONE_SMP_PCCB(pccb, pCard); #endif ext: AG_SPIN_UNLOCK(agtiapi_host_lock); AG_LOCAL_UNLOCK(&(pCard->pCardInfo->pmIOLock)); return; } /****************************************************************************** agtiapi_IntrHandler0() Purpose: Interrupt service routine for interrupt vector index 0. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler0( void *arg ) { agtiapi_IntrHandlerx_( arg, 0 ); return; } /****************************************************************************** agtiapi_IntrHandler1() Purpose: Interrupt service routine for interrupt vector index 1. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler1( void *arg ) { agtiapi_IntrHandlerx_( arg, 1 ); return; } /****************************************************************************** agtiapi_IntrHandler2() Purpose: Interrupt service routine for interrupt vector index 2. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler2( void *arg ) { agtiapi_IntrHandlerx_( arg, 2 ); return; } /****************************************************************************** agtiapi_IntrHandler3() Purpose: Interrupt service routine for interrupt vector index 3. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler3( void *arg ) { agtiapi_IntrHandlerx_( arg, 3 ); return; } /****************************************************************************** agtiapi_IntrHandler4() Purpose: Interrupt service routine for interrupt vector index 4. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler4( void *arg ) { agtiapi_IntrHandlerx_( arg, 4 ); return; } /****************************************************************************** agtiapi_IntrHandler5() Purpose: Interrupt service routine for interrupt vector index 5. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler5( void *arg ) { agtiapi_IntrHandlerx_( arg, 5 ); return; } /****************************************************************************** agtiapi_IntrHandler6() Purpose: Interrupt service routine for interrupt vector index 6. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler6( void *arg ) { agtiapi_IntrHandlerx_( arg, 6 ); return; } /****************************************************************************** agtiapi_IntrHandler7() Purpose: Interrupt service routine for interrupt vector index 7. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler7( void *arg ) { agtiapi_IntrHandlerx_( arg, 7 ); return; } /****************************************************************************** agtiapi_IntrHandler8() Purpose: Interrupt service routine for interrupt vector index 8. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler8( void *arg ) { agtiapi_IntrHandlerx_( arg, 8 ); return; } /****************************************************************************** agtiapi_IntrHandler9() Purpose: Interrupt service routine for interrupt vector index 9. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler9( void *arg ) { agtiapi_IntrHandlerx_( arg, 9 ); return; } /****************************************************************************** agtiapi_IntrHandler10() Purpose: Interrupt service routine for interrupt vector index 10. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler10( void *arg ) { agtiapi_IntrHandlerx_( arg, 10 ); return; } /****************************************************************************** agtiapi_IntrHandler11() Purpose: Interrupt service routine for interrupt vector index 11. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler11( void *arg ) { agtiapi_IntrHandlerx_( arg, 11 ); return; } /****************************************************************************** agtiapi_IntrHandler12() Purpose: Interrupt service routine for interrupt vector index 12. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler12( void *arg ) { agtiapi_IntrHandlerx_( arg, 12 ); return; } /****************************************************************************** agtiapi_IntrHandler13() Purpose: Interrupt service routine for interrupt vector index 13. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler13( void *arg ) { agtiapi_IntrHandlerx_( arg, 13 ); return; } /****************************************************************************** agtiapi_IntrHandler14() Purpose: Interrupt service routine for interrupt vector index 14. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler14( void *arg ) { agtiapi_IntrHandlerx_( arg, 14 ); return; } /****************************************************************************** agtiapi_IntrHandler15() Purpose: Interrupt service routine for interrupt vector index 15. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler15( void *arg ) { agtiapi_IntrHandlerx_( arg, 15 ); return; } static void agtiapi_SglMemoryCB( void *arg, bus_dma_segment_t *dm_segs, int nseg, int error ) { bus_addr_t *addr; AGTIAPI_PRINTK("agtiapi_SglMemoryCB: start\n"); if (error != 0) { AGTIAPI_PRINTK("agtiapi_SglMemoryCB: error %d\n", error); panic("agtiapi_SglMemoryCB: error %d\n", error); return; } addr = arg; *addr = dm_segs[0].ds_addr; return; } static void agtiapi_MemoryCB( void *arg, bus_dma_segment_t *dm_segs, int nseg, int error ) { bus_addr_t *addr; AGTIAPI_PRINTK("agtiapi_MemoryCB: start\n"); if (error != 0) { AGTIAPI_PRINTK("agtiapi_MemoryCB: error %d\n", error); panic("agtiapi_MemoryCB: error %d\n", error); return; } addr = arg; *addr = dm_segs[0].ds_addr; return; } /****************************************************************************** agtiapi_alloc_requests() Purpose: Allocates resources such as dma tag and timer Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: ******************************************************************************/ int agtiapi_alloc_requests( struct agtiapi_softc *pmcsc ) { int rsize, nsegs; U32 next_tick; nsegs = AGTIAPI_NSEGS; rsize = AGTIAPI_MAX_DMA_SEGS; // 128 AGTIAPI_PRINTK( "agtiapi_alloc_requests: MAXPHYS 0x%x PAGE_SIZE 0x%x \n", MAXPHYS, PAGE_SIZE ); AGTIAPI_PRINTK( "agtiapi_alloc_requests: nsegs %d rsize %d \n", nsegs, rsize ); // 32, 128 // This is for csio->data_ptr if( bus_dma_tag_create( agNULL, // parent 1, // alignment 0, // boundary BUS_SPACE_MAXADDR, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg BUS_SPACE_MAXSIZE_32BIT, // maxsize nsegs, // nsegments BUS_SPACE_MAXSIZE_32BIT, // maxsegsize BUS_DMA_ALLOCNOW, // flags busdma_lock_mutex, // lockfunc &pmcsc->pCardInfo->pmIOLock, // lockarg &pmcsc->buffer_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot alloc request DMA tag\n" ); return( ENOMEM ); } // This is for tiSgl_t of pccb in agtiapi_PrepCCBs() rsize = (sizeof(tiSgl_t) * AGTIAPI_NSEGS) * AGTIAPI_CCB_PER_DEVICE * maxTargets; AGTIAPI_PRINTK( "agtiapi_alloc_requests: rsize %d \n", rsize ); // 32, 128 if( bus_dma_tag_create( agNULL, // parent 32, // alignment 0, // boundary BUS_SPACE_MAXADDR_32BIT, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg rsize, // maxsize 1, // nsegments rsize, // maxsegsize BUS_DMA_ALLOCNOW, // flags NULL, // lockfunc NULL, // lockarg &pmcsc->tisgl_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot alloc request DMA tag\n" ); return( ENOMEM ); } if( bus_dmamem_alloc( pmcsc->tisgl_dmat, (void **)&pmcsc->tisgl_mem, BUS_DMA_NOWAIT, &pmcsc->tisgl_map ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot allocate SGL memory\n" ); return( ENOMEM ); } bzero( pmcsc->tisgl_mem, rsize ); bus_dmamap_load( pmcsc->tisgl_dmat, pmcsc->tisgl_map, pmcsc->tisgl_mem, rsize, agtiapi_SglMemoryCB, &pmcsc->tisgl_busaddr, BUS_DMA_NOWAIT /* 0 */ ); mtx_init( 
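/* Aside on the two bus_dma_tag_create() calls above (illustrative, hedged):
   a tag encodes every restriction that later dmamap loads must honor. A
   minimal sketch of the same call for a hypothetical device "dev" (all
   names below are placeholders, not the driver's):

     bus_dma_tag_t tag;
     int err = bus_dma_tag_create(
         bus_get_dma_tag( dev ),  // parent: inherit platform limits
         1,                       // alignment, in bytes
         0,                       // boundary: no crossing restriction
         BUS_SPACE_MAXADDR,       // lowaddr: reachable address ceiling
         BUS_SPACE_MAXADDR,       // highaddr
         NULL, NULL,              // filter, filterarg (legacy, unused)
         MAXPHYS,                 // maxsize of one mapping
         AGTIAPI_NSEGS,           // nsegments per mapping
         MAXPHYS,                 // maxsegsz of any single segment
         0,                       // flags
         NULL, NULL,              // lockfunc, lockarg (none: no deferral)
         &tag );

   The buffer_dmat tag above instead passes busdma_lock_mutex and the
   pmIOLock, so a deferred load callback runs with the IO lock held. */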
&pmcsc->OS_timer_lock, "OS timer lock", NULL, MTX_DEF ); mtx_init( &pmcsc->IO_timer_lock, "IO timer lock", NULL, MTX_DEF ); mtx_init( &pmcsc->devRmTimerLock, "targ rm timer lock", NULL, MTX_DEF ); callout_init_mtx( &pmcsc->OS_timer, &pmcsc->OS_timer_lock, 0 ); callout_init_mtx( &pmcsc->IO_timer, &pmcsc->IO_timer_lock, 0 ); callout_init_mtx( &pmcsc->devRmTimer, &pmcsc->devRmTimerLock, 0); next_tick = pmcsc->pCardInfo->tiRscInfo.tiLoLevelResource. loLevelOption.usecsPerTick / USEC_PER_TICK; AGTIAPI_PRINTK( "agtiapi_alloc_requests: before callout_reset, " "next_tick 0x%x\n", next_tick ); callout_reset( &pmcsc->OS_timer, next_tick, agtiapi_TITimer, pmcsc ); return 0; } /****************************************************************************** agtiapi_alloc_ostimem() Purpose: Allocates memory used later in ostiAllocMemory Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: This is a pre-allocation for ostiAllocMemory() "non-cacheable" function calls ******************************************************************************/ int agtiapi_alloc_ostimem( struct agtiapi_softc *pmcsc ) { int rsize, nomsize; nomsize = 4096; rsize = AGTIAPI_DYNAMIC_MAX * nomsize; // 8M AGTIAPI_PRINTK("agtiapi_alloc_ostimem: rsize %d \n", rsize); if( bus_dma_tag_create( agNULL, // parent 32, // alignment 0, // boundary BUS_SPACE_MAXADDR, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg rsize, // maxsize (size) 1, // number of segments rsize, // maxsegsize 0, // flags NULL, // lockfunc NULL, // lockarg &pmcsc->osti_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_ostimem: Can't create no-cache mem tag\n" ); return AGTIAPI_FAIL; } if( bus_dmamem_alloc( pmcsc->osti_dmat, &pmcsc->osti_mem, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &pmcsc->osti_mapp ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_ostimem: Cannot allocate cache mem %d\n", rsize ); return AGTIAPI_FAIL; } bus_dmamap_load( pmcsc->osti_dmat, pmcsc->osti_mapp, pmcsc->osti_mem, rsize, agtiapi_MemoryCB, // try reuse of CB for same goal &pmcsc->osti_busaddr, BUS_DMA_NOWAIT ); // populate all the ag_dma_addr_t osti_busaddr/mem fields with addresses for // handy reference when driver is in motion int idx; ag_card_info_t *pCardInfo = pmcsc->pCardInfo; ag_dma_addr_t *pMem; for( idx = 0; idx < AGTIAPI_DYNAMIC_MAX; idx++ ) { pMem = &pCardInfo->dynamicMem[idx]; pMem->nocache_busaddr = pmcsc->osti_busaddr + ( idx * nomsize ); pMem->nocache_mem = (void*)((U64)pmcsc->osti_mem + ( idx * nomsize )); pCardInfo->freeDynamicMem[idx] = &pCardInfo->dynamicMem[idx]; } pCardInfo->topOfFreeDynamicMem = AGTIAPI_DYNAMIC_MAX; return AGTIAPI_SUCCESS; } /****************************************************************************** agtiapi_cam_action() Purpose: Parses CAM frames and triggers a corresponding action Parameters: struct cam_sim *sim (IN) Pointer to SIM data structure union ccb * ccb (IN) Pointer to CAM ccb data structure Return: Note: ******************************************************************************/ static void agtiapi_cam_action( struct cam_sim *sim, union ccb * ccb ) { struct agtiapi_softc *pmcsc; tiDeviceHandle_t *pDevHandle = NULL; // acts as flag as well tiDeviceInfo_t devInfo; int pathID, targetID, lunID; int lRetVal; U32 TID; U32 speed = 150000; pmcsc = cam_sim_softc( sim ); AGTIAPI_IO( "agtiapi_cam_action: start pmcs %p\n", pmcsc ); if (pmcsc == agNULL) { AGTIAPI_PRINTK( "agtiapi_cam_action: start pmcs is NULL\n" ); return; } mtx_assert( 
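/* Aside on agtiapi_alloc_ostimem() above (illustrative): the loop carves one
   large DMA allocation into AGTIAPI_DYNAMIC_MAX fixed 4096-byte chunks and
   tracks the free ones with a LIFO stack (freeDynamicMem plus
   topOfFreeDynamicMem). A sketch of the pop/push pair such a pool implies,
   assuming, as the banner above suggests, that ostiAllocMemory() is the
   consumer:

     // pop: grab a chunk, or NULL when the pool is exhausted
     ag_dma_addr_t *chunk = NULL;
     if( pCardInfo->topOfFreeDynamicMem > 0 )
       chunk = pCardInfo->freeDynamicMem[--pCardInfo->topOfFreeDynamicMem];

     // push: return the chunk when the caller releases it
     pCardInfo->freeDynamicMem[pCardInfo->topOfFreeDynamicMem++] = chunk;
*/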
&(pmcsc->pCardInfo->pmIOLock), MA_OWNED ); AGTIAPI_IO( "agtiapi_cam_action: cardNO %d func_code 0x%x\n", pmcsc->cardNo, ccb->ccb_h.func_code ); pathID = xpt_path_path_id( ccb->ccb_h.path ); targetID = xpt_path_target_id( ccb->ccb_h.path ); lunID = xpt_path_lun_id( ccb->ccb_h.path ); AGTIAPI_IO( "agtiapi_cam_action: P 0x%x T 0x%x L 0x%x\n", pathID, targetID, lunID ); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi; /* See architecture book p. 180 */ cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = maxTargets - 1; cpi->max_lun = AGTIAPI_MAX_LUN; cpi->maxio = 1024 * 1024; /* Max supported I/O size, in bytes. */ cpi->initiator_id = 255; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "PMC", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); // rate is set when XPT_GET_TRAN_SETTINGS is processed cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_sas *sas; struct ccb_trans_settings_scsi *scsi; if ( pmcsc->flags & AGTIAPI_SHUT_DOWN ) { return; } cts = &ccb->cts; sas = &ccb->cts.xport_specific.sas; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; /* this sets the "MB/s transfers" */ if (pmcsc != NULL && targetID >= 0 && targetID < maxTargets) { if (pmcsc->pWWNList != NULL) { TID = INDEX(pmcsc, targetID); if (TID < maxTargets) { pDevHandle = pmcsc->pDevList[TID].pDevHandle; } } } if (pDevHandle) { tiINIGetDeviceInfo( &pmcsc->tiRoot, pDevHandle, &devInfo ); switch (devInfo.info.devType_S_Rate & 0xF) { case 0x8: speed = 150000; break; case 0x9: speed = 300000; break; case 0xA: speed = 600000; break; case 0xB: speed = 1200000; break; default: speed = 150000; break; } } sas->bitrate = speed; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: { lRetVal = agtiapi_eh_HostReset( pmcsc, ccb ); // usually works first time if ( SUCCESS == lRetVal ) { AGTIAPI_PRINTK( "agtiapi_cam_action: bus reset success.\n" ); } else { AGTIAPI_PRINTK( "agtiapi_cam_action: bus reset failed.\n" ); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_DEV: { ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_ABORT: { ccb->ccb_h.status = CAM_REQ_CMP; break; } #if __FreeBSD_version >= 900026 case XPT_SMP_IO: { agtiapi_QueueSMP( pmcsc, ccb ); return; } #endif /* __FreeBSD_version >= 900026 */ case XPT_SCSI_IO: { if(pmcsc->dev_scan == agFALSE) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } if (pmcsc->flags & AGTIAPI_SHUT_DOWN) { AGTIAPI_PRINTK( "agtiapi_cam_action: shutdown, XPT_SCSI_IO 0x%x\n", XPT_SCSI_IO ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } else { AGTIAPI_IO( "agtiapi_cam_action: Zero XPT_SCSI_IO 0x%x, doing IOs\n", XPT_SCSI_IO ); agtiapi_QueueCmnd_( pmcsc, ccb ); return; } } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, 1); ccb->ccb_h.status = CAM_REQ_CMP; break; } default: { /* XPT_SET_TRAN_SETTINGS */ AGTIAPI_IO( "agtiapi_cam_action: default function code 0x%x\n",
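/* Aside on the XPT_GET_TRAN_SETTINGS case above (illustrative): the low
   nibble of devType_S_Rate encodes the negotiated SAS link rate, and the
   switch translates it to CAM's bitrate unit of KB/s: 0x8 -> 150000
   (1.5 Gbps), 0x9 -> 300000 (3 Gbps), 0xA -> 600000 (6 Gbps),
   0xB -> 1200000 (12 Gbps). The same mapping in table form, a sketch only:

     static const U32 agtiapi_sas_bitrate_kbs[] = {
       [0x8] = 150000, [0x9] = 300000, [0xA] = 600000, [0xB] = 1200000,
     };
     U32 nib = devInfo.info.devType_S_Rate & 0xF;
     speed = ( nib >= 0x8 && nib <= 0xB ) ? agtiapi_sas_bitrate_kbs[nib]
                                          : 150000;
*/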
ccb->ccb_h.func_code ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } } /* switch */ xpt_done(ccb); } /****************************************************************************** agtiapi_GetCCB() Purpose: Get a ccb from the free list Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA structure Return: Pointer to a ccb structure, or NULL if not available Note: ******************************************************************************/ STATIC pccb_t agtiapi_GetCCB( struct agtiapi_softc *pmcsc ) { pccb_t pccb; AGTIAPI_IO( "agtiapi_GetCCB: start\n" ); AG_LOCAL_LOCK( &pmcsc->ccbLock ); /* get the ccb from the head of the free list */ if ((pccb = (pccb_t)pmcsc->ccbFreeList) != NULL) { pmcsc->ccbFreeList = (caddr_t *)pccb->pccbNext; pccb->pccbNext = NULL; pccb->flags = ACTIVE; pccb->startTime = 0; pmcsc->activeCCB++; AGTIAPI_IO( "agtiapi_GetCCB: re-allocated ccb %p\n", pccb ); } else { AGTIAPI_PRINTK( "agtiapi_GetCCB: free list empty ERROR - no ccb available\n" ); } AG_LOCAL_UNLOCK( &pmcsc->ccbLock ); return pccb; } /****************************************************************************** agtiapi_QueueCmnd_() Purpose: Calls for sending CCB and executing on HBA. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure union ccb * ccb (IN) Pointer to CAM ccb data structure Return: 0 - Command is pending to execute 1 - Command returned without further process Note: ******************************************************************************/ int agtiapi_QueueCmnd_(struct agtiapi_softc *pmcsc, union ccb * ccb) { struct ccb_scsiio *csio = &ccb->csio; pccb_t pccb = agNULL; // call dequeue int status = tiSuccess; U32 Channel = CMND_TO_CHANNEL(ccb); U32 TID = CMND_TO_TARGET(ccb); U32 LUN = CMND_TO_LUN(ccb); AGTIAPI_IO( "agtiapi_QueueCmnd_: start\n" ); /* no support for CDB > 16 */ if (csio->cdb_len > 16) { AGTIAPI_PRINTK( "agtiapi_QueueCmnd_: unsupported CDB length %d\n", csio->cdb_len ); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_INVALID;//CAM_REQ_CMP; xpt_done(ccb); return tiError; } if (TID < 0 || TID >= maxTargets) { AGTIAPI_PRINTK("agtiapi_QueueCmnd_: INVALID TID ERROR\n"); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE;//CAM_REQ_CMP; xpt_done(ccb); return tiError; } /* get a ccb */ if ((pccb = agtiapi_GetCCB(pmcsc)) == NULL) { ag_device_t *targ = NULL; AGTIAPI_PRINTK("agtiapi_QueueCmnd_: GetCCB ERROR\n"); if (pmcsc != NULL) { TID = INDEX(pmcsc, TID); targ = &pmcsc->pDevList[TID]; } if (targ != NULL) { agtiapi_adjust_queue_depth(ccb->ccb_h.path,targ->qdepth); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_done(ccb); return tiBusy; } pccb->pmcsc = pmcsc; /* initialize Command Control Block (CCB) */ pccb->targetId = TID; pccb->lun = LUN; pccb->channel = Channel; pccb->ccb = ccb; /* for struct scsi_cmnd */ pccb->senseLen = csio->sense_len; pccb->startTime = ticks; pccb->pSenseData = (caddr_t) &csio->sense_data; pccb->tiSuperScsiRequest.flags = 0; /* each channel is reserved for different addr modes */ pccb->addrMode = agtiapi_AddrModes[Channel]; status = agtiapi_PrepareSGList(pmcsc, pccb); if (status != tiSuccess) { AGTIAPI_PRINTK("agtiapi_QueueCmnd_: agtiapi_PrepareSGList failure\n"); agtiapi_FreeCCB(pmcsc, pccb); if (status == tiReject) { ccb->ccb_h.status = CAM_REQ_INVALID; } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done( ccb ); return tiError;
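/*
 * Aside (illustrative): agtiapi_GetCCB() above pops a CCB off a singly
 * linked free list under ccbLock. Its push-side twin, agtiapi_FreeCCB(),
 * is outside this hunk; assuming pccbNext also links free entries, the
 * generic release shape is sketched below.
 */
#if 0 /* sketch only, never compiled */
AG_LOCAL_LOCK( &pmcsc->ccbLock );
pccb->flags = 0;                             /* no longer ACTIVE */
pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; /* push onto free list */
pmcsc->ccbFreeList = (caddr_t *)pccb;
pmcsc->activeCCB--;
AG_LOCAL_UNLOCK( &pmcsc->ccbLock );
#endif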
} return status; } /****************************************************************************** agtiapi_DumpCDB() Purpose: Prints out CDB Parameters: const char *ptitle (IN) A string to be printed ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_DumpCDB(const char *ptitle, ccb_t *pccb) { union ccb *ccb; struct ccb_scsiio *csio; bit8 cdb[64]; int len; if (pccb == NULL) { printf( "agtiapi_DumpCDB: no pccb here \n" ); panic("agtiapi_DumpCDB: pccb is NULL. called from %s\n", ptitle); return; } ccb = pccb->ccb; if (ccb == NULL) { printf( "agtiapi_DumpCDB: no ccb here \n" ); panic( "agtiapi_DumpCDB: pccb %p ccb %p flags %d ccb NULL! " "called from %s\n", pccb, pccb->ccb, pccb->flags, ptitle ); return; } csio = &ccb->csio; if (csio == NULL) { printf( "agtiapi_DumpCDB: no csio here \n" ); panic( "agtiapi_DumpCDB: pccb%p ccb%p flags%d csio NULL! called from %s\n", pccb, pccb->ccb, pccb->flags, ptitle ); return; } len = MIN(64, csio->cdb_len); if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &cdb[0], len); } else { bcopy(csio->cdb_io.cdb_bytes, &cdb[0], len); } AGTIAPI_IO( "agtiapi_DumpCDB: pccb%p CDB0x%x csio->cdb_len %d" " len %d from %s\n", pccb, cdb[0], csio->cdb_len, len, ptitle ); return; } /****************************************************************************** agtiapi_DoSoftReset() Purpose: Do card reset Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ int agtiapi_DoSoftReset (struct agtiapi_softc *pmcsc) { int ret; unsigned long flags; pmcsc->flags |= AGTIAPI_SOFT_RESET; AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); ret = agtiapi_ResetCard( pmcsc, &flags ); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); if( ret != AGTIAPI_SUCCESS ) return tiError; return SUCCESS; } /****************************************************************************** agtiapi_CheckIOTimeout() Purpose: Timeout function for SCSI IO or TM Parameters: void *data (IN) Pointer to the HBA data structure (struct agtiapi_softc *) Return: Note: ******************************************************************************/ STATIC void agtiapi_CheckIOTimeout(void *data) { U32 status = AGTIAPI_SUCCESS; ccb_t *pccb; struct agtiapi_softc *pmcsc; pccb_t pccb_curr; pccb_t pccb_next; pmcsc = (struct agtiapi_softc *)data; //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Enter\n"); //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Active CCB %d\n", pmcsc->activeCCB); pccb = (pccb_t)pmcsc->ccbChainList; /* if link is down, do nothing */ if ((pccb == NULL) || (pmcsc->activeCCB == 0)) { //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: goto restart_timer\n"); goto restart_timer; } AG_SPIN_LOCK_IRQ(agtiapi_host_lock, flags); if (pmcsc->flags & AGTIAPI_SHUT_DOWN) goto ext; pccb_curr = pccb; /* Walk through the IO chain linked list to find the pending io */ /* Set the TM flag based on the pccb type, i.e. SCSI IO or TM cmd */ while (pccb_curr != NULL) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbChainNext; if( (pccb_curr->flags == 0) || (pccb_curr->tiIORequest.tdData == NULL) || (pccb_curr->startTime == 0) ) { //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: move to next element\n"); } else if ( ( (ticks-pccb_curr->startTime) >= ag_timeout_secs ) && !(pccb_curr->flags & TIMEDOUT) ) { AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: pccb %p timed out, call TM " "function -- flags=%x startTime=%ld tdData = %p\n", pccb_curr, pccb_curr->flags, pccb_curr->startTime, pccb_curr->tiIORequest.tdData ); pccb_curr->flags |= TIMEDOUT; status = agtiapi_StartTM(pmcsc, pccb_curr); if (status == AGTIAPI_SUCCESS) { AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: TM Request sent with " "success\n" ); goto restart_timer; } else { #ifdef AGTIAPI_LOCAL_RESET /* abort request did not go through */ AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Abort request failed\n"); /* TODO: call Soft reset here */ AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: abort request did not go " "thru ==> soft reset#7, then restart timer\n" ); agtiapi_DoSoftReset (pmcsc); goto restart_timer; #endif } } pccb_curr = pccb_next; } restart_timer: callout_reset(&pmcsc->IO_timer, 1*hz, agtiapi_CheckIOTimeout, pmcsc); ext: AG_SPIN_UNLOCK_IRQ(agtiapi_host_lock, flags); return; } /****************************************************************************** agtiapi_StartTM() Purpose: DDI calls for aborting outstanding IO command Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure ccb_t *pccb (IN) Pointer to the driver CCB to be aborted Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail ******************************************************************************/ int agtiapi_StartTM(struct agtiapi_softc *pCard, ccb_t *pccb) { ccb_t *pTMccb = NULL; U32 status = AGTIAPI_SUCCESS; ag_device_t *pDevice = NULL; U32 TMstatus = tiSuccess; if (pccb == NULL) { AGTIAPI_PRINTK("agtiapi_StartTM: %p not found\n",pccb); status = AGTIAPI_SUCCESS; goto ext; } AGTIAPI_PRINTK( "agtiapi_StartTM: pccb %p, pccb->flags %x\n", pccb, pccb->flags ); if (!pccb->tiIORequest.tdData) { /* should not be the case */ AGTIAPI_PRINTK("agtiapi_StartTM: ccb %p flag 0x%x tid %d no tdData " "ERROR\n", pccb, pccb->flags, pccb->targetId); status = AGTIAPI_FAIL; } else { /* If timedout CCB is TM_ABORT_TASK command, issue LocalAbort first to clear pending TM_ABORT_TASK */ /* Else Device State will not be put back to Operational, (refer FW) */ if (pccb->flags & TASK_MANAGEMENT) { if (tiINIIOAbort(&pCard->tiRoot, &pccb->tiIORequest) != tiSuccess) { AGTIAPI_PRINTK( "agtiapi_StartTM: LocalAbort Request for Abort_TASK " "TM failed\n" ); /* TODO: call Soft reset here */ AGTIAPI_PRINTK( "agtiapi_StartTM: in agtiapi_StartTM() abort " "tiINIIOAbort() failed ==> soft reset#8\n" ); agtiapi_DoSoftReset( pCard ); } else { AGTIAPI_PRINTK( "agtiapi_StartTM: LocalAbort for Abort_TASK TM " "Request sent\n" ); status = AGTIAPI_SUCCESS; } } else { /* get a ccb */ if ((pTMccb = agtiapi_GetCCB(pCard)) == NULL) { AGTIAPI_PRINTK("agtiapi_StartTM: TM resource unavailable!\n"); status = AGTIAPI_FAIL; goto ext; } pTMccb->pmcsc = pCard; pTMccb->targetId = pccb->targetId; pTMccb->devHandle = pccb->devHandle; if (pTMccb->targetId >= pCard->devDiscover) { AGTIAPI_PRINTK("agtiapi_StartTM: Incorrect dev Id in TM!\n"); status = AGTIAPI_FAIL; goto ext; } if (pTMccb->targetId < 0 || pTMccb->targetId >= maxTargets) { return AGTIAPI_FAIL; } if (INDEX(pCard, pTMccb->targetId) >= maxTargets) { return AGTIAPI_FAIL; } pDevice = &pCard->pDevList[INDEX(pCard, pTMccb->targetId)]; if ((pDevice == NULL) || !(pDevice->flags & ACTIVE)) { return AGTIAPI_FAIL; } /* save pending io to issue local abort at Task mgmt CB */ pTMccb->pccbIO = pccb; AGTIAPI_PRINTK( "agtiapi_StartTM: pTMccb %p flag %x tid %d via TM " "request !\n", pTMccb, pTMccb->flags, pTMccb->targetId ); pTMccb->flags &= ~(TASK_SUCCESS | ACTIVE); pTMccb->flags |= TASK_MANAGEMENT; TMstatus =
tiINITaskManagement(&pCard->tiRoot, pccb->devHandle, AG_ABORT_TASK, &pccb->tiSuperScsiRequest.scsiCmnd.lun, &pccb->tiIORequest, &pTMccb->tiIORequest); if (TMstatus == tiSuccess) { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request success ccb " "%p, pTMccb %p\n", pccb, pTMccb ); pTMccb->startTime = ticks; status = AGTIAPI_SUCCESS; } else if (TMstatus == tiIONoDevice) { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request tiIONoDevice ccb " "%p, pTMccb %p\n", pccb, pTMccb ); status = AGTIAPI_SUCCESS; } else { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request failed ccb %p, " "pTMccb %p\n", pccb, pTMccb ); status = AGTIAPI_FAIL; agtiapi_FreeTMCCB(pCard, pTMccb); /* TODO */ /* call TM_TARGET_RESET */ } } } ext: AGTIAPI_PRINTK("agtiapi_StartTM: return %d flgs %x\n", status, (pccb) ? pccb->flags : -1); return status; } /* agtiapi_StartTM */ #if __FreeBSD_version > 901000 /****************************************************************************** agtiapi_PrepareSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSGList(struct agtiapi_softc *pmcsc, ccb_t *pccb) { union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_IO( "agtiapi_PrepareSGList: start\n" ); // agtiapi_DumpCDB("agtiapi_PrepareSGList", pccb); AGTIAPI_IO( "agtiapi_PrepareSGList: dxfer_len %d\n", csio->dxfer_len ); if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { switch((ccbh->flags & CAM_DATA_MASK)) { int error; struct bus_dma_segment seg; case CAM_DATA_VADDR: /* Virtual address that needs to translated into one or more physical address ranges. */ // int error; // AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_IO( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csio->data_ptr, csio->dxfer_len, agtiapi_PrepareSGListCB, pccb, BUS_DMA_NOWAIT/* 0 */ ); // AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if (error == EINPROGRESS) { /* So as to maintain ordering, freeze the controller queue until our mapping is returned. */ AGTIAPI_PRINTK("agtiapi_PrepareSGList: EINPROGRESS\n"); xpt_freeze_simq(pmcsc->sim, 1); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } break; case CAM_DATA_PADDR: /* We have been given a pointer to single physical buffer. 
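Aside (illustrative): in the CAM_DATA_VADDR branch above, bus_dmamap_load() can defer the mapping; EINPROGRESS means the callback will be invoked later, so the SIM queue is frozen to preserve command ordering until it fires. The minimal shape of that deferred-load pattern, with hypothetical names (tag, map, buf, len, my_callback, my_arg, sim):

    // allow deferral by passing flags of 0 rather than BUS_DMA_NOWAIT
    error = bus_dmamap_load( tag, map, buf, len, my_callback, my_arg, 0 );
    if( error == EINPROGRESS ) {
      // callback runs later; stall new submissions until then
      xpt_freeze_simq( sim, 1 );
    } else if( error != 0 ) {
      // immediate failure: the callback will never be invoked
    }
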
*/ /* pccb->tiSuperScsiRequest.sglVirtualAddr = seg.ds_addr; */ //struct bus_dma_segment seg; AGTIAPI_PRINTK("agtiapi_PrepareSGList: physical address\n"); seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; // * 0xFF to be defined agtiapi_PrepareSGListCB(pccb, &seg, 1, 0xAABBCCDD); break; default: AGTIAPI_PRINTK("agtiapi_PrepareSGList: unexpected case\n"); return tiReject; } } else { agtiapi_PrepareSGListCB(pccb, NULL, 0, 0xAAAAAAAA); } return tiSuccess; } #else /****************************************************************************** agtiapi_PrepareSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSGList(struct agtiapi_softc *pmcsc, ccb_t *pccb) { union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_IO( "agtiapi_PrepareSGList: start\n" ); // agtiapi_DumpCDB("agtiapi_PrepareSGList", pccb); AGTIAPI_IO( "agtiapi_PrepareSGList: dxfer_len %d\n", csio->dxfer_len ); if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { /* We've been given a pointer to a single buffer. */ if ((ccbh->flags & CAM_DATA_PHYS) == 0) { /* Virtual address that needs to be translated into one or more physical address ranges. */ int error; // AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_IO( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csio->data_ptr, csio->dxfer_len, agtiapi_PrepareSGListCB, pccb, BUS_DMA_NOWAIT/* 0 */ ); // AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if (error == EINPROGRESS) { /* So as to maintain ordering, freeze the controller queue until our mapping is returned. */ AGTIAPI_PRINTK("agtiapi_PrepareSGList: EINPROGRESS\n"); xpt_freeze_simq(pmcsc->sim, 1); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } else { /* We have been given a pointer to a single physical buffer. */ /* pccb->tiSuperScsiRequest.sglVirtualAddr = seg.ds_addr; */ struct bus_dma_segment seg; AGTIAPI_PRINTK("agtiapi_PrepareSGList: physical address\n"); seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; // * 0xFF to be defined agtiapi_PrepareSGListCB(pccb, &seg, 1, 0xAABBCCDD); } } else { AGTIAPI_PRINTK("agtiapi_PrepareSGList: unexpected case\n"); return tiReject; } } else { agtiapi_PrepareSGListCB(pccb, NULL, 0, 0xAAAAAAAA); } return tiSuccess; } #endif /****************************************************************************** agtiapi_PrepareSGListCB() Purpose: Callback function for bus_dmamap_load() This function sends IO to the LL layer.
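Note (illustrative): this callback is also invoked synchronously by agtiapi_PrepareSGList() itself with fabricated "error" values that act as sentinels rather than errno codes: 0xAABBCCDD marks a caller-built single physical segment and 0xAAAAAAAA marks a command with no data transfer. Hypothetical symbolic names (not present in the source) make the convention explicit:

    #define AGTIAPI_CB_SENTINEL_PADDR  0xAABBCCDD  // pre-built phys segment
    #define AGTIAPI_CB_SENTINEL_NODATA 0xAAAAAAAA  // no data to transfer
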
Parameters: void *arg (IN) Pointer to the HBA data structure bus_dma_segment_t *segs (IN) Pointer to dma segment int nsegs (IN) number of dma segment int error (IN) error Return: Note: ******************************************************************************/ static void agtiapi_PrepareSGListCB( void *arg, bus_dma_segment_t *segs, int nsegs, int error ) { pccb_t pccb = arg; union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct agtiapi_softc *pmcsc; tiIniScsiCmnd_t *pScsiCmnd; bit32 i; bus_dmasync_op_t op; U32_64 phys_addr; U08 *CDB; int io_is_encryptable = 0; unsigned long long start_lba = 0; ag_device_t *pDev; U32 TID = CMND_TO_TARGET(ccb); AGTIAPI_IO( "agtiapi_PrepareSGListCB: start, nsegs %d error 0x%x\n", nsegs, error ); pmcsc = pccb->pmcsc; if (error != tiSuccess) { if (error == 0xAABBCCDD || error == 0xAAAAAAAA) { // do nothing } else { AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: error status 0x%x\n", error); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); bus_dmamap_destroy(pmcsc->buffer_dmat, pccb->CCB_dmamap); agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } } if (nsegs > AGTIAPI_MAX_DMA_SEGS) { AGTIAPI_PRINTK( "agtiapi_PrepareSGListCB: over the limit. nsegs %d" " AGTIAPI_MAX_DMA_SEGS %d\n", nsegs, AGTIAPI_MAX_DMA_SEGS ); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); bus_dmamap_destroy(pmcsc->buffer_dmat, pccb->CCB_dmamap); agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } /* fill in IO information */ pccb->dataLen = csio->dxfer_len; /* start fill in sgl structure */ if (nsegs == 1 && error == 0xAABBCCDD) { /* to be tested */ /* A single physical buffer */ AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: nsegs is 1\n"); CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, segs[0].ds_addr); pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->dataLen); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl); pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)segs->ds_addr; pccb->numSgElements = 1; } else if (nsegs == 0 && error == 0xAAAAAAAA) { /* no data transfer */ AGTIAPI_IO( "agtiapi_PrepareSGListCB: no data transfer\n" ); pccb->tiSuperScsiRequest.agSgl1.len = 0; pccb->dataLen = 0; pccb->numSgElements = 0; } else { /* virtual/logical buffer */ if (nsegs == 1) { pccb->dataLen = segs[0].ds_len; CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, segs[0].ds_addr); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl); pccb->tiSuperScsiRequest.agSgl1.len = htole32(segs[0].ds_len); pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)csio->data_ptr; pccb->numSgElements = nsegs; } else { pccb->dataLen = 0; /* loop */ for (i = 0; i < nsegs; i++) { pccb->sgList[i].len = htole32(segs[i].ds_len); CPU_TO_LE32(pccb->sgList[i], segs[i].ds_addr); pccb->sgList[i].type = htole32(tiSgl); pccb->dataLen += segs[i].ds_len; } /* for */ pccb->numSgElements = nsegs; /* set up sgl buffer address */ CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, pccb->tisgl_busaddr); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSglList); pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->dataLen); pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)csio->data_ptr; pccb->numSgElements = nsegs; } /* else */ } /* set data transfer direction */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { op = BUS_DMASYNC_PREWRITE; pccb->tiSuperScsiRequest.dataDirection = tiDirectionOut; } else { op = BUS_DMASYNC_PREREAD; pccb->tiSuperScsiRequest.dataDirection = tiDirectionIn; } pScsiCmnd = &pccb->tiSuperScsiRequest.scsiCmnd; 
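/*
 * Aside (illustrative): the switch below pulls the big-endian LBA straight
 * out of the CDB for READ/WRITE opcodes. An equivalent helper for the
 * 10-byte variants, hypothetical and not part of the source:
 */
#if 0 /* sketch only, never compiled */
static inline unsigned long long
agtiapi_cdb10_lba( const U08 *cdb )
{
  /* READ_10/WRITE_10 carry the LBA big-endian in CDB bytes 2..5 */
  return ((unsigned long long)cdb[2] << 24) |
         ((unsigned long long)cdb[3] << 16) |
         ((unsigned long long)cdb[4] << 8)  |
          (unsigned long long)cdb[5];
}
#endif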
pScsiCmnd->expDataLength = pccb->dataLen; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &pScsiCmnd->cdb[0], csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, &pScsiCmnd->cdb[0],csio->cdb_len); } CDB = &pScsiCmnd->cdb[0]; switch (CDB[0]) { case REQUEST_SENSE: /* requires different buffer */ /* This code should not be exercised because SAS supports autosense. For completeness, vtophys() is still used here. */ AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: QueueCmnd - REQUEST SENSE new\n"); pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->senseLen); phys_addr = vtophys(&csio->sense_data); CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, phys_addr); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl); pccb->dataLen = pccb->senseLen; pccb->numSgElements = 1; break; case INQUIRY: /* only using lun 0 for device type detection */ pccb->flags |= AGTIAPI_INQUIRY; break; case TEST_UNIT_READY: case RESERVE: case RELEASE: case START_STOP: pccb->tiSuperScsiRequest.agSgl1.len = 0; pccb->dataLen = 0; break; case READ_6: case WRITE_6: /* Extract LBA */ start_lba = ((CDB[1] & 0x1f) << 16) | (CDB[2] << 8) | (CDB[3]); #ifdef HIALEAH_ENCRYPTION io_is_encryptable = 1; #endif break; case READ_10: case WRITE_10: case READ_12: case WRITE_12: /* Extract LBA */ start_lba = (CDB[2] << 24) | (CDB[3] << 16) | (CDB[4] << 8) | (CDB[5]); #ifdef HIALEAH_ENCRYPTION io_is_encryptable = 1; #endif break; case READ_16: case WRITE_16: /* Extract LBA */ start_lba = (CDB[2] << 24) | (CDB[3] << 16) | (CDB[4] << 8) | (CDB[5]); start_lba <<= 32; start_lba |= ((CDB[6] << 24) | (CDB[7] << 16) | (CDB[8] << 8) | (CDB[9])); #ifdef HIALEAH_ENCRYPTION io_is_encryptable = 1; #endif break; default: break; } /* fill device lun based on address mode */ agtiapi_SetLunField(pccb); if (pccb->targetId < 0 || pccb->targetId >= maxTargets) { pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } if (INDEX(pmcsc, pccb->targetId) >= maxTargets) { pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } pDev = &pmcsc->pDevList[INDEX(pmcsc, pccb->targetId)]; #if 1 if ((pmcsc->flags & EDC_DATA) && (pDev->flags & EDC_DATA)) { /* * EDC support: * * Possible commands supported - * READ_6, READ_10, READ_12, READ_16, READ_LONG, READ_BUFFER, * READ_DEFECT_DATA, etc. * WRITE_6, WRITE_10, WRITE_12, WRITE_16, WRITE_LONG, WRITE_LONG2, * WRITE_BUFFER, WRITE_VERIFY, WRITE_VERIFY_12, etc. * * Do some data length adjustment and set chip operation instruction.
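* Worked example (illustrative): on 520-byte formatted media each 512-byte
* logical block carries 8 extra bytes of protection information, so a
* 64 KiB read (128 blocks) grows the expected transfer by 128 * 8 = 1024
* bytes; that is exactly what the (dataLen / 512) * 8 adjustment under
* TEST_VERIFY_AND_FORWARD below computes, and the 4104-byte case is the
* same arithmetic for 4096-byte blocks.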
*/ switch (CDB[0]) { case READ_6: case READ_10: case READ_12: case READ_16: // BUG_ON(pccb->tiSuperScsiRequest.flags & TI_SCSI_INITIATOR_ENCRYPT); #ifdef AGTIAPI_TEST_DIF pccb->tiSuperScsiRequest.flags |= TI_SCSI_INITIATOR_DIF; #endif pccb->flags |= EDC_DATA; #ifdef TEST_VERIFY_AND_FORWARD pccb->tiSuperScsiRequest.Dif.flags = DIF_VERIFY_FORWARD | DIF_UDT_REF_BLOCK_COUNT; if(pDev->sector_size == 520) { pScsiCmnd->expDataLength += (pccb->dataLen / 512) * 8; } else if(pDev->sector_size == 4104) { pScsiCmnd->expDataLength += (pccb->dataLen / 4096) * 8; } #else #ifdef AGTIAPI_TEST_DIF pccb->tiSuperScsiRequest.Dif.flags = DIF_VERIFY_DELETE | DIF_UDT_REF_BLOCK_COUNT; #endif #endif #ifdef AGTIAPI_TEST_DIF switch(pDev->sector_size) { case 528: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_520 << 16 ); break; case 4104: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4096 << 16 ); break; case 4168: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4160 << 16 ); break; } if(pCard->flags & EDC_DATA_CRC) pccb->tiSuperScsiRequest.Dif.flags |= DIF_CRC_VERIFICATION; /* Turn on upper 4 bits of UVM */ pccb->tiSuperScsiRequest.Dif.flags |= 0x03c00000; #endif #ifdef AGTIAPI_TEST_DPL if(agtiapi_SetupDifPerLA(pCard, pccb, start_lba) < 0) { printk(KERN_ERR "SetupDifPerLA Failed.\n"); cmnd->result = SCSI_HOST(DID_ERROR); goto err; } pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = TRUE; #endif #ifdef AGTIAPI_TEST_DIF /* Set App Tag */ pccb->tiSuperScsiRequest.Dif.udtArray[0] = 0xaa; pccb->tiSuperScsiRequest.Dif.udtArray[1] = 0xbb; /* Set LBA in UDT array */ if(CDB[0] == READ_6) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[2]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[1] & 0x1f; pccb->tiSuperScsiRequest.Dif.udtArray[5] = 0; } else if(CDB[0] == READ_10 || CDB[0] == READ_12) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2]; } else if(CDB[0] == READ_16) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[9]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[8]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[7]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[6]; /* Note: 32 bits lost */ } #endif break; case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: // BUG_ON(pccb->tiSuperScsiRequest.flags & TI_SCSI_INITIATOR_ENCRYPT); pccb->flags |= EDC_DATA; #ifdef AGTIAPI_TEST_DIF pccb->tiSuperScsiRequest.flags |= TI_SCSI_INITIATOR_DIF; pccb->tiSuperScsiRequest.Dif.flags = DIF_INSERT | DIF_UDT_REF_BLOCK_COUNT; switch(pDev->sector_size) { case 528: pccb->tiSuperScsiRequest.Dif.flags |= (DIF_BLOCK_SIZE_520 << 16); break; case 4104: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4096 << 16 ); break; case 4168: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4160 << 16 ); break; } /* Turn on upper 4 bits of UUM */ pccb->tiSuperScsiRequest.Dif.flags |= 0xf0000000; #endif #ifdef AGTIAPI_TEST_DPL if(agtiapi_SetupDifPerLA(pCard, pccb, start_lba) < 0) { printk(KERN_ERR "SetupDifPerLA Failed.\n"); cmnd->result = SCSI_HOST(DID_ERROR); goto err; } pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = TRUE; #endif #ifdef AGTIAPI_TEST_DIF /* Set App Tag */ pccb->tiSuperScsiRequest.Dif.udtArray[0] = 0xaa; pccb->tiSuperScsiRequest.Dif.udtArray[1] = 0xbb; /* Set LBA in UDT array */ if(CDB[0] == WRITE_6) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[2]; 
pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[1] & 0x1f; } else if(CDB[0] == WRITE_10 || CDB[0] == WRITE_12) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2]; } else if(CDB[0] == WRITE_16) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2]; /* Note: 32 bits lost */ } #endif break; } } #endif /* end of DIF */ if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(csio->tag_action) { case MSG_HEAD_OF_Q_TAG: pScsiCmnd->taskAttribute = TASK_HEAD_OF_QUEUE; break; case MSG_ACA_TASK: pScsiCmnd->taskAttribute = TASK_ACA; break; case MSG_ORDERED_Q_TAG: pScsiCmnd->taskAttribute = TASK_ORDERED; break; case MSG_SIMPLE_Q_TAG: /* fall through */ default: pScsiCmnd->taskAttribute = TASK_SIMPLE; break; } } if (pccb->tiSuperScsiRequest.agSgl1.len != 0 && pccb->dataLen != 0) { /* should be just before start IO */ bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); } /* * If assigned pDevHandle is not available * then there is no need to send it to StartIO() */ if (pccb->targetId < 0 || pccb->targetId >= maxTargets) { pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } TID = INDEX(pmcsc, pccb->targetId); if ((TID >= pmcsc->devDiscover) || !(pccb->devHandle = pmcsc->pDevList[TID].pDevHandle)) { /* AGTIAPI_PRINTK( "agtiapi_PrepareSGListCB: not sending ccb devH %p," " target %d tid %d/%d card %p ERROR pccb %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc, pccb ); */ pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } AGTIAPI_IO( "agtiapi_PrepareSGListCB: send ccb pccb->devHandle %p, " "pccb->targetId %d TID %d pmcsc->devDiscover %d card %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc ); #ifdef HIALEAH_ENCRYPTION if(pmcsc->encrypt && io_is_encryptable) { agtiapi_SetupEncryptedIO(pmcsc, pccb, start_lba); } else{ io_is_encryptable = 0; pccb->tiSuperScsiRequest.flags = 0; } #endif // put the request in send queue agtiapi_QueueCCB( pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb ); agtiapi_StartIO(pmcsc); return; } /****************************************************************************** agtiapi_StartIO() Purpose: Send IO request down for processing. 
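Note (illustrative): the function detaches the entire send queue under sendLock and only then walks it, so tiINISuperIOStart() is never called while the queue lock is held. The detach-then-drain shape, using this driver's own names:

    AG_LOCAL_LOCK( &pmcsc->sendLock );
    pccb = pmcsc->ccbSendHead;    // take the whole chain at once
    pmcsc->ccbSendHead = NULL;    // producers now see an empty queue
    pmcsc->ccbSendTail = NULL;
    AG_LOCAL_UNLOCK( &pmcsc->sendLock );
    while( pccb ) { }             // issue each CCB, advance via pccbNext
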
Parameters: (struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_StartIO( struct agtiapi_softc *pmcsc ) { ccb_t *pccb; int TID; ag_device_t *targ; struct ccb_relsim crs; AGTIAPI_IO( "agtiapi_StartIO: start\n" ); AG_LOCAL_LOCK( &pmcsc->sendLock ); pccb = pmcsc->ccbSendHead; /* if link is down, do nothing */ if ((pccb == NULL) || pmcsc->flags & AGTIAPI_RESET) { AG_LOCAL_UNLOCK( &pmcsc->sendLock ); AGTIAPI_PRINTK( "agtiapi_StartIO: goto ext\n" ); goto ext; } if (pmcsc != NULL && pccb->targetId >= 0 && pccb->targetId < maxTargets) { TID = INDEX(pmcsc, pccb->targetId); targ = &pmcsc->pDevList[TID]; } /* clear send queue */ pmcsc->ccbSendHead = NULL; pmcsc->ccbSendTail = NULL; AG_LOCAL_UNLOCK( &pmcsc->sendLock ); /* send all ccbs down */ while (pccb) { pccb_t pccb_next; U32 status; pccb_next = pccb->pccbNext; pccb->pccbNext = NULL; if (!pccb->ccb) { AGTIAPI_PRINTK( "agtiapi_StartIO: pccb->ccb is NULL ERROR!\n" ); pccb = pccb_next; continue; } AG_IO_DUMPCCB( pccb ); if (!pccb->devHandle) { agtiapi_DumpCCB( pccb ); AGTIAPI_PRINTK( "agtiapi_StartIO: ccb NULL device ERROR!\n" ); pccb = pccb_next; continue; } AGTIAPI_IO( "agtiapi_StartIO: ccb %p retry %d\n", pccb, pccb->retryCount ); #ifndef ABORT_TEST if( !pccb->devHandle || !pccb->devHandle->osData || /* in rmmod case */ !(((ag_device_t *)(pccb->devHandle->osData))->flags & ACTIVE)) { AGTIAPI_PRINTK( "agtiapi_StartIO: device %p not active! ERROR\n", pccb->devHandle ); if( pccb->devHandle ) { AGTIAPI_PRINTK( "agtiapi_StartIO: device not active detail" " -- osData:%p\n", pccb->devHandle->osData ); if( pccb->devHandle->osData ) { AGTIAPI_PRINTK( "agtiapi_StartIO: more device not active detail" " -- active flag:%d\n", ( (ag_device_t *) (pccb->devHandle->osData))->flags & ACTIVE ); } } pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_Done( pmcsc, pccb ); pccb = pccb_next; continue; } #endif #ifdef FAST_IO_TEST status = agtiapi_FastIOTest( pmcsc, pccb ); #else status = tiINISuperIOStart( &pmcsc->tiRoot, &pccb->tiIORequest, pccb->devHandle, &pccb->tiSuperScsiRequest, (void *)&pccb->tdIOReqBody, tiInterruptContext ); #endif switch( status ) { case tiSuccess: /* static int squelchCount = 0; if ( 200000 == squelchCount++ ) // squelch prints { AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart stat tiSuccess %p\n", pccb ); squelchCount = 0; // reset count } */ break; case tiDeviceBusy: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiDeviceBusy %p\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiDeviceBusy " ); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDeviceBusy; agtiapi_Done(pmcsc, pccb); break; case tiBusy: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiBusy %p\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiBusy " ); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiBusy; agtiapi_Done(pmcsc, pccb); break; case tiIONoDevice: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiNoDevice %p " "ERROR\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart invalid device handle " ); #endif #ifndef ABORT_TEST /* return command back to OS due to no device available */ ((ag_device_t *)(pccb->devHandle->osData))->flags &= ~ACTIVE; pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = 
tiDetailNoLogin; agtiapi_Done(pmcsc, pccb); #else /* for short cable pull, we want IO retried - 3-18-2005 */ agtiapi_QueueCCB(pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb); #endif break; case tiError: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status tiError %p\n", pccb->ccb); #ifdef LOGEVENT agtiapi_LogEvent(pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiError "); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailOtherError; agtiapi_Done(pmcsc, pccb); break; default: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status default %x %p\n", status, pccb->ccb); #ifdef LOGEVENT agtiapi_LogEvent(pmcsc, IOCTL_EVT_SEV_ERROR, 0, agNULL, 0, "tiINIIOStart unexpected status "); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailOtherError; agtiapi_Done(pmcsc, pccb); } pccb = pccb_next; } ext: /* some IO requests might have been completed */ AG_GET_DONE_PCCB(pccb, pmcsc); return; } /****************************************************************************** agtiapi_StartSMP() Purpose: Send SMP request down for processing. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_StartSMP(struct agtiapi_softc *pmcsc) { ccb_t *pccb; AGTIAPI_PRINTK("agtiapi_StartSMP: start\n"); AG_LOCAL_LOCK(&pmcsc->sendSMPLock); pccb = pmcsc->smpSendHead; /* if link is down, do nothing */ if ((pccb == NULL) || pmcsc->flags & AGTIAPI_RESET) { AG_LOCAL_UNLOCK(&pmcsc->sendSMPLock); AGTIAPI_PRINTK("agtiapi_StartSMP: goto ext\n"); goto ext; } /* clear send queue */ pmcsc->smpSendHead = NULL; pmcsc->smpSendTail = NULL; AG_LOCAL_UNLOCK(&pmcsc->sendSMPLock); /* send all ccbs down */ while (pccb) { pccb_t pccb_next; U32 status; pccb_next = pccb->pccbNext; pccb->pccbNext = NULL; if (!pccb->ccb) { AGTIAPI_PRINTK("agtiapi_StartSMP: pccb->ccb is NULL ERROR!\n"); pccb = pccb_next; continue; } if (!pccb->devHandle) { AGTIAPI_PRINTK("agtiapi_StartSMP: ccb NULL device ERROR!\n"); pccb = pccb_next; continue; } pccb->flags |= TAG_SMP; // mark as SMP for later tracking AGTIAPI_PRINTK( "agtiapi_StartSMP: ccb %p retry %d\n", pccb, pccb->retryCount ); status = tiINISMPStart( &pmcsc->tiRoot, &pccb->tiIORequest, pccb->devHandle, &pccb->tiSMPFrame, (void *)&pccb->tdIOReqBody, tiInterruptContext); switch (status) { case tiSuccess: break; case tiBusy: AGTIAPI_PRINTK("agtiapi_StartSMP: tiINISMPStart status tiBusy %p\n", pccb->ccb); /* pending ccb back to send queue */ agtiapi_QueueCCB(pmcsc, &pmcsc->smpSendHead, &pmcsc->smpSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendSMPLock), pccb); break; case tiError: AGTIAPI_PRINTK("agtiapi_StartSMP: tiINISMPStart status tiError %p\n", pccb->ccb); pccb->ccbStatus = tiSMPFailed; agtiapi_SMPDone(pmcsc, pccb); break; default: AGTIAPI_PRINTK("agtiapi_StartSMP: tiINISMPStart status default %x %p\n", status, pccb->ccb); pccb->ccbStatus = tiSMPFailed; agtiapi_SMPDone(pmcsc, pccb); } pccb = pccb_next; } ext: /* some SMP requests might have been completed */ AG_GET_DONE_SMP_PCCB(pccb, pmcsc); return; } #if __FreeBSD_version > 901000 /****************************************************************************** agtiapi_PrepareSMPSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note:
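(illustrative) When both smp_request_sglist_cnt and smp_response_sglist_cnt are zero, the CAM SMP buffers are handed to the LL layer directly; the minimal direct (non-chained) frame setup, using the driver's own fields, is:

    pccb->tiSMPFrame.outFrameBuf     = (void *)csmpio->smp_request;
    pccb->tiSMPFrame.outFrameLen     = csmpio->smp_request_len;
    pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len;
    pccb->tiSMPFrame.flag            = 0;
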
******************************************************************************/ static int agtiapi_PrepareSMPSGList( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { /* Pointer to CAM's ccb */ union ccb *ccb = pccb->ccb; struct ccb_smpio *csmpio = &ccb->smpio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_PRINTK("agtiapi_PrepareSMPSGList: start\n"); switch((ccbh->flags & CAM_DATA_MASK)) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Physical Address not supported\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject; case CAM_DATA_SG: /* * Currently we do not support Multiple SG list * return error for now */ if ( (csmpio->smp_request_sglist_cnt > 1) || (csmpio->smp_response_sglist_cnt > 1) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Multiple SG list not supported\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject; } } if ( csmpio->smp_request_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_PRINTK("agtiapi_PrepareSGList: virtual address\n"); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_request, csmpio->smp_request_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK(&(pmcsc->pCardInfo->pmIOLock)); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } if( csmpio->smp_response_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK( &(pmcsc->pCardInfo->pmIOLock) ); AGTIAPI_PRINTK( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_response, csmpio->smp_response_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if ( error == EINPROGRESS ) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. 
*/ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } else { if ( (csmpio->smp_request_sglist_cnt == 0) && (csmpio->smp_response_sglist_cnt == 0) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: physical address\n" ); pccb->tiSMPFrame.outFrameBuf = (void *)csmpio->smp_request; pccb->tiSMPFrame.outFrameLen = csmpio->smp_request_len; pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len; // 0xFF to be defined agtiapi_PrepareSMPSGListCB( pccb, NULL, 0, 0xAABBCCDD ); } pccb->tiSMPFrame.flag = 0; } return tiSuccess; } #else /****************************************************************************** agtiapi_PrepareSMPSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSMPSGList( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { /* Pointer to CAM's ccb */ union ccb *ccb = pccb->ccb; struct ccb_smpio *csmpio = &ccb->smpio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_PRINTK("agtiapi_PrepareSMPSGList: start\n"); if (ccbh->flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Physical Address " "not supported\n" ); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject; } if (ccbh->flags & CAM_SCATTER_VALID) { /* * Currently we do not support Multiple SG list * return error for now */ if ( (csmpio->smp_request_sglist_cnt > 1) || (csmpio->smp_response_sglist_cnt > 1) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Multiple SG list " "not supported\n" ); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject; } if ( csmpio->smp_request_sglist_cnt != 0 ) { /* * Virtual address that needs to be translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_PRINTK("agtiapi_PrepareSGList: virtual address\n"); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_request, csmpio->smp_request_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK(&(pmcsc->pCardInfo->pmIOLock)); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } if( csmpio->smp_response_sglist_cnt != 0 ) { /* * Virtual address that needs to be translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK( &(pmcsc->pCardInfo->pmIOLock) ); AGTIAPI_PRINTK( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_response, csmpio->smp_response_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if ( error == EINPROGRESS ) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned.
*/ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } } else { if ( (csmpio->smp_request_sglist_cnt == 0) && (csmpio->smp_response_sglist_cnt == 0) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: physical address\n" ); pccb->tiSMPFrame.outFrameBuf = (void *)csmpio->smp_request; pccb->tiSMPFrame.outFrameLen = csmpio->smp_request_len; pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len; // 0xFF to be defined agtiapi_PrepareSMPSGListCB( pccb, NULL, 0, 0xAABBCCDD ); } pccb->tiSMPFrame.flag = 0; } return tiSuccess; } #endif /****************************************************************************** agtiapi_PrepareSMPSGListCB() Purpose: Callback function for bus_dmamap_load() This function sends IO to the LL layer. Parameters: void *arg (IN) Pointer to the HBA data structure bus_dma_segment_t *segs (IN) Pointer to dma segment int nsegs (IN) number of dma segment int error (IN) error Return: Note: ******************************************************************************/ static void agtiapi_PrepareSMPSGListCB( void *arg, bus_dma_segment_t *segs, int nsegs, int error ) { pccb_t pccb = arg; union ccb *ccb = pccb->ccb; struct agtiapi_softc *pmcsc; U32 TID = CMND_TO_TARGET(ccb); int status; tiDeviceHandle_t *tiExpDevHandle; tiPortalContext_t *tiExpPortalContext; ag_portal_info_t *tiExpPortalInfo; AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: start, nsegs %d error 0x%x\n", nsegs, error ); pmcsc = pccb->pmcsc; if ( error != tiSuccess ) { if (error == 0xAABBCCDD) { // do nothing } else { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: error status 0x%x\n", error ); bus_dmamap_unload( pmcsc->buffer_dmat, pccb->CCB_dmamap ); bus_dmamap_destroy( pmcsc->buffer_dmat, pccb->CCB_dmamap ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done( ccb ); return; } } if ( nsegs > AGTIAPI_MAX_DMA_SEGS ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: over the limit. nsegs %d "
nsegs %d " "AGTIAPI_MAX_DMA_SEGS %d\n", nsegs, AGTIAPI_MAX_DMA_SEGS ); bus_dmamap_unload( pmcsc->buffer_dmat, pccb->CCB_dmamap ); bus_dmamap_destroy( pmcsc->buffer_dmat, pccb->CCB_dmamap ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done( ccb ); return; } /* * If assigned pDevHandle is not available * then there is no need to send it to StartIO() */ /* TODO: Add check for deviceType */ if ( pccb->targetId < 0 || pccb->targetId >= maxTargets ) { agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); pccb->ccb = NULL; return; } TID = INDEX( pmcsc, pccb->targetId ); if ( (TID >= pmcsc->devDiscover) || !(pccb->devHandle = pmcsc->pDevList[TID].pDevHandle) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: not sending ccb devH %p, " "target %d tid %d/%d " "card %p ERROR pccb %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc, pccb ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done( ccb ); pccb->ccb = NULL; return; } /* TODO: add indirect handling */ /* set the flag correctly based on Indiret SMP request and response */ AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: send ccb pccb->devHandle %p, " "pccb->targetId %d TID %d pmcsc->devDiscover %d card %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc ); tiExpDevHandle = pccb->devHandle; tiExpPortalInfo = pmcsc->pDevList[TID].pPortalInfo; tiExpPortalContext = &tiExpPortalInfo->tiPortalContext; /* Look for the expander associated with the ses device */ status = tiINIGetExpander( &pmcsc->tiRoot, tiExpPortalContext, pccb->devHandle, &tiExpDevHandle ); if ( status != tiSuccess ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: Error getting Expander " "device\n" ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done( ccb ); pccb->ccb = NULL; return; } /* this is expander device */ pccb->devHandle = tiExpDevHandle; /* put the request in send queue */ agtiapi_QueueCCB( pmcsc, &pmcsc->smpSendHead, &pmcsc->smpSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendSMPLock), pccb ); agtiapi_StartSMP( pmcsc ); return; } /****************************************************************************** agtiapi_Done() Purpose: Processing completed ccbs Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_Done(struct agtiapi_softc *pmcsc, ccb_t *pccb) { pccb_t pccb_curr = pccb; pccb_t pccb_next; tiIniScsiCmnd_t *cmnd; union ccb * ccb; AGTIAPI_IO("agtiapi_Done: start\n"); while (pccb_curr) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbNext; if (agtiapi_CheckError(pmcsc, pccb_curr) != 0) { /* send command back and release the ccb */ cmnd = &pccb_curr->tiSuperScsiRequest.scsiCmnd; if (cmnd->cdb[0] == RECEIVE_DIAGNOSTIC) { AGTIAPI_PRINTK("agtiapi_Done: RECEIVE_DIAG pg %d id %d cmnd %p pccb " "%p\n", cmnd->cdb[2], pccb_curr->targetId, cmnd, pccb_curr); } CMND_DMA_UNMAP(pmcsc, ccb); /* send the request back to the CAM */ ccb = pccb_curr->ccb; agtiapi_FreeCCB(pmcsc, pccb_curr); xpt_done(ccb); } pccb_curr = pccb_next; } return; } /****************************************************************************** agtiapi_SMPDone() Purpose: Processing completed ccbs Parameters: struct agtiapi_softc *pmcsc (IN) Ponter to HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: 
******************************************************************************/
STATIC void agtiapi_SMPDone(struct agtiapi_softc *pmcsc, ccb_t *pccb)
{
  pccb_t pccb_curr = pccb;
  pccb_t pccb_next;

  union ccb * ccb;

  AGTIAPI_PRINTK("agtiapi_SMPDone: start\n");

  while (pccb_curr)
  {
    /* start from the 1st ccb in the chain */
    pccb_next = pccb_curr->pccbNext;

    if (agtiapi_CheckSMPError(pmcsc, pccb_curr) != 0)
    {
      /* fetch the CAM ccb before the driver ccb is freed and reused */
      ccb = pccb_curr->ccb;
      CMND_DMA_UNMAP(pmcsc, ccb);
      /* send the request back to CAM */
      agtiapi_FreeSMPCCB(pmcsc, pccb_curr);
      xpt_done(ccb);
    }
    pccb_curr = pccb_next;
  }

  AGTIAPI_PRINTK("agtiapi_SMPDone: Done\n");
  return;
}

/******************************************************************************
agtiapi_hexdump()

Purpose:
  Utility function for dumping in hex
Parameters:
  const char *ptitle (IN)  A string to be printed
  bit8 *pbuf (IN)          A pointer to a buffer to be printed.
  int len (IN)             The length of the buffer
Return:
Note:
******************************************************************************/
void agtiapi_hexdump(const char *ptitle, bit8 *pbuf, int len)
{
  int i;

  AGTIAPI_PRINTK("%s - hexdump(len=%d):\n", ptitle, (int)len);
  if (!pbuf)
  {
    AGTIAPI_PRINTK("pbuf is NULL\n");
    return;
  }
  for (i = 0; i < len; )
  {
    if (len - i > 4)
    {
      AGTIAPI_PRINTK( " 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n",
                      pbuf[i], pbuf[i+1], pbuf[i+2], pbuf[i+3] );
      i += 4;
    }
    else
    {
      AGTIAPI_PRINTK(" 0x%02x,", pbuf[i]);
      i++;
    }
  }
  AGTIAPI_PRINTK("\n");
}

/******************************************************************************
agtiapi_CheckError()

Purpose:
  Processes status pertaining to the ccb -- whether it was
  completed successfully, aborted, or error encountered.
Parameters:
  ag_card_t *pCard (IN)  Pointer to HBA data structure
  ccb_t *pccb (IN)       A pointer to the driver's own CCB, not CAM's CCB
Return:
  0 - the command retry is required
  1 - the command process is completed
Note:
******************************************************************************/
STATIC U32 agtiapi_CheckError(struct agtiapi_softc *pmcsc, ccb_t *pccb)
{
  ag_device_t *pDevice;
  // union ccb * ccb = pccb->ccb;
  union ccb * ccb;
  int is_error, TID;

  if (pccb == NULL)
  {
    return 0;
  }
  ccb = pccb->ccb;
  AGTIAPI_IO("agtiapi_CheckError: start\n");
  if (ccb == NULL)
  {
    /* shouldn't be here, but just in case we are */
    AGTIAPI_PRINTK("agtiapi_CheckError: CCB orphan = %p ERROR\n", pccb);
    agtiapi_FreeCCB(pmcsc, pccb);
    return 0;
  }
  is_error = 1;
  pDevice = NULL;
  if (pmcsc != NULL && pccb->targetId >= 0 && pccb->targetId < maxTargets)
  {
    if (pmcsc->pWWNList != NULL)
    {
      TID = INDEX(pmcsc, pccb->targetId);
      if (TID < maxTargets)
      {
        pDevice = &pmcsc->pDevList[TID];
        if (pDevice != NULL)
        {
          is_error = 0;
        }
      }
    }
  }
  if (is_error)
  {
    AGTIAPI_PRINTK("agtiapi_CheckError: pDevice == NULL\n");
    agtiapi_FreeCCB(pmcsc, pccb);
    return 0;
  }
  /* SCSI status */
  ccb->csio.scsi_status = pccb->scsiStatus;

  if(pDevice->CCBCount > 0)
  {
    atomic_subtract_int(&pDevice->CCBCount,1);
  }
  AG_LOCAL_LOCK(&pmcsc->freezeLock);
  if(pmcsc->freezeSim == agTRUE)
  {
    pmcsc->freezeSim = agFALSE;
    xpt_release_simq(pmcsc->sim, 1);
  }
  AG_LOCAL_UNLOCK(&pmcsc->freezeLock);

  switch (pccb->ccbStatus)
  {
  case tiIOSuccess:
    AGTIAPI_IO("agtiapi_CheckError: tiIOSuccess pccb %p\n", pccb);
    /* CAM status */
    if (pccb->scsiStatus == SCSI_STATUS_OK)
    {
      ccb->ccb_h.status = CAM_REQ_CMP;
    }
    else if (pccb->scsiStatus == SCSI_TASK_ABORTED)
    {
      ccb->ccb_h.status = CAM_REQ_ABORTED;
    }
    else
    {
      ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
    }
    if (ccb->csio.scsi_status == SCSI_CHECK_CONDITION)
    {
      ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    break;

  case tiIOOverRun:
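    /*
     * Overrun: the device moved more data than the ccb asked for;
     * CAM is told via CAM_DATA_RUN_ERR below.
     */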
AGTIAPI_PRINTK("agtiapi_CheckError: tiIOOverRun pccb %p\n", pccb); /* resid is ignored for this condition */ ccb->csio.resid = 0; ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case tiIOUnderRun: AGTIAPI_PRINTK("agtiapi_CheckError: tiIOUnderRun pccb %p\n", pccb); ccb->csio.resid = pccb->scsiStatus; ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.scsi_status = SCSI_STATUS_OK; break; case tiIOFailed: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n", pccb, pccb->scsiStatus, pccb->targetId ); if (pccb->scsiStatus == tiDeviceBusy) { AGTIAPI_IO( "agtiapi_CheckError: pccb %p tiIOFailed - tiDetailBusy\n", pccb ); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); } } else if(pccb->scsiStatus == tiBusy) { AG_LOCAL_LOCK(&pmcsc->freezeLock); if(pmcsc->freezeSim == agFALSE) { pmcsc->freezeSim = agTRUE; xpt_freeze_simq(pmcsc->sim, 1); } AG_LOCAL_UNLOCK(&pmcsc->freezeLock); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; } else if (pccb->scsiStatus == tiDetailNoLogin) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailNoLogin ERROR\n", pccb ); ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else if (pccb->scsiStatus == tiDetailNotValid) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailNotValid ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_INVALID; } else if (pccb->scsiStatus == tiDetailAbortLogin) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailAbortLogin ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } else if (pccb->scsiStatus == tiDetailAbortReset) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailAbortReset ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } else if (pccb->scsiStatus == tiDetailAborted) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailAborted ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } else if (pccb->scsiStatus == tiDetailOtherError) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailOtherError ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } break; case tiIODifError: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n", pccb, pccb->scsiStatus, pccb->targetId ); if (pccb->scsiStatus == tiDetailDifAppTagMismatch) { AGTIAPI_IO( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailDifAppTagMismatch\n", pccb ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else if (pccb->scsiStatus == tiDetailDifRefTagMismatch) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailDifRefTagMismatch\n", pccb ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else if (pccb->scsiStatus == tiDetailDifCrcMismatch) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailDifCrcMismatch\n", pccb ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; } break; #ifdef HIALEAH_ENCRYPTION case tiIOEncryptError: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n", pccb, pccb->scsiStatus, pccb->targetId ); if (pccb->scsiStatus == tiDetailDekKeyCacheMiss) { AGTIAPI_PRINTK( "agtiapi_CheckError: %s: pccb %p tiIOFailed - " "tiDetailDekKeyCacheMiss ERROR\n", __FUNCTION__, pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; agtiapi_HandleEncryptedIOFailure(pDevice, pccb); } else if (pccb->scsiStatus == tiDetailDekIVMismatch) { AGTIAPI_PRINTK( "agtiapi_CheckError: %s: pccb %p tiIOFailed - " "tiDetailDekIVMismatch ERROR\n", __FUNCTION__, 
                      pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
      agtiapi_HandleEncryptedIOFailure(pDevice, pccb);
    }
    break;
#endif
  default:
    AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOdefault %d id %d ERROR\n",
                    pccb, pccb->ccbStatus, pccb->targetId );
    ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    break;
  }

  return 1;
}

/******************************************************************************
agtiapi_CheckSMPError()

Purpose:
  Processes status pertaining to the ccb -- whether it was
  completed successfully, aborted, or error encountered.
Parameters:
  ag_card_t *pCard (IN)  Pointer to HBA data structure
  ccb_t *pccb (IN)       A pointer to the driver's own CCB, not CAM's CCB
Return:
  0 - the command retry is required
  1 - the command process is completed
Note:
******************************************************************************/
STATIC U32 agtiapi_CheckSMPError( struct agtiapi_softc *pmcsc, ccb_t *pccb )
{
  union ccb * ccb = pccb->ccb;

  AGTIAPI_PRINTK("agtiapi_CheckSMPError: start\n");

  if (!ccb)
  {
    /* shouldn't be here, but just in case we are */
    AGTIAPI_PRINTK( "agtiapi_CheckSMPError: CCB orphan = %p ERROR\n",
                    pccb );
    agtiapi_FreeSMPCCB(pmcsc, pccb);
    return 0;
  }

  switch (pccb->ccbStatus)
  {
  case tiSMPSuccess:
    AGTIAPI_PRINTK( "agtiapi_CheckSMPError: tiSMPSuccess pccb %p\n",
                    pccb );
    /* CAM status */
    ccb->ccb_h.status = CAM_REQ_CMP;
    break;

  case tiSMPFailed:
    AGTIAPI_PRINTK( "agtiapi_CheckSMPError: tiSMPFailed pccb %p\n",
                    pccb );
    /* CAM status */
    ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    break;

  default:
    AGTIAPI_PRINTK( "agtiapi_CheckSMPError: pccb %p tiSMPdefault %d "
                    "id %d ERROR\n",
                    pccb,
                    pccb->ccbStatus,
                    pccb->targetId );
    ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    break;
  }

  return 1;
}

/******************************************************************************
agtiapi_HandleEncryptedIOFailure():

Purpose:
Parameters:
Return:
Note:
  Currently not used.
******************************************************************************/
void agtiapi_HandleEncryptedIOFailure(ag_device_t *pDev, ccb_t *pccb)
{
  AGTIAPI_PRINTK("agtiapi_HandleEncryptedIOFailure: start\n");
  return;
}

/******************************************************************************
agtiapi_Retry()

Purpose:
  Retry a ccb.
Parameters:
  struct agtiapi_softc *pmcsc (IN)  Pointer to the HBA structure
  ccb_t *pccb (IN)     A pointer to the driver's own CCB, not CAM's CCB
Return:
Note:
  Currently not used.
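  A hypothetical call site, were retries wired into the completion path
  (the pccb must still be live, i.e. not yet freed; AGTIAPI_MAX_RETRY is
  an illustrative cap, not an existing macro):

    if (pccb->ccbStatus == tiIOFailed && pccb->scsiStatus == tiBusy &&
        pccb->retryCount < AGTIAPI_MAX_RETRY)
      agtiapi_Retry(pmcsc, pccb);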
******************************************************************************/
STATIC void agtiapi_Retry(struct agtiapi_softc *pmcsc, ccb_t *pccb)
{
  pccb->retryCount++;
  pccb->flags      = ACTIVE | AGTIAPI_RETRY;
  pccb->ccbStatus  = 0;
  pccb->scsiStatus = 0;
  pccb->startTime  = ticks;

  AGTIAPI_PRINTK( "agtiapi_Retry: start\n" );
  AGTIAPI_PRINTK( "agtiapi_Retry: ccb %p retry %d flgs x%x\n", pccb,
                  pccb->retryCount, pccb->flags );

  agtiapi_QueueCCB(pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail
                   AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb);
  return;
}

/******************************************************************************
agtiapi_DumpCCB()

Purpose:
  Dump CCB for debugging
Parameters:
  ccb_t *pccb (IN)  A pointer to the driver's own CCB, not CAM's CCB
Return:
Note:
******************************************************************************/
STATIC void agtiapi_DumpCCB(ccb_t *pccb)
{
  AGTIAPI_PRINTK("agtiapi_DumpCCB: pccb %p, devHandle %p, tid %d, lun %d\n",
                 pccb,
                 pccb->devHandle,
                 pccb->targetId,
                 pccb->lun);
  AGTIAPI_PRINTK("flag 0x%x, add_mode 0x%x, ccbStatus 0x%x, scsiStatus 0x%x\n",
                 pccb->flags,
                 pccb->addrMode,
                 pccb->ccbStatus,
                 pccb->scsiStatus);
  AGTIAPI_PRINTK("scsi command = 0x%x, numSgElements = %d\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[0],
                 pccb->numSgElements);
  AGTIAPI_PRINTK("dataLen = 0x%x, senseLen = 0x%x\n",
                 pccb->dataLen,
                 pccb->senseLen);
  AGTIAPI_PRINTK("tiSuperScsiRequest:\n");
  AGTIAPI_PRINTK("scsiCmnd: expDataLength 0x%x, taskAttribute 0x%x\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.expDataLength,
                 pccb->tiSuperScsiRequest.scsiCmnd.taskAttribute);
  AGTIAPI_PRINTK("cdb[0] = 0x%x, cdb[1] = 0x%x, cdb[2] = 0x%x, cdb[3] = 0x%x\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[0],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[1],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[2],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[3]);
  AGTIAPI_PRINTK("cdb[4] = 0x%x, cdb[5] = 0x%x, cdb[6] = 0x%x, cdb[7] = 0x%x\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[4],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[5],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[6],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[7]);
  AGTIAPI_PRINTK( "cdb[8] = 0x%x, cdb[9] = 0x%x, cdb[10] = 0x%x, "
                  "cdb[11] = 0x%x\n",
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[8],
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[9],
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[10],
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[11] );
  AGTIAPI_PRINTK("agSgl1: upper 0x%x, lower 0x%x, len 0x%x, type %d\n",
                 pccb->tiSuperScsiRequest.agSgl1.upper,
                 pccb->tiSuperScsiRequest.agSgl1.lower,
                 pccb->tiSuperScsiRequest.agSgl1.len,
                 pccb->tiSuperScsiRequest.agSgl1.type);
}

/******************************************************************************
agtiapi_eh_HostReset()

Purpose:
  Error handler for the Host Reset command.
Parameters:
  union ccb *cmnd (IN)  Pointer to the command asking for the HBA to be reset
Return:
  SUCCESS - success
  FAILED  - fail
Note:
******************************************************************************/
int agtiapi_eh_HostReset( struct agtiapi_softc *pmcsc, union ccb *cmnd )
{
  AGTIAPI_PRINTK( "agtiapi_eh_HostReset: ccb pointer %p\n",
                  cmnd );

  if( cmnd == NULL )
  {
    printf( "agtiapi_eh_HostReset: null command, skipping reset.\n" );
    return tiInvalidHandle;
  }

#ifdef LOGEVENT
  agtiapi_LogEvent( pmcsc,
                    IOCTL_EVT_SEV_INFORMATIONAL,
                    0,
                    agNULL,
                    0,
                    "agtiapi_eh_HostReset! " );
" ); #endif return agtiapi_DoSoftReset( pmcsc ); } int agtiapi_eh_DeviceReset( struct agtiapi_softc *pmcsc, union ccb *cmnd ) { AGTIAPI_PRINTK( "agtiapi_eh_HostReset: ccb pointer %p\n", cmnd ); if( cmnd == NULL ) { printf( "agtiapi_eh_HostReset: null command, skipping reset.\n" ); return tiInvalidHandle; } return agtiapi_DoSoftReset( pmcsc ); } /****************************************************************************** agtiapi_QueueCCB() Purpose: Put ccb in ccb queue at the tail Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t *phead (IN) Double pointer to ccb queue head pccb_t *ptail (IN) Double pointer to ccb queue tail ccb_t *pccb (IN) Poiner to a ccb to be queued Return: Note: Put the ccb to the tail of queue ******************************************************************************/ STATIC void agtiapi_QueueCCB( struct agtiapi_softc *pmcsc, pccb_t *phead, pccb_t *ptail, #ifdef AGTIAPI_LOCAL_LOCK struct mtx *mutex, #endif ccb_t *pccb ) { AGTIAPI_IO( "agtiapi_QueueCCB: start\n" ); AGTIAPI_IO( "agtiapi_QueueCCB: %p to %p\n", pccb, phead ); if (phead == NULL || ptail == NULL) { panic( "agtiapi_QueueCCB: phead %p ptail %p", phead, ptail ); } pccb->pccbNext = NULL; AG_LOCAL_LOCK( mutex ); if (*phead == NULL) { //WARN_ON(*ptail != NULL); /* critical, just get more logs */ *phead = pccb; } else { //WARN_ON(*ptail == NULL); /* critical, just get more logs */ if (*ptail) (*ptail)->pccbNext = pccb; } *ptail = pccb; AG_LOCAL_UNLOCK( mutex ); return; } /****************************************************************************** agtiapi_QueueCCB() Purpose: Parameters: Return: Note: ******************************************************************************/ static int agtiapi_QueueSMP(struct agtiapi_softc *pmcsc, union ccb * ccb) { pccb_t pccb = agNULL; /* call dequeue */ int status = tiSuccess; int targetID = xpt_path_target_id(ccb->ccb_h.path); AGTIAPI_PRINTK("agtiapi_QueueSMP: start\n"); /* get a ccb */ if ((pccb = agtiapi_GetCCB(pmcsc)) == NULL) { AGTIAPI_PRINTK("agtiapi_QueueSMP: GetCCB ERROR\n"); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return tiBusy; } pccb->pmcsc = pmcsc; /* initialize Command Control Block (CCB) */ pccb->targetId = targetID; pccb->ccb = ccb; /* for struct scsi_cmnd */ status = agtiapi_PrepareSMPSGList(pmcsc, pccb); if (status != tiSuccess) { AGTIAPI_PRINTK("agtiapi_QueueSMP: agtiapi_PrepareSMPSGList failure\n"); agtiapi_FreeCCB(pmcsc, pccb); if (status == tiReject) { ccb->ccb_h.status = CAM_REQ_INVALID; } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); return tiError; } return status; } /****************************************************************************** agtiapi_SetLunField() Purpose: Set LUN field based on different address mode Parameters: ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ void agtiapi_SetLunField(ccb_t *pccb) { U08 *pchar; pchar = (U08 *)&pccb->tiSuperScsiRequest.scsiCmnd.lun; // AGTIAPI_PRINTK("agtiapi_SetLunField: start\n"); switch (pccb->addrMode) { case AGTIAPI_PERIPHERAL: *pchar++ = 0; *pchar = (U08)pccb->lun; break; case AGTIAPI_VOLUME_SET: *pchar++ = (AGTIAPI_VOLUME_SET << AGTIAPI_ADDRMODE_SHIFT) | (U08)((pccb->lun >> 8) & 0x3F); *pchar = (U08)pccb->lun; break; case AGTIAPI_LUN_ADDR: *pchar++ = (AGTIAPI_LUN_ADDR << AGTIAPI_ADDRMODE_SHIFT) | pccb->targetId; *pchar = (U08)pccb->lun; break; } } 
/***************************************************************************** agtiapi_FreeCCB() Purpose: Free a ccb and put it back to ccbFreeList. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { union ccb *ccb = pccb->ccb; bus_dmasync_op_t op; AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_IO( "agtiapi_FreeCCB: start %p\n", pccb ); #ifdef AGTIAPI_TEST_EPL tiEncrypt_t *encrypt; #endif agtiapi_DumpCDB( "agtiapi_FreeCCB", pccb ); if (pccb->sgList != agNULL) { AGTIAPI_IO( "agtiapi_FreeCCB: pccb->sgList is NOT null\n" ); } else { AGTIAPI_PRINTK( "agtiapi_FreeCCB: pccb->sgList is null\n" ); } /* set data transfer direction */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { op = BUS_DMASYNC_POSTWRITE; } else { op = BUS_DMASYNC_POSTREAD; } if (pccb->numSgElements == 0) { // do nothing AGTIAPI_IO( "agtiapi_FreeCCB: numSgElements zero\n" ); } else if (pccb->numSgElements == 1) { AGTIAPI_IO( "agtiapi_FreeCCB: numSgElements is one\n" ); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } else { AGTIAPI_PRINTK( "agtiapi_FreeCCB: numSgElements 2 or higher \n" ); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } #ifdef AGTIAPI_TEST_DPL if (pccb->tiSuperScsiRequest.Dif.enableDIFPerLA == TRUE) { if(pccb->dplPtr) memset( (char *) pccb->dplPtr, 0, MAX_DPL_REGIONS * sizeof(dplaRegion_t) ); pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = FALSE; pccb->tiSuperScsiRequest.Dif.DIFPerLAAddrLo = 0; pccb->tiSuperScsiRequest.Dif.DIFPerLAAddrHi = 0; } #endif #ifdef AGTIAPI_TEST_EPL encrypt = &pccb->tiSuperScsiRequest.Encrypt; if (encrypt->enableEncryptionPerLA == TRUE) { encrypt->enableEncryptionPerLA = FALSE; encrypt->EncryptionPerLAAddrLo = 0; encrypt->EncryptionPerLAAddrHi = 0; } #endif #ifdef ENABLE_SATA_DIF if (pccb->holePtr && pccb->dmaHandleHole) pci_free_consistent( pmcsc->pCardInfo->pPCIDev, 512, pccb->holePtr, pccb->dmaHandleHole ); pccb->holePtr = 0; pccb->dmaHandleHole = 0; #endif pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->scsiStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSuperScsiRequest, 0, AGSCSI_INIT_XCHG_LEN); #ifdef HIALEAH_ENCRYPTION if (pmcsc->encrypt) agtiapi_CleanupEncryptedIO(pmcsc, pccb); #endif pccb->flags = 0; pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /****************************************************************************** agtiapi_FlushCCBs() Purpose: Flush all in processed ccbs. 
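  When flag is AGTIAPI_CALLBACK every flushed ccb is also completed back
  to CAM with CAM_SCSI_BUS_RESET; otherwise the ccbs are only freed.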
Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure U32 flag (IN) Flag to call back Return: Note: ******************************************************************************/ STATIC void agtiapi_FlushCCBs( struct agtiapi_softc *pCard, U32 flag ) { union ccb *ccb; ccb_t *pccb; AGTIAPI_PRINTK( "agtiapi_FlushCCBs: enter \n" ); for( pccb = (pccb_t)pCard->ccbChainList; pccb != NULL; pccb = pccb->pccbChainNext ) { if( pccb->flags == 0 ) { // printf( "agtiapi_FlushCCBs: nothing, continue \n" ); continue; } ccb = pccb->ccb; if ( pccb->flags & ( TASK_MANAGEMENT | DEV_RESET ) ) { AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeTMCCB \n" ); agtiapi_FreeTMCCB( pCard, pccb ); } else { if ( pccb->flags & TAG_SMP ) { AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeSMPCCB \n" ); agtiapi_FreeSMPCCB( pCard, pccb ); } else { AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeCCB \n" ); agtiapi_FreeCCB( pCard, pccb ); } if( ccb ) { CMND_DMA_UNMAP( pCard, ccb ); if( flag == AGTIAPI_CALLBACK ) { ccb->ccb_h.status = CAM_SCSI_BUS_RESET; xpt_done( ccb ); } } } } } /***************************************************************************** agtiapi_FreeSMPCCB() Purpose: Free a ccb and put it back to ccbFreeList. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeSMPCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { union ccb *ccb = pccb->ccb; bus_dmasync_op_t op; AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: start %p\n", pccb); /* set data transfer direction */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { op = BUS_DMASYNC_POSTWRITE; } else { op = BUS_DMASYNC_POSTREAD; } if (pccb->numSgElements == 0) { // do nothing AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 0\n"); } else if (pccb->numSgElements == 1) { AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 1\n"); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } else { AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 2 or higher \n"); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } /*dma api cleanning*/ pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSMPFrame, 0, AGSMP_INIT_XCHG_LEN); pccb->flags = 0; pccb->ccb = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /***************************************************************************** agtiapi_FreeTMCCB() Purpose: Free a ccb and put it back to ccbFreeList. 
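  Task-management ccbs carry no data transfer, so unlike agtiapi_FreeCCB()
  there is no dmamap sync/unload to undo before the ccb goes back on the
  free list.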
Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeTMCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_PRINTK("agtiapi_FreeTMCCB: start %p\n", pccb); pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->scsiStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSuperScsiRequest, 0, AGSCSI_INIT_XCHG_LEN); pccb->flags = 0; pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /****************************************************************************** agtiapi_CheckAllVectors(): Purpose: Parameters: Return: Note: Currently, not used. ******************************************************************************/ void agtiapi_CheckAllVectors( struct agtiapi_softc *pCard, bit32 context ) { #ifdef SPC_MSIX_INTR if (!agtiapi_intx_mode) { int i; for (i = 0; i < pCard->pCardInfo->maxInterruptVectors; i++) if (tiCOMInterruptHandler(&pCard->tiRoot, i) == agTRUE) tiCOMDelayedInterruptHandler(&pCard->tiRoot, i, 100, context); } else if (tiCOMInterruptHandler(&pCard->tiRoot, 0) == agTRUE) tiCOMDelayedInterruptHandler(&pCard->tiRoot, 0, 100, context); #else if (tiCOMInterruptHandler(&pCard->tiRoot, 0) == agTRUE) tiCOMDelayedInterruptHandler(&pCard->tiRoot, 0, 100, context); #endif } /****************************************************************************** agtiapi_CheckCB() Purpose: Check call back function returned event for process completion Parameters: struct agtiapi_softc *pCard Pointer to card data structure U32 milisec (IN) Waiting time for expected event U32 flag (IN) Flag of the event to check U32 *pStatus (IN) Pointer to status of the card or port to check Return: AGTIAPI_SUCCESS - event comes as expected AGTIAPI_FAIL - event not coming Note: Currently, not used ******************************************************************************/ agBOOLEAN agtiapi_CheckCB( struct agtiapi_softc *pCard, U32 milisec, U32 flag, volatile U32 *pStatus ) { U32 msecsPerTick = pCard->pCardInfo->tiRscInfo.tiInitiatorResource. 
initiatorOption.usecsPerTick / 1000; S32 i = milisec/msecsPerTick; AG_GLOBAL_ARG( _flags ); AGTIAPI_PRINTK( "agtiapi_CheckCB: start\n" ); AGTIAPI_FLOW( "agtiapi_CheckCB: start\n" ); if( i <= 0 ) i = 1; while (i > 0) { if (*pStatus & TASK_MANAGEMENT) { if (*pStatus & AGTIAPI_CB_DONE) { if( flag == 0 || *pStatus & flag ) return AGTIAPI_SUCCESS; else return AGTIAPI_FAIL; } } else if (pCard->flags & AGTIAPI_CB_DONE) { if( flag == 0 || *pStatus & flag ) return AGTIAPI_SUCCESS; else return AGTIAPI_FAIL; } agtiapi_DelayMSec( msecsPerTick ); AG_SPIN_LOCK_IRQ( agtiapi_host_lock, _flags ); tiCOMTimerTick( &pCard->tiRoot ); agtiapi_CheckAllVectors( pCard, tiNonInterruptContext ); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, _flags ); i--; } if( *pStatus & TASK_MANAGEMENT ) *pStatus |= TASK_TIMEOUT; return AGTIAPI_FAIL; } /****************************************************************************** agtiapi_DiscoverTgt() Purpose: Discover available devices Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_DiscoverTgt(struct agtiapi_softc *pCard) { ag_portal_data_t *pPortalData; U32 count; AGTIAPI_PRINTK("agtiapi_DiscoverTgt: start\n"); AGTIAPI_FLOW("agtiapi_DiscoverTgt\n"); AGTIAPI_INIT("agtiapi_DiscoverTgt\n"); pPortalData = pCard->pPortalData; for (count = 0; count < pCard->portCount; count++, pPortalData++) { pCard->flags &= ~AGTIAPI_CB_DONE; if (!(PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY)) { if (pCard->flags & AGTIAPI_INIT_TIME) { if (agtiapi_CheckCB(pCard, 5000, AGTIAPI_PORT_DISC_READY, &PORTAL_STATUS(pPortalData)) == AGTIAPI_FAIL) { AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Port %p / %d not ready for " "discovery\n", pPortalData, count ); /* * There is no need to spend time on discovering device * if port is not ready to do so. */ continue; } } else continue; } AGTIAPI_FLOW( "agtiapi_DiscoverTgt: Portal %p DiscoverTargets starts\n", pPortalData ); AGTIAPI_INIT_DELAY(1000); pCard->flags &= ~AGTIAPI_CB_DONE; if (tiINIDiscoverTargets(&pCard->tiRoot, &pPortalData->portalInfo.tiPortalContext, FORCE_PERSISTENT_ASSIGN_MASK) != tiSuccess) AGTIAPI_PRINTK("agtiapi_DiscoverTgt: tiINIDiscoverTargets ERROR\n"); /* * Should wait till discovery completion to start * next portal. However, lower layer have issue on * multi-portal case under Linux. */ } pPortalData = pCard->pPortalData; for (count = 0; count < pCard->portCount; count++, pPortalData++) { if ((PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY)) { if (agtiapi_CheckCB(pCard, 20000, AGTIAPI_DISC_COMPLETE, &PORTAL_STATUS(pPortalData)) == AGTIAPI_FAIL) { if ((PORTAL_STATUS(pPortalData) & AGTIAPI_DISC_COMPLETE)) AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %p discover complete, " "status 0x%x\n", pPortalData, PORTAL_STATUS(pPortalData) ); else AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %p discover is not " "completed, status 0x%x\n", pPortalData, PORTAL_STATUS(pPortalData) ); continue; } AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %d discover target " "success\n", count ); } } /* * Calling to get device handle should be done per portal based * and better right after discovery is done. However, lower iscsi * layer may not returns discovery complete in correct sequence or we * ran out time. We get device handle for all portals together * after discovery is done or timed out. 
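   * (AGTIAPI_GET_DEV_MAX below bounds how many times agtiapi_GetDevHandle()
   * is polled per portal before we give up.)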
*/ pPortalData = pCard->pPortalData; for (count = 0; count < pCard->portCount; count++, pPortalData++) { /* * We try to get device handle no matter * if discovery is completed or not. */ if (PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY) { U32 i; for (i = 0; i < AGTIAPI_GET_DEV_MAX; i++) { if (agtiapi_GetDevHandle(pCard, &pPortalData->portalInfo, 0, 0) != 0) break; agtiapi_DelayMSec(AGTIAPI_EXTRA_DELAY); } if ((PORTAL_STATUS(pPortalData) & AGTIAPI_DISC_COMPLETE) || (pCard->tgtCount > 0)) PORTAL_STATUS(pPortalData) |= ( AGTIAPI_DISC_DONE | AGTIAPI_PORT_LINK_UP ); } } return; } /****************************************************************************** agtiapi_PrepCCBs() Purpose: Prepares CCB including DMA map. Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure ccb_hdr_t *hdr (IN) Pointer to the CCB header U32 size (IN) size U32 max_ccb (IN) count Return: Note: ******************************************************************************/ STATIC void agtiapi_PrepCCBs( struct agtiapi_softc *pCard, ccb_hdr_t *hdr, U32 size, U32 max_ccb, int tid ) { int i; U32 hdr_sz, ccb_sz; - ccb_t *pccb = 0; + ccb_t *pccb = NULL; int offset = 0; int nsegs = 0; int sgl_sz = 0; AGTIAPI_PRINTK("agtiapi_PrepCCBs: start\n"); offset = tid * AGTIAPI_CCB_PER_DEVICE; nsegs = AGTIAPI_NSEGS; sgl_sz = sizeof(tiSgl_t) * nsegs; AGTIAPI_PRINTK( "agtiapi_PrepCCBs: tid %d offset %d nsegs %d sizeof(tiSgl_t) " "%lu, max_ccb %d\n", tid, offset, nsegs, sizeof(tiSgl_t), max_ccb ); ccb_sz = roundup2(AGTIAPI_CCB_SIZE, cache_line_size()); hdr_sz = roundup2(sizeof(*hdr), cache_line_size()); AGTIAPI_PRINTK("agtiapi_PrepCCBs: after cache line\n"); memset((void *)hdr, 0, size); hdr->next = pCard->ccbAllocList; pCard->ccbAllocList = hdr; AGTIAPI_PRINTK("agtiapi_PrepCCBs: after memset\n"); pccb = (ccb_t*) ((char*)hdr + hdr_sz); for (i = 0; i < max_ccb; i++, pccb = (ccb_t*)((char*)pccb + ccb_sz)) { pccb->tiIORequest.osData = (void *)pccb; /* * Initially put all the ccbs on the free list * in addition to chainlist. 
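     * (ccbFreeList is the working stack: agtiapi_GetCCB() pops from it
     * and agtiapi_FreeCCB() pushes back onto it.)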
* ccbChainList is a list of all available ccbs * (free/active everything) */ pccb->pccbChainNext = (pccb_t)pCard->ccbChainList; pccb->pccbNext = (pccb_t)pCard->ccbFreeList; pCard->ccbChainList = (caddr_t *)pccb; pCard->ccbFreeList = (caddr_t *)pccb; pCard->ccbTotal++; #ifdef AGTIAPI_ALIGN_CHECK if (&pccb & 0x63) AGTIAPI_PRINTK("pccb = %p\n", pccb); if (pccb->devHandle & 0x63) AGTIAPI_PRINTK("devHandle addr = %p\n", &pccb->devHandle); if (&pccb->lun & 0x63) AGTIAPI_PRINTK("lun addr = %p\n", &pccb->lun); if (&pccb->targetId & 0x63) AGTIAPI_PRINTK("tig addr = %p\n", &pccb->targetId); if (&pccb->ccbStatus & 0x63) AGTIAPI_PRINTK("ccbStatus addr = %p\n", &pccb->ccbStatus); if (&pccb->scsiStatus & 0x63) AGTIAPI_PRINTK("scsiStatus addr = %p\n", &pccb->scsiStatus); if (&pccb->dataLen & 0x63) AGTIAPI_PRINTK("dataLen addr = %p\n", &pccb->dataLen); if (&pccb->senseLen & 0x63) AGTIAPI_PRINTK("senseLen addr = %p\n", &pccb->senseLen); if (&pccb->numSgElements & 0x63) AGTIAPI_PRINTK("numSgElements addr = %p\n", &pccb->numSgElements); if (&pccb->retryCount & 0x63) AGTIAPI_PRINTK("retry cnt addr = %p\n", &pccb->retryCount); if (&pccb->flags & 0x63) AGTIAPI_PRINTK("flag addr = %p\n", &pccb->flags); if (&pccb->pSenseData & 0x63) AGTIAPI_PRINTK("senseData addr = %p\n", &pccb->pSenseData); if (&pccb->sgList[0] & 0x63) AGTIAPI_PRINTK("SgList 0 = %p\n", &pccb->sgList[0]); if (&pccb->pccbNext & 0x63) AGTIAPI_PRINTK("ccb next = %p\n", &pccb->pccbNext); if (&pccb->pccbChainNext & 0x63) AGTIAPI_PRINTK("ccbChainNext = %p\n", &pccb->pccbChainNext); if (&pccb->cmd & 0x63) AGTIAPI_PRINTK("command = %p\n", &pccb->cmd); if( &pccb->startTime & 0x63 ) AGTIAPI_PRINTK( "startTime = %p\n", &pccb->startTime ); if (&pccb->tiIORequest & 0x63) AGTIAPI_PRINTK("tiIOReq addr = %p\n", &pccb->tiIORequest); if (&pccb->tdIOReqBody & 0x63) AGTIAPI_PRINTK("tdIORequestBody addr = %p\n", &pccb->tdIOReqBody); if (&pccb->tiSuperScsiRequest & 0x63) AGTIAPI_PRINTK( "InitiatorExchange addr = %p\n", &pccb->tiSuperScsiRequest ); #endif if ( bus_dmamap_create( pCard->buffer_dmat, 0, &pccb->CCB_dmamap ) != tiSuccess) { AGTIAPI_PRINTK("agtiapi_PrepCCBs: can't create dma\n"); return; } /* assigns tiSgl_t memory to pccb */ pccb->sgList = (void*)((U64)pCard->tisgl_mem + ((i + offset) * sgl_sz)); pccb->tisgl_busaddr = pCard->tisgl_busaddr + ((i + offset) * sgl_sz); pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->startTime = 0; } #ifdef AGTIAPI_ALIGN_CHECK AGTIAPI_PRINTK("ccb size = %d / %d\n", sizeof(ccb_t), ccb_sz); #endif return; } /****************************************************************************** agtiapi_InitCCBs() Purpose: Create and initialize per card based CCB pool. 
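  For example, with tgtCount == 1 this makes a single allocation of
  roundup2(AGTIAPI_CCB_SIZE, cache_line_size()) * AGTIAPI_CCB_PER_DEVICE
  bytes plus one rounded-up header, which agtiapi_PrepCCBs() then carves
  into individual ccbs.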
Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure int tgtCount (IN) Count Return: Total number of ccb allocated Note: ******************************************************************************/ STATIC U32 agtiapi_InitCCBs(struct agtiapi_softc *pCard, int tgtCount, int tid) { U32 max_ccb, size, ccb_sz, hdr_sz; int no_allocs = 0, i; - ccb_hdr_t *hdr = 0; + ccb_hdr_t *hdr = NULL; AGTIAPI_PRINTK("agtiapi_InitCCBs: start\n"); AGTIAPI_PRINTK("agtiapi_InitCCBs: tgtCount %d tid %d\n", tgtCount, tid); AGTIAPI_FLOW("agtiapi_InitCCBs: tgtCount %d tid %d\n", tgtCount, tid); #ifndef HOTPLUG_SUPPORT if (pCard->tgtCount > AGSA_MAX_INBOUND_Q) return 1; #else if (tgtCount > AGSA_MAX_INBOUND_Q) tgtCount = AGSA_MAX_INBOUND_Q; #endif max_ccb = tgtCount * AGTIAPI_CCB_PER_DEVICE;// / 4; // TBR ccb_sz = roundup2(AGTIAPI_CCB_SIZE, cache_line_size()); hdr_sz = roundup2(sizeof(*hdr), cache_line_size()); size = ccb_sz * max_ccb + hdr_sz; for (i = 0; i < (1 << no_allocs); i++) { hdr = (ccb_hdr_t*)malloc( size, M_PMC_MCCB, M_NOWAIT ); if( !hdr ) { panic( "agtiapi_InitCCBs: bug!!!\n" ); } else { agtiapi_PrepCCBs( pCard, hdr, size, max_ccb, tid ); } } return 1; } #ifdef LINUX_PERBI_SUPPORT /****************************************************************************** agtiapi_GetWWNMappings() Purpose: Get the mappings from target IDs to WWNs, if any. Store them in the WWN_list array, indexed by target ID. Leave the devListIndex field blank; this will be filled-in later. Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure ag_mapping_t *pMapList (IN) Pointer to mapped device list Return: Note: The boot command line parameters are used to load the mapping information, which is contained in the system configuration file. ******************************************************************************/ STATIC void agtiapi_GetWWNMappings( struct agtiapi_softc *pCard, ag_mapping_t *pMapList ) { int devDisc; int lIdx = 0; ag_tgt_map_t *pWWNList; ag_slr_map_t *pSLRList; ag_device_t *pDevList; if( !pCard ) panic( "agtiapi_GetWWNMappings: no pCard \n" ); AGTIAPI_PRINTK( "agtiapi_GetWWNMappings: start\n" ); pWWNList = pCard->pWWNList; pSLRList = pCard->pSLRList; pDevList = pCard->pDevList; pCard->numTgtHardMapped = 0; devDisc = pCard->devDiscover; pWWNList[devDisc-1].devListIndex = maxTargets; pSLRList[devDisc-1].localeNameLen = -2; pSLRList[devDisc-1].remoteNameLen = -2; pDevList[devDisc-1].targetId = maxTargets; /* * Get the mappings from holding area which contains * the input of the system file and store them * in the WWN_list array, indexed by target ID. 
*/ for ( lIdx = 0; lIdx < devDisc - 1; lIdx++) { pWWNList[lIdx].flags = 0; pWWNList[lIdx].devListIndex = maxTargets; pSLRList[lIdx].localeNameLen = -1; pSLRList[lIdx].remoteNameLen = -1; } // this is where we would propagate values fed to pMapList } /* agtiapi_GetWWNMappings */ #endif /****************************************************************************** agtiapi_FindWWNListNext() Purpose: finds first available new (unused) wwn list entry Parameters: ag_tgt_map_t *pWWNList Pointer to head of wwn list int lstMax Number of entries in WWNList Return: index into WWNList indicating available entry space; if available entry space is not found, return negative value ******************************************************************************/ STATIC int agtiapi_FindWWNListNext( ag_tgt_map_t *pWWNList, int lstMax ) { int lLstIdx; for ( lLstIdx = 0; lLstIdx < lstMax; lLstIdx++ ) { if ( pWWNList[lLstIdx].devListIndex == lstMax && pWWNList[lLstIdx].targetLen == 0 ) { AGTIAPI_PRINTK( "agtiapi_FindWWNListNext: %d %d %d %d v. %d\n", lLstIdx, pWWNList[lLstIdx].devListIndex, pWWNList[lLstIdx].targetLen, pWWNList[lLstIdx].portId, lstMax ); return lLstIdx; } } return -1; } /****************************************************************************** agtiapi_GetDevHandle() Purpose: Get device handle. Handles will be placed in the devlist array with same order as TargetList provided and will be mapped to a scsi target id and registered to OS later. Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure ag_portal_info_t *pPortalInfo (IN) Pointer to the portal data structure U32 eType (IN) Port event U32 eStatus (IN) Port event status Return: Number of device handle slot present Note: The sequence of device handle will match the sequence of taregt list ******************************************************************************/ STATIC U32 agtiapi_GetDevHandle( struct agtiapi_softc *pCard, ag_portal_info_t *pPortalInfo, U32 eType, U32 eStatus ) { ag_device_t *pDevice; // tiDeviceHandle_t *agDev[pCard->devDiscover]; tiDeviceHandle_t **agDev; int devIdx, szdv, devTotal, cmpsetRtn; int lDevIndex = 0, lRunScanFlag = FALSE; int *lDevFlags; tiPortInfo_t portInfT; ag_device_t lTmpDevice; ag_tgt_map_t *pWWNList; ag_slr_map_t *pSLRList; bit32 lReadRm; bit16 lReadCt; AGTIAPI_PRINTK( "agtiapi_GetDevHandle: start\n" ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: pCard->devDiscover %d / tgtCt %d\n", pCard->devDiscover, pCard->tgtCount ); AGTIAPI_FLOW( "agtiapi_GetDevHandle: portalInfo %p\n", pPortalInfo ); AGTIAPI_INIT_DELAY( 1000 ); agDev = (tiDeviceHandle_t **) malloc( sizeof(tiDeviceHandle_t *) * pCard->devDiscover, M_PMC_MDEV, M_ZERO | M_NOWAIT); if (agDev == NULL) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: failed to alloc agDev[]\n" ); return 0; } lDevFlags = (int *) malloc( sizeof(int) * pCard->devDiscover, M_PMC_MFLG, M_ZERO | M_NOWAIT ); if (lDevFlags == NULL) { free((caddr_t)agDev, M_PMC_MDEV); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: failed to alloc lDevFlags[]\n" ); return 0; } pWWNList = pCard->pWWNList; pSLRList = pCard->pSLRList; memset( (void *)agDev, 0, sizeof(void *) * pCard->devDiscover ); memset( lDevFlags, 0, sizeof(int) * pCard->devDiscover ); // get device handles devTotal = tiINIGetDeviceHandles( &pCard->tiRoot, &pPortalInfo->tiPortalContext, (tiDeviceHandle_t **)agDev, pCard->devDiscover ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: portalInfo %p port id %d event %u " "status %u card %p pCard->devDiscover %d devTotal %d " "pPortalInfo->devTotal %d pPortalInfo->devPrev %d " 
"AGTIAPI_INIT_TIME %x\n", pPortalInfo, pPortalInfo->portID, eType, eStatus, pCard, pCard->devDiscover, devTotal, pPortalInfo->devTotal, pPortalInfo->devPrev, pCard->flags & AGTIAPI_INIT_TIME ); // reset devTotal from any previous runs of this pPortalInfo->devPrev = devTotal; pPortalInfo->devTotal = devTotal; AG_LIST_LOCK( &pCard->devListLock ); if ( tiCOMGetPortInfo( &pCard->tiRoot, &pPortalInfo->tiPortalContext, &portInfT ) != tiSuccess) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: tiCOMGetPortInfo did not succeed. \n" ); } szdv = sizeof( pPortalInfo->pDevList ) / sizeof( pPortalInfo->pDevList[0] ); if (szdv > pCard->devDiscover) { szdv = pCard->devDiscover; } // reconstructing dev list via comparison of wwn for ( devIdx = 0; devIdx < pCard->devDiscover; devIdx++ ) { - if ( agDev[devIdx] != 0 ) + if ( agDev[devIdx] != NULL ) { // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: agDev %d not NULL %p\n", // devIdx, agDev[devIdx] ); // pack temp device structure for tiINIGetDeviceInfo call pDevice = &lTmpDevice; pDevice->devType = DIRECT_DEVICE; pDevice->pCard = (void *)pCard; pDevice->flags = ACTIVE; pDevice->pPortalInfo = pPortalInfo; pDevice->pDevHandle = agDev[devIdx]; pDevice->qbusy = agFALSE; //AGTIAPI_PRINTK( "agtiapi_GetDevHandle: idx %d / %d : %p \n", // devIdx, pCard->devDiscover, agDev[devIdx] ); tiINIGetDeviceInfo( &pCard->tiRoot, agDev[devIdx], &pDevice->devInfo ); //AGTIAPI_PRINTK( "agtiapi_GetDevHandle: wwn sizes %ld %d/%d ", // sizeof(pDevice->targetName), // pDevice->devInfo.osAddress1, // pDevice->devInfo.osAddress2 ); wwncpy( pDevice ); wwnprintk( (unsigned char*)pDevice->targetName, pDevice->targetLen ); for ( lDevIndex = 0; lDevIndex < szdv; lDevIndex++ ) // match w/ wwn list { if ( (pCard->pDevList[lDevIndex].portalId == pPortalInfo->portID) && pDevice->targetLen > 0 && portInfT.localNameLen > 0 && portInfT.remoteNameLen > 0 && pSLRList[pWWNList[lDevIndex].sasLrIdx].localeNameLen > 0 && pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteNameLen > 0 && ( portInfT.localNameLen == pSLRList[pWWNList[lDevIndex].sasLrIdx].localeNameLen ) && ( portInfT.remoteNameLen == pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteNameLen ) && memcmp( pWWNList[lDevIndex].targetName, pDevice->targetName, pDevice->targetLen ) == 0 && memcmp( pSLRList[pWWNList[lDevIndex].sasLrIdx].localeName, portInfT.localName, portInfT.localNameLen ) == 0 && memcmp( pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteName, portInfT.remoteName, portInfT.remoteNameLen ) == 0 ) { AGTIAPI_PRINTK( " pWWNList match @ %d/%d/%d \n", lDevIndex, devIdx, pPortalInfo->portID ); if ( (pCard->pDevList[lDevIndex].targetId == lDevIndex) && ( pPortalInfo->pDevList[lDevIndex] == &pCard->pDevList[lDevIndex] ) ) // active { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: dev in use %d of %d/%d\n", lDevIndex, devTotal, pPortalInfo->portID ); lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev handle lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used lReadRm = atomic_readandclear_32( &pWWNList[lDevIndex].devRemoved ); if ( lReadRm ) // cleared timeout, now remove count for timer { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: clear timer count for" " %d of %d\n", lDevIndex, pPortalInfo->portID ); atomic_subtract_16( &pCard->rmChkCt, 1 ); lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { callout_stop( &pCard->devRmTimer ); } } break; } AGTIAPI_PRINTK( "agtiapi_GetDevHandle: goin fresh on %d of %d/%d\n", lDevIndex, // reactivate now devTotal, pPortalInfo->portID ); // pDevice going fresh lRunScanFlag = TRUE; // scan and clear outstanding 
removals // pCard->tgtCount++; ## pDevice->targetId = lDevIndex; pDevice->portalId = pPortalInfo->portID; memcpy ( &pCard->pDevList[lDevIndex], pDevice, sizeof(lTmpDevice) ); agDev[devIdx]->osData = (void *)&pCard->pDevList[lDevIndex]; if ( agtiapi_InitCCBs( pCard, 1, pDevice->targetId ) == 0 ) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: InitCCB " "tgtCnt %d ERROR!\n", pCard->tgtCount ); AG_LIST_UNLOCK( &pCard->devListLock ); free((caddr_t)lDevFlags, M_PMC_MFLG); free((caddr_t)agDev, M_PMC_MDEV); return 0; } pPortalInfo->pDevList[lDevIndex] = &pCard->pDevList[lDevIndex]; // (ag_device_t *) if ( 0 == lDevFlags[devIdx] ) { pPortalInfo->devTotal++; lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev used lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used } else { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: odd dev handle " "status inspect %d %d %d\n", lDevFlags[devIdx], devIdx, lDevIndex ); pPortalInfo->devTotal++; lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev used lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used } break; } } // end: match this wwn with previous wwn list // we have an agDev entry, but no pWWNList target for it if ( !(lDevFlags[devIdx] & DPMC_LEANFLAG_AGDEVUSED) ) { // flag dev handle not accounted for yet lDevFlags[devIdx] |= DPMC_LEANFLAG_NOWWNLIST; // later, get an empty pDevice and map this agDev. // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: devIdx %d flags 0x%x, %d\n", // devIdx, lDevFlags[devIdx], (lDevFlags[devIdx] & 8) ); } } else { lDevFlags[devIdx] |= DPMC_LEANFLAG_NOAGDEVYT; // known empty agDev handle } } // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: all WWN all the time, " // "devLstIdx/flags/(WWNL)portId ... \n" ); // review device list for further action needed for ( devIdx = 0; devIdx < pCard->devDiscover; devIdx++ ) { if ( lDevFlags[devIdx] & DPMC_LEANFLAG_NOWWNLIST ) // new target, register { int lNextDyad; // find next available dyad entry AGTIAPI_PRINTK( "agtiapi_GetDevHandle: register new target, " "devIdx %d -- %d \n", devIdx, pCard->devDiscover ); lRunScanFlag = TRUE; // scan and clear outstanding removals for ( lNextDyad = 0; lNextDyad < pCard->devDiscover; lNextDyad++ ) { if ( pSLRList[lNextDyad].localeNameLen < 0 && pSLRList[lNextDyad].remoteNameLen < 0 ) break; } if ( lNextDyad == pCard->devDiscover ) { printf( "agtiapi_GetDevHandle: failed to find available SAS LR\n" ); AG_LIST_UNLOCK( &pCard->devListLock ); free( (caddr_t)lDevFlags, M_PMC_MFLG ); free( (caddr_t)agDev, M_PMC_MDEV ); return 0; } // index of new entry lDevIndex = agtiapi_FindWWNListNext( pWWNList, pCard->devDiscover ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: listIdx new target %d of %d/%d\n", lDevIndex, devTotal, pPortalInfo->portID ); if ( 0 > lDevIndex ) { printf( "agtiapi_GetDevHandle: WARNING -- WWNList exhausted.\n" ); continue; } pDevice = &pCard->pDevList[lDevIndex]; tiINIGetDeviceInfo( &pCard->tiRoot, agDev[devIdx], &pDevice->devInfo ); wwncpy( pDevice ); agtiapi_InitCCBs( pCard, 1, lDevIndex ); pDevice->pCard = (void *)pCard; pDevice->devType = DIRECT_DEVICE; // begin to populate new WWNList entry memcpy( pWWNList[lDevIndex].targetName, pDevice->targetName, pDevice->targetLen ); pWWNList[lDevIndex].targetLen = pDevice->targetLen; pWWNList[lDevIndex].flags = SOFT_MAPPED; pWWNList[lDevIndex].portId = pPortalInfo->portID; pWWNList[lDevIndex].devListIndex = lDevIndex; pWWNList[lDevIndex].sasLrIdx = lNextDyad; pSLRList[lNextDyad].localeNameLen = portInfT.localNameLen; pSLRList[lNextDyad].remoteNameLen = portInfT.remoteNameLen; memcpy( 
pSLRList[lNextDyad].localeName, portInfT.localName, portInfT.localNameLen ); memcpy( pSLRList[lNextDyad].remoteName, portInfT.remoteName, portInfT.remoteNameLen ); // end of populating new WWNList entry pDevice->targetId = lDevIndex; pDevice->flags = ACTIVE; pDevice->CCBCount = 0; pDevice->pDevHandle = agDev[devIdx]; agDev[devIdx]->osData = (void*)pDevice; pDevice->pPortalInfo = pPortalInfo; pDevice->portalId = pPortalInfo->portID; pPortalInfo->pDevList[lDevIndex] = (void*)pDevice; lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // mark pDevice slot used } if ( (pCard->pDevList[devIdx].portalId == pPortalInfo->portID) && !(lDevFlags[devIdx] & DPMC_LEANFLAG_PDEVSUSED) ) // pDevice not used { pDevice = &pCard->pDevList[devIdx]; //pDevice->flags &= ~ACTIVE; if ( ( pDevice->pDevHandle != NULL || pPortalInfo->pDevList[devIdx] != NULL ) ) { atomic_add_16( &pCard->rmChkCt, 1 ); // show count of lost device if (FALSE == lRunScanFlag) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: targ dropped out %d of %d/%d\n", devIdx, devTotal, pPortalInfo->portID ); // if ( 0 == pWWNList[devIdx].devRemoved ) '.devRemoved = 5; cmpsetRtn = atomic_cmpset_32( &pWWNList[devIdx].devRemoved, 0, 5 ); if ( 0 == cmpsetRtn ) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: target %d timer already set\n", devIdx ); } else { callout_reset( &pCard->devRmTimer, 1 * hz, agtiapi_devRmCheck, pCard ); } } // else ... scan coming soon enough anyway, ignore timer for dropout } } } // end of for ( devIdx = 0; ... AG_LIST_UNLOCK( &pCard->devListLock ); free((caddr_t)lDevFlags, M_PMC_MFLG); free((caddr_t)agDev, M_PMC_MDEV); if ( TRUE == lRunScanFlag ) agtiapi_clrRmScan( pCard ); return devTotal; } // end agtiapi_GetDevHandle /****************************************************************************** agtiapi_scan() Purpose: Triggers CAM's scan Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ static void agtiapi_scan(struct agtiapi_softc *pmcsc) { union ccb *ccb; int bus, tid, lun, card_no; static int num=0; AGTIAPI_PRINTK("agtiapi_scan: start cardNO %d \n", pmcsc->cardNo); bus = cam_sim_path(pmcsc->sim); tid = CAM_TARGET_WILDCARD; lun = CAM_LUN_WILDCARD; mtx_lock(&(pmcsc->pCardInfo->pmIOLock)); ccb = xpt_alloc_ccb_nowait(); if (ccb == agNULL) { mtx_unlock(&(pmcsc->pCardInfo->pmIOLock)); return; } if (xpt_create_path(&ccb->ccb_h.path, agNULL, bus, tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock(&(pmcsc->pCardInfo->pmIOLock)); xpt_free_ccb(ccb); return; } mtx_unlock(&(pmcsc->pCardInfo->pmIOLock)); pmcsc->dev_scan = agTRUE; xpt_rescan(ccb); return; } /****************************************************************************** agtiapi_DeQueueCCB() Purpose: Remove a ccb from a queue Parameters: struct agtiapi_softc *pCard (IN) Pointer to the card structure pccb_t *phead (IN) Pointer to a head of ccb queue ccb_t *pccd (IN) Pointer to the ccb to be processed Return: AGTIAPI_SUCCESS - the ccb is removed from queue AGTIAPI_FAIL - the ccb is not found from queue Note: ******************************************************************************/ STATIC agBOOLEAN agtiapi_DeQueueCCB(struct agtiapi_softc *pCard, pccb_t *phead, pccb_t *ptail, #ifdef AGTIAPI_LOCAL_LOCK struct mtx *lock, #endif ccb_t *pccb) { ccb_t *pccb_curr; U32 status = AGTIAPI_FAIL; AGTIAPI_PRINTK("agtiapi_DeQueueCCB: %p from %p\n", pccb, phead); if (pccb == NULL || *phead == NULL) { return AGTIAPI_FAIL; } AGTIAPI_PRINTK("agtiapi_DeQueueCCB: %p from %p\n", 
pccb, phead); AG_LOCAL_LOCK(lock); if (pccb == *phead) { *phead = (*phead)->pccbNext; if (pccb == *ptail) { *ptail = NULL; } else pccb->pccbNext = NULL; status = AGTIAPI_SUCCESS; } else { pccb_curr = *phead; while (pccb_curr->pccbNext != NULL) { if (pccb_curr->pccbNext == pccb) { pccb_curr->pccbNext = pccb->pccbNext; pccb->pccbNext = NULL; if (pccb == *ptail) { *ptail = pccb_curr; } else pccb->pccbNext = NULL; status = AGTIAPI_SUCCESS; break; } pccb_curr = pccb_curr->pccbNext; } } AG_LOCAL_UNLOCK(lock); return status; } STATIC void wwnprintk( unsigned char *name, int len ) { int i; for (i = 0; i < len; i++, name++) AGTIAPI_PRINTK("%02x", *name); AGTIAPI_PRINTK("\n"); } /* * SAS and SATA behind expander has 8 byte long unique address. * However, direct connect SATA device use 512 byte unique device id. * SPC uses remoteName to indicate length of ID and remoteAddress for the * address of memory that holding ID. */ STATIC int wwncpy( ag_device_t *pDevice ) { int rc = 0; if (sizeof(pDevice->targetName) >= pDevice->devInfo.osAddress1 + pDevice->devInfo.osAddress2) { memcpy(pDevice->targetName, pDevice->devInfo.remoteName, pDevice->devInfo.osAddress1); memcpy(pDevice->targetName + pDevice->devInfo.osAddress1, pDevice->devInfo.remoteAddress, pDevice->devInfo.osAddress2); pDevice->targetLen = pDevice->devInfo.osAddress1 + pDevice->devInfo.osAddress2; rc = pDevice->targetLen; } else { AGTIAPI_PRINTK("WWN wrong size: %d + %d ERROR\n", pDevice->devInfo.osAddress1, pDevice->devInfo.osAddress2); rc = -1; } return rc; } /****************************************************************************** agtiapi_ReleaseCCBs() Purpose: Free all allocated CCB memories for the Host Adapter. Parameters: struct agtiapi_softc *pCard (IN) Pointer to HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_ReleaseCCBs( struct agtiapi_softc *pCard ) { ccb_hdr_t *hdr; U32 hdr_sz; - ccb_t *pccb = 0; + ccb_t *pccb = NULL; AGTIAPI_PRINTK( "agtiapi_ReleaseCCBs: start\n" ); #if ( defined AGTIAPI_TEST_DPL || defined AGTIAPI_TEST_EPL ) ccb_t *pccb; #endif #ifdef AGTIAPI_TEST_DPL for (pccb = (pccb_t)pCard->ccbChainList; pccb != NULL; pccb = pccb->pccbChainNext) { if(pccb->dplPtr && pccb->dplDma) pci_pool_free(pCard->dpl_ctx_pool, pccb->dplPtr, pccb->dplDma); } #endif #ifdef AGTIAPI_TEST_EPL for (pccb = (pccb_t)pCard->ccbChainList; pccb != NULL; pccb = pccb->pccbChainNext) { if(pccb->epl_ptr && pccb->epl_dma_ptr) pci_pool_free( pCard->epl_ctx_pool, pccb->epl_ptr, pccb->epl_dma_ptr ); } #endif while ((hdr = pCard->ccbAllocList) != NULL) { pCard->ccbAllocList = hdr->next; hdr_sz = roundup2(sizeof(*hdr), cache_line_size()); pccb = (ccb_t*) ((char*)hdr + hdr_sz); if (pCard->buffer_dmat != NULL && pccb->CCB_dmamap != NULL) { bus_dmamap_destroy(pCard->buffer_dmat, pccb->CCB_dmamap); } free(hdr, M_PMC_MCCB); } pCard->ccbAllocList = NULL; return; } /****************************************************************************** agtiapi_TITimer() Purpose: Timer tick for tisa common layer Parameters: void *data (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_TITimer( void *data ) { U32 next_tick; struct agtiapi_softc *pCard; pCard = (struct agtiapi_softc *)data; // AGTIAPI_PRINTK("agtiapi_TITimer: start\n"); AG_GLOBAL_ARG( flags ); next_tick = pCard->pCardInfo->tiRscInfo.tiLoLevelResource. 
loLevelOption.usecsPerTick / USEC_PER_TICK; if( next_tick == 0 ) /* no timer required */ return; AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); if( pCard->flags & AGTIAPI_SHUT_DOWN ) goto ext; tiCOMTimerTick( &pCard->tiRoot ); /* tisa common layer timer tick */ //add for polling mode #ifdef PMC_SPC if( agtiapi_polling_mode ) agtiapi_CheckAllVectors( pCard, tiNonInterruptContext ); #endif callout_reset( &pCard->OS_timer, next_tick, agtiapi_TITimer, pCard ); ext: AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); return; } /****************************************************************************** agtiapi_clrRmScan() Purpose: Clears device list entries scheduled for timeout and calls scan Parameters: struct agtiapi_softc *pCard (IN) Pointer to HBA data structure ******************************************************************************/ STATIC void agtiapi_clrRmScan( struct agtiapi_softc *pCard ) { ag_tgt_map_t *pWWNList; ag_portal_info_t *pPortalInfo; ag_portal_data_t *pPortalData; int lIdx; bit32 lReadRm; bit16 lReadCt; pWWNList = pCard->pWWNList; AGTIAPI_PRINTK( "agtiapi_clrRmScan: start\n" ); AG_LIST_LOCK( &pCard->devListLock ); for ( lIdx = 0; lIdx < pCard->devDiscover; lIdx++ ) { lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { break; // trim to who cares } lReadRm = atomic_readandclear_32( &pWWNList[lIdx].devRemoved ); if ( lReadRm > 0 ) { pCard->pDevList[lIdx].flags &= ~ACTIVE; pCard->pDevList[lIdx].pDevHandle = NULL; pPortalData = &pCard->pPortalData[pWWNList[lIdx].portId]; pPortalInfo = &pPortalData->portalInfo; pPortalInfo->pDevList[lIdx] = NULL; AGTIAPI_PRINTK( "agtiapi_clrRmScan: cleared dev %d at port %d\n", lIdx, pWWNList[lIdx].portId ); atomic_subtract_16( &pCard->rmChkCt, 1 ); } } AG_LIST_UNLOCK( &pCard->devListLock ); agtiapi_scan( pCard ); } /****************************************************************************** agtiapi_devRmCheck() Purpose: Timer tick to check for timeout on missing targets Removes device list entry when timeout is reached Parameters: void *data (IN) Pointer to the HBA data structure ******************************************************************************/ STATIC void agtiapi_devRmCheck( void *data ) { struct agtiapi_softc *pCard; ag_tgt_map_t *pWWNList; int lIdx, cmpsetRtn, lRunScanFlag = FALSE; bit16 lReadCt; bit32 lReadRm; pCard = ( struct agtiapi_softc * )data; // routine overhead if ( callout_pending( &pCard->devRmTimer ) ) // callout was reset { return; } if ( !callout_active( &pCard->devRmTimer ) ) // callout was stopped { return; } callout_deactivate( &pCard->devRmTimer ); if( pCard->flags & AGTIAPI_SHUT_DOWN ) { return; // implicit timer clear } pWWNList = pCard->pWWNList; AG_LIST_LOCK( &pCard->devListLock ); lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( lReadCt ) { if ( callout_pending(&pCard->devRmTimer) == FALSE ) { callout_reset( &pCard->devRmTimer, 1 * hz, agtiapi_devRmCheck, pCard ); } else { AG_LIST_UNLOCK( &pCard->devListLock ); return; } for ( lIdx = 0; lIdx < pCard->devDiscover; lIdx++ ) { lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { break; // if handled somewhere else, get out } lReadRm = atomic_load_acq_32( &pWWNList[lIdx].devRemoved ); if ( lReadRm > 0 ) { if ( 1 == lReadRm ) // timed out { // no decrement of devRemoved as way to leave a clrRmScan marker lRunScanFlag = TRUE; // other devRemoved values are about to get wiped break; // ... 
so bail out } else { AGTIAPI_PRINTK( "agtiapi_devRmCheck: counting down dev %d @ %d; %d\n", lIdx, lReadRm, lReadCt ); cmpsetRtn = atomic_cmpset_32( &pWWNList[lIdx].devRemoved, lReadRm, lReadRm-1 ); if ( 0 == cmpsetRtn ) { printf( "agtiapi_devRmCheck: %d decrement already handled\n", lIdx ); } } } } AG_LIST_UNLOCK( &pCard->devListLock ); if ( TRUE == lRunScanFlag ) agtiapi_clrRmScan( pCard ); } else { AG_LIST_UNLOCK( &pCard->devListLock ); } return; } static void agtiapi_cam_poll( struct cam_sim *asim ) { return; } /***************************************************************************** agtiapi_ResetCard() Purpose: Hard or soft reset on the controller and resend any outstanding requests if needed. Parameters: struct agtiapi_softc *pCard (IN) Pointer to HBA data structure unsigned long flags (IN/OUT) Flags used in locking done from calling layers Return: AGTIAPI_SUCCESS - reset successful AGTIAPI_FAIL - reset failed Note: *****************************************************************************/ U32 agtiapi_ResetCard( struct agtiapi_softc *pCard, unsigned long *flags ) { ag_device_t *pDevice; U32 lIdx = 0; U32 lFlagVal; agBOOLEAN ret; ag_portal_info_t *pPortalInfo; ag_portal_data_t *pPortalData; U32 count, loop; int szdv; if( pCard->flags & AGTIAPI_RESET ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: reset card already in progress!\n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_ResetCard: Enter cnt %d\n", pCard->resetCount ); #ifdef LOGEVENT agtiapi_LogEvent( pCard, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "Reset initiator time = %d!", pCard->resetCount + 1 ); #endif pCard->flags |= AGTIAPI_RESET; pCard->flags &= ~(AGTIAPI_CB_DONE | AGTIAPI_RESET_SUCCESS); tiCOMSystemInterruptsActive( &pCard->tiRoot, FALSE ); pCard->flags &= ~AGTIAPI_SYS_INTR_ON; agtiapi_FlushCCBs( pCard, AGTIAPI_CALLBACK ); for ( lIdx = 1; 3 >= lIdx; lIdx++ ) // we try reset up to 3 times { if( pCard->flags & AGTIAPI_SOFT_RESET ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: soft variant\n" ); tiCOMReset( &pCard->tiRoot, tiSoftReset ); } else { AGTIAPI_PRINTK( "agtiapi_ResetCard: no flag, no reset!\n" ); } lFlagVal = AGTIAPI_RESET_SUCCESS; AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, *flags ); ret = agtiapi_CheckCB( pCard, 50000, lFlagVal, &pCard->flags ); AG_SPIN_LOCK_IRQ( agtiapi_host_lock, *flags ); if( ret == AGTIAPI_FAIL ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: CheckCB indicates failed reset call, " "try again?\n" ); } else { break; } } if ( 1 < lIdx ) { if ( AGTIAPI_FAIL == ret ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: soft reset failed after try %d\n", lIdx ); } else { AGTIAPI_PRINTK( "agtiapi_ResetCard: soft reset success at try %d\n", lIdx ); } } if( AGTIAPI_FAIL == ret ) { printf( "agtiapi_ResetCard: reset ERROR\n" ); pCard->flags &= ~AGTIAPI_INSTALLED; return AGTIAPI_FAIL; } pCard->flags &= ~AGTIAPI_SOFT_RESET; // disable all devices pDevice = pCard->pDevList; for( lIdx = 0; lIdx < maxTargets; lIdx++, pDevice++ ) { /* if ( pDevice->flags & ACTIVE ) { printf( "agtiapi_ResetCard: before ... 
active device %d\n", lIdx ); } */ pDevice->flags &= ~ACTIVE; } AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, *flags ); if( tiCOMPortInit( &pCard->tiRoot, agFALSE ) != tiSuccess ) printf( "agtiapi_ResetCard: tiCOMPortInit FAILED \n" ); else AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortInit success\n" ); if( !pCard->pDevList ) { // try to get a little sanity here AGTIAPI_PRINTK( "agtiapi_ResetCard: no pDevList ERROR %p\n", pCard->pDevList ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_ResetCard: pre target-count %d port-count %d\n", pCard->tgtCount, pCard->portCount ); pCard->tgtCount = 0; DELAY( 500000 ); pCard->flags &= ~AGTIAPI_CB_DONE; pPortalData = pCard->pPortalData; for( count = 0; count < pCard->portCount; count++ ) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); pPortalInfo = &pPortalData->portalInfo; pPortalInfo->portStatus = 0; pPortalInfo->portStatus &= ~( AGTIAPI_PORT_START | AGTIAPI_PORT_DISC_READY | AGTIAPI_DISC_DONE | AGTIAPI_DISC_COMPLETE ); szdv = sizeof( pPortalInfo->pDevList ) / sizeof( pPortalInfo->pDevList[0] ); if (szdv > pCard->devDiscover) { szdv = pCard->devDiscover; } for( lIdx = 0, loop = 0; lIdx < szdv && loop < pPortalInfo->devTotal; lIdx++ ) { pDevice = (ag_device_t*)pPortalInfo->pDevList[lIdx]; if( pDevice ) { loop++; pDevice->pDevHandle = 0; // mark for availability in pCard->pDevList[] // don't erase more as the device is scheduled for removal on DPC } AGTIAPI_PRINTK( "agtiapi_ResetCard: reset pDev %p pDevList %p idx %d\n", pDevice, pPortalInfo->pDevList, lIdx ); pPortalInfo->devTotal = pPortalInfo->devPrev = 0; } for( lIdx = 0; lIdx < maxTargets; lIdx++ ) { // we reconstruct dev list later in get dev handle pPortalInfo->pDevList[lIdx] = NULL; } for( loop = 0; loop < AGTIAPI_LOOP_MAX; loop++ ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortStart entry data " "%p / %d / %p\n", &pCard->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext ); if( tiCOMPortStart( &pCard->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext, 0 ) != tiSuccess ) { printf( "agtiapi_ResetCard: tiCOMPortStart %d FAILED\n", pPortalInfo->portID ); } else { AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortStart %d success\n", pPortalInfo->portID ); break; } } AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); tiCOMGetPortInfo( &pCard->tiRoot, &pPortalInfo->tiPortalContext, &pPortalInfo->tiPortInfo ); pPortalData++; } // ## fail case: pCard->flags &= ~AGTIAPI_INSTALLED; AG_SPIN_LOCK_IRQ(agtiapi_host_lock, *flags); if( !(pCard->flags & AGTIAPI_INSTALLED) ) // driver not installed ! { printf( "agtiapi_ResetCard: error, driver not installed? 
" "!AGTIAPI_INSTALLED \n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_ResetCard: total device %d\n", pCard->tgtCount ); #ifdef LOGEVENT agtiapi_LogEvent( pCard, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "Reset initiator total device = %d!", pCard->tgtCount ); #endif pCard->resetCount++; AGTIAPI_PRINTK( "agtiapi_ResetCard: clear send and done queues\n" ); // clear send & done queue AG_LOCAL_LOCK( &pCard->sendLock ); pCard->ccbSendHead = NULL; pCard->ccbSendTail = NULL; AG_LOCAL_UNLOCK( &pCard->sendLock ); AG_LOCAL_LOCK( &pCard->doneLock ); pCard->ccbDoneHead = NULL; pCard->ccbDoneTail = NULL; AG_LOCAL_UNLOCK( &pCard->doneLock ); // clear smp queues also AG_LOCAL_LOCK( &pCard->sendSMPLock ); pCard->smpSendHead = NULL; pCard->smpSendTail = NULL; AG_LOCAL_UNLOCK( &pCard->sendSMPLock ); AG_LOCAL_LOCK( &pCard->doneSMPLock ); pCard->smpDoneHead = NULL; pCard->smpDoneTail = NULL; AG_LOCAL_UNLOCK( &pCard->doneSMPLock ); // finished with all reset stuff, now start things back up tiCOMSystemInterruptsActive( &pCard->tiRoot, TRUE ); pCard->flags |= AGTIAPI_SYS_INTR_ON; pCard->flags |= AGTIAPI_HAD_RESET; pCard->flags &= ~AGTIAPI_RESET; // ## agtiapi_StartIO( pCard ); AGTIAPI_PRINTK( "agtiapi_ResetCard: local return success\n" ); return AGTIAPI_SUCCESS; } // agtiapi_ResetCard /****************************************************************************** agtiapi_ReleaseHBA() Purpose: Releases all resources previously acquired to support a specific Host Adapter, including the I/O Address range, and unregisters the agtiapi Host Adapter. Parameters: device_t dev (IN) - device pointer Return: always return 0 - success Note: ******************************************************************************/ int agtiapi_ReleaseHBA( device_t dev ) { int thisCard = device_get_unit( dev ); // keeping get_unit call to once int i; ag_card_info_t *thisCardInst = &agCardInfoList[ thisCard ]; struct ccb_setasync csa; struct agtiapi_softc *pCard; pCard = device_get_softc( dev ); ag_card_info_t *pCardInfo = pCard->pCardInfo; ag_resource_info_t *pRscInfo = &thisCardInst->tiRscInfo; AG_GLOBAL_ARG(flags); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: start\n" ); if (thisCardInst != pCardInfo) { AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: Wrong ag_card_info_t thisCardInst %p " "pCardInfo %p\n", thisCardInst, pCardInfo ); panic( "agtiapi_ReleaseHBA: Wrong ag_card_info_t thisCardInst %p pCardInfo " "%p\n", thisCardInst, pCardInfo ); return( EIO ); } AGTIAPI_PRINTK( "agtiapi_ReleaseHBA card %p\n", pCard ); pCard->flags |= AGTIAPI_SHUT_DOWN; // remove timer if (pCard->flags & AGTIAPI_TIMER_ON) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); callout_drain( &pCard->OS_timer ); callout_drain( &pCard->devRmTimer ); callout_drain(&pCard->IO_timer); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: timer released\n" ); } #ifdef HIALEAH_ENCRYPTION //Release encryption table memory - Fix it //if(pCard->encrypt && (pCard->flags & AGTIAPI_INSTALLED)) //agtiapi_CleanupEncryption(pCard); #endif /* * Shutdown the channel so that chip gets frozen * and it does not do any more pci-bus accesses. 
*/ if (pCard->flags & AGTIAPI_SYS_INTR_ON) { tiCOMSystemInterruptsActive( &pCard->tiRoot, FALSE ); pCard->flags &= ~AGTIAPI_SYS_INTR_ON; AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: card interrupt off\n" ); } if (pCard->flags & AGTIAPI_INSTALLED) { tiCOMShutDown( &pCard->tiRoot ); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: low layers shutdown\n" ); } /* * first release IRQ, so that we do not get any more interrupts * from this host */ if (pCard->flags & AGTIAPI_IRQ_REQUESTED) { if (!agtiapi_intx_mode) { int i; for (i = 0; i< MAX_MSIX_NUM_VECTOR; i++) { if (pCard->irq[i] != agNULL && pCard->rscID[i] != 0) { bus_teardown_intr(dev, pCard->irq[i], pCard->intrcookie[i]); bus_release_resource( dev, SYS_RES_IRQ, pCard->rscID[i], pCard->irq[i] ); } } pci_release_msi(dev); } pCard->flags &= ~AGTIAPI_IRQ_REQUESTED; #ifdef AGTIAPI_DPC for (i = 0; i < MAX_MSIX_NUM_DPC; i++) tasklet_kill(&pCard->tasklet_dpc[i]); #endif AGTIAPI_PRINTK("agtiapi_ReleaseHBA: IRQ released\n"); } // release memory vs. alloc in agtiapi_alloc_ostimem; used in ostiAllocMemory if( pCard->osti_busaddr != 0 ) { bus_dmamap_unload( pCard->osti_dmat, pCard->osti_mapp ); } if( pCard->osti_mem != NULL ) { bus_dmamem_free( pCard->osti_dmat, pCard->osti_mem, pCard->osti_mapp ); } if( pCard->osti_dmat != NULL ) { bus_dma_tag_destroy( pCard->osti_dmat ); } /* unmap the mapped PCI memory */ /* calls bus_release_resource( ,SYS_RES_MEMORY, ..) */ agtiapi_ReleasePCIMem(thisCardInst); /* release all ccbs */ if (pCard->ccbTotal) { //calls bus_dmamap_destroy() for all pccbs agtiapi_ReleaseCCBs(pCard); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: CCB released\n"); } #ifdef HIALEAH_ENCRYPTION /*release encryption resources - Fix it*/ if(pCard->encrypt) { /*Check that all IO's are completed */ if(atomic_read (&outstanding_encrypted_io_count) > 0) { printf("%s: WARNING: %d outstanding encrypted IOs !\n", __FUNCTION__, atomic_read(&outstanding_encrypted_io_count)); } //agtiapi_CleanupEncryptionPools(pCard); } #endif /* release device list */ if( pCard->pDevList ) { free((caddr_t)pCard->pDevList, M_PMC_MDVT); pCard->pDevList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: device list released\n"); } #ifdef LINUX_PERBI_SUPPORT // ## review use of PERBI AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: WWN list %p \n", pCard->pWWNList ); if( pCard->pWWNList ) { free( (caddr_t)pCard->pWWNList, M_PMC_MTGT ); pCard->pWWNList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: WWN list released\n"); } if( pCard->pSLRList ) { free( (caddr_t)pCard->pSLRList, M_PMC_MSLR ); pCard->pSLRList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: SAS Local Remote list released\n"); } #endif if (pCard->pPortalData) { free((caddr_t)pCard->pPortalData, M_PMC_MPRT); pCard->pPortalData = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: PortalData released\n"); } //calls contigfree() or free() agtiapi_MemFree(pCardInfo); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: low level resource released\n"); #ifdef HOTPLUG_SUPPORT if (pCard->flags & AGTIAPI_PORT_INITIALIZED) { // agtiapi_FreeDevWorkList(pCard); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: (HP dev) work resources released\n"); } #endif /* * TBD, scsi_unregister may release the wrong host data structure, * which causes a NULL pointer dereference to show up. 
*/ if (pCard->flags & AGTIAPI_SCSI_REGISTERED) { pCard->flags &= ~AGTIAPI_SCSI_REGISTERED; #ifdef AGTIAPI_LOCAL_LOCK if (pCard->STLock) { //destroy mtx int maxLocks; maxLocks = pRscInfo->tiLoLevelResource.loLevelOption.numOfQueuesPerPort; for( i = 0; i < maxLocks; i++ ) { mtx_destroy(&pCard->STLock[i]); } free(pCard->STLock, M_PMC_MSTL); pCard->STLock = NULL; } #endif } ag_card_good--; /* reset agtiapi_1st_time if this is the only card */ if (!ag_card_good && !agtiapi_1st_time) { agtiapi_1st_time = 1; } /* for tiSgl_t memory */ if (pCard->tisgl_busaddr != 0) { bus_dmamap_unload(pCard->tisgl_dmat, pCard->tisgl_map); } if (pCard->tisgl_mem != NULL) { bus_dmamem_free(pCard->tisgl_dmat, pCard->tisgl_mem, pCard->tisgl_map); } if (pCard->tisgl_dmat != NULL) { bus_dma_tag_destroy(pCard->tisgl_dmat); } if (pCard->buffer_dmat != agNULL) { bus_dma_tag_destroy(pCard->buffer_dmat); } if (pCard->sim != NULL) { mtx_lock(&thisCardInst->pmIOLock); xpt_setup_ccb(&csa.ccb_h, pCard->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = agtiapi_async; csa.callback_arg = pCard; xpt_action((union ccb *)&csa); xpt_free_path(pCard->path); // if (pCard->ccbTotal == 0) if (pCard->ccbTotal <= thisCard) { /* no link up, so the simq has not been released. In order to remove cam, we call this. */ xpt_release_simq(pCard->sim, 1); } xpt_bus_deregister(cam_sim_path(pCard->sim)); cam_sim_free(pCard->sim, FALSE); mtx_unlock(&thisCardInst->pmIOLock); } if (pCard->devq != NULL) { cam_simq_free(pCard->devq); } //destroy mtx mtx_destroy( &thisCardInst->pmIOLock ); mtx_destroy( &pCard->sendLock ); mtx_destroy( &pCard->doneLock ); mtx_destroy( &pCard->sendSMPLock ); mtx_destroy( &pCard->doneSMPLock ); mtx_destroy( &pCard->ccbLock ); mtx_destroy( &pCard->devListLock ); mtx_destroy( &pCard->OS_timer_lock ); mtx_destroy( &pCard->devRmTimerLock ); mtx_destroy( &pCard->memLock ); mtx_destroy( &pCard->freezeLock ); destroy_dev( pCard->my_cdev ); memset((void *)pCardInfo, 0, sizeof(ag_card_info_t)); return 0; } // Called during system shutdown after sync static int agtiapi_shutdown( device_t dev ) { AGTIAPI_PRINTK( "agtiapi_shutdown\n" ); return( 0 ); } static int agtiapi_suspend( device_t dev ) // Device suspend routine. { AGTIAPI_PRINTK( "agtiapi_suspend\n" ); return( 0 ); } static int agtiapi_resume( device_t dev ) // Device resume routine. { AGTIAPI_PRINTK( "agtiapi_resume\n" ); return( 0 ); } static device_method_t agtiapi_methods[] = { // Device interface DEVMETHOD( device_probe, agtiapi_probe ), DEVMETHOD( device_attach, agtiapi_attach ), DEVMETHOD( device_detach, agtiapi_ReleaseHBA ), DEVMETHOD( device_shutdown, agtiapi_shutdown ), DEVMETHOD( device_suspend, agtiapi_suspend ), DEVMETHOD( device_resume, agtiapi_resume ), { 0, 0 } }; static devclass_t pmspcv_devclass; static driver_t pmspcv_driver = { "pmspcv", agtiapi_methods, sizeof( struct agtiapi_softc ) }; DRIVER_MODULE( pmspcv, pci, pmspcv_driver, pmspcv_devclass, 0, 0 ); MODULE_DEPEND( pmspcv, cam, 1, 1, 1 ); MODULE_DEPEND( pmspcv, pci, 1, 1, 1 ); #include #include #include #include Index: head/sys/dev/ppbus/if_plip.c =================================================================== --- head/sys/dev/ppbus/if_plip.c (revision 313981) +++ head/sys/dev/ppbus/if_plip.c (revision 313982) @@ -1,854 +1,854 @@ /*- * Copyright (c) 1997 Poul-Henning Kamp * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From Id: lpt.c,v 1.55.2.1 1996/11/12 09:08:38 phk Exp */ #include __FBSDID("$FreeBSD$"); /* * Parallel port TCP/IP interfaces added. I looked at the driver from * MACH but this is a complete rewrite, and btw. incompatible, and it * should perform better too. I have never run the MACH driver though. * * This driver sends two bytes (0x08, 0x00) in front of each packet, * to allow us to distinguish another format later. * * Now added a Linux/Crynwr compatibility mode which is enabled using * IF_LINK0 - Tim Wilkinson. * * TODO: * Make HDLC/PPP mode, use IF_LLC1 to enable. * * Connect the two computers using a Laplink parallel cable to use this * feature: * * +----------------------------------------+ * |A-name A-End B-End Descr. Port/Bit | * +----------------------------------------+ * |DATA0 2 15 Data 0/0x01 | * |-ERROR 15 2 1/0x08 | * +----------------------------------------+ * |DATA1 3 13 Data 0/0x02 | * |+SLCT 13 3 1/0x10 | * +----------------------------------------+ * |DATA2 4 12 Data 0/0x04 | * |+PE 12 4 1/0x20 | * +----------------------------------------+ * |DATA3 5 10 Strobe 0/0x08 | * |-ACK 10 5 1/0x40 | * +----------------------------------------+ * |DATA4 6 11 Data 0/0x10 | * |BUSY 11 6 1/~0x80 | * +----------------------------------------+ * |GND 18-25 18-25 GND - | * +----------------------------------------+ * * Expect transfer-rates up to 75 kbyte/sec. 
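 *
 * To make the nibble handshake concrete: every payload byte crosses the
 * cable as two 4-bit transfers, with bit 0x08 of the data lines (DATA3,
 * wired to the peer's -ACK in the table above) acting as the strobe.
 * Using the encoding built by lpinittables() below, the byte 0x6a, for
 * example, goes out as the pair
 *
 *	txmith[0x6a] = ((0x6a & 0x80) >> 3) | ((0x6a & 0x70) >> 4) | 0x08
 *	             = 0x0e		(high nibble, strobe bit set)
 *	txmitl[0x6a] = ((0x6a & 0x08) << 1) | (0x6a & 0x07)
 *	             = 0x12		(low nibble, strobe bit clear)
 *
 * and every packet is preceded by the two framing bytes 0x08, 0x00
 * mentioned above.  (Worked example only; no extra protocol implied.)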
* * If GCC could correctly grok * register int port asm("edx") * the code would be cleaner * * Poul-Henning Kamp */ /* * Update for ppbus, PLIP support only - Nicolas Souchu */ #include "opt_plip.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #include #ifndef LPMTU /* MTU for the lp# interfaces */ #define LPMTU 1500 #endif #ifndef LPMAXSPIN1 /* DELAY factor for the lp# interfaces */ #define LPMAXSPIN1 8000 /* Spinning for remote intr to happen */ #endif #ifndef LPMAXSPIN2 /* DELAY factor for the lp# interfaces */ #define LPMAXSPIN2 500 /* Spinning for remote handshake to happen */ #endif #ifndef LPMAXERRS /* Max errors before !RUNNING */ #define LPMAXERRS 100 #endif #define CLPIPHDRLEN 14 /* We send dummy ethernet addresses (two) + packet type in front of packet */ #define CLPIP_SHAKE 0x80 /* This bit toggles between nibble reception */ #define MLPIPHDRLEN CLPIPHDRLEN #define LPIPHDRLEN 2 /* We send 0x08, 0x00 in front of packet */ #define LPIP_SHAKE 0x40 /* This bit toggles between nibble reception */ #if !defined(MLPIPHDRLEN) || LPIPHDRLEN > MLPIPHDRLEN #define MLPIPHDRLEN LPIPHDRLEN #endif #define LPIPTBLSIZE 256 /* Size of octet translation table */ #define lprintf if (lptflag) printf #ifdef PLIP_DEBUG static int volatile lptflag = 1; #else static int volatile lptflag = 0; #endif struct lp_data { struct ifnet *sc_ifp; device_t sc_dev; u_char *sc_ifbuf; int sc_iferrs; struct resource *res_irq; void *sc_intr_cookie; }; static struct mtx lp_tables_lock; MTX_SYSINIT(lp_tables, &lp_tables_lock, "plip tables", MTX_DEF); /* Tables for the lp# interface */ static u_char *txmith; #define txmitl (txmith + (1 * LPIPTBLSIZE)) #define trecvh (txmith + (2 * LPIPTBLSIZE)) #define trecvl (txmith + (3 * LPIPTBLSIZE)) static u_char *ctxmith; #define ctxmitl (ctxmith + (1 * LPIPTBLSIZE)) #define ctrecvh (ctxmith + (2 * LPIPTBLSIZE)) #define ctrecvl (ctxmith + (3 * LPIPTBLSIZE)) /* Functions for the lp# interface */ static int lpinittables(void); static int lpioctl(struct ifnet *, u_long, caddr_t); static int lpoutput(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); static void lpstop(struct lp_data *); static void lp_intr(void *); static int lp_module_handler(module_t, int, void *); #define DEVTOSOFTC(dev) \ ((struct lp_data *)device_get_softc(dev)) static devclass_t lp_devclass; static int lp_module_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_UNLOAD: mtx_lock(&lp_tables_lock); if (txmith != NULL) { free(txmith, M_DEVBUF); txmith = NULL; } if (ctxmith != NULL) { free(ctxmith, M_DEVBUF); ctxmith = NULL; } mtx_unlock(&lp_tables_lock); break; case MOD_LOAD: case MOD_QUIESCE: break; default: return (EOPNOTSUPP); } return (0); } static void lp_identify(driver_t *driver, device_t parent) { device_t dev; dev = device_find_child(parent, "plip", -1); if (!dev) BUS_ADD_CHILD(parent, 0, "plip", -1); } static int lp_probe(device_t dev) { device_set_desc(dev, "PLIP network interface"); return (0); } static int lp_attach(device_t dev) { struct lp_data *lp = DEVTOSOFTC(dev); struct ifnet *ifp; int error, rid = 0; lp->sc_dev = dev; /* * Reserve the interrupt resource. If we don't have one, the * attach fails. 
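 *
 * (The IRQ is requested RF_SHAREABLE because it really belongs to the
 * ppc/ppbus parent: ppbus_intr() in ppbconf.c forwards each interrupt
 * to the intr_hook of whichever child currently owns the bus, which is
 * why the handler attached below only ever runs while we hold the
 * ppbus.)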
*/ lp->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); - if (lp->res_irq == 0) { + if (lp->res_irq == NULL) { device_printf(dev, "cannot reserve interrupt, failed.\n"); return (ENXIO); } ifp = lp->sc_ifp = if_alloc(IFT_PARA); if (ifp == NULL) { return (ENOSPC); } ifp->if_softc = lp; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = LPMTU; ifp->if_flags = IFF_SIMPLEX | IFF_POINTOPOINT | IFF_MULTICAST; ifp->if_ioctl = lpioctl; ifp->if_output = lpoutput; ifp->if_hdrlen = 0; ifp->if_addrlen = 0; ifp->if_snd.ifq_maxlen = ifqmaxlen; if_attach(ifp); bpfattach(ifp, DLT_NULL, sizeof(u_int32_t)); /* * Attach our interrupt handler. It is only called while we * own the ppbus. */ error = bus_setup_intr(dev, lp->res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, lp_intr, lp, &lp->sc_intr_cookie); if (error) { bpfdetach(ifp); if_detach(ifp); bus_release_resource(dev, SYS_RES_IRQ, 0, lp->res_irq); device_printf(dev, "Unable to register interrupt handler\n"); return (error); } return (0); } static int lp_detach(device_t dev) { struct lp_data *sc = device_get_softc(dev); device_t ppbus = device_get_parent(dev); ppb_lock(ppbus); lpstop(sc); ppb_unlock(ppbus); bpfdetach(sc->sc_ifp); if_detach(sc->sc_ifp); bus_teardown_intr(dev, sc->res_irq, sc->sc_intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->res_irq); return (0); } /* * Build the translation tables for the LPIP (BSD unix) protocol. * We don't want to calculate these nasties in our tight loop, so we * precalculate them when we initialize. */ static int lpinittables(void) { int i; mtx_lock(&lp_tables_lock); if (txmith == NULL) txmith = malloc(4 * LPIPTBLSIZE, M_DEVBUF, M_NOWAIT); if (txmith == NULL) { mtx_unlock(&lp_tables_lock); return (1); } if (ctxmith == NULL) ctxmith = malloc(4 * LPIPTBLSIZE, M_DEVBUF, M_NOWAIT); if (ctxmith == NULL) { mtx_unlock(&lp_tables_lock); return (1); } for (i = 0; i < LPIPTBLSIZE; i++) { ctxmith[i] = (i & 0xF0) >> 4; ctxmitl[i] = 0x10 | (i & 0x0F); ctrecvh[i] = (i & 0x78) << 1; ctrecvl[i] = (i & 0x78) >> 3; } for (i = 0; i < LPIPTBLSIZE; i++) { txmith[i] = ((i & 0x80) >> 3) | ((i & 0x70) >> 4) | 0x08; txmitl[i] = ((i & 0x08) << 1) | (i & 0x07); trecvh[i] = ((~i) & 0x80) | ((i & 0x38) << 1); trecvl[i] = (((~i) & 0x80) >> 4) | ((i & 0x38) >> 3); } mtx_unlock(&lp_tables_lock); return (0); } static void lpstop(struct lp_data *sc) { device_t ppbus = device_get_parent(sc->sc_dev); ppb_assert_locked(ppbus); ppb_wctr(ppbus, 0x00); sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); free(sc->sc_ifbuf, M_DEVBUF); sc->sc_ifbuf = NULL; /* IFF_UP is not set, try to release the bus anyway */ ppb_release_bus(ppbus, sc->sc_dev); } static int lpinit_locked(struct ifnet *ifp) { struct lp_data *sc = ifp->if_softc; device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); int error; ppb_assert_locked(ppbus); error = ppb_request_bus(ppbus, dev, PPB_DONTWAIT); if (error) return (error); /* Now IFF_UP means that we own the bus */ ppb_set_mode(ppbus, PPB_COMPATIBLE); if (lpinittables()) { ppb_release_bus(ppbus, dev); return (ENOBUFS); } sc->sc_ifbuf = malloc(sc->sc_ifp->if_mtu + MLPIPHDRLEN, M_DEVBUF, M_NOWAIT); if (sc->sc_ifbuf == NULL) { ppb_release_bus(ppbus, dev); return (ENOBUFS); } ppb_wctr(ppbus, IRQENABLE); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return (0); } /* * Process an ioctl request. 
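 *
 * The cases below are the standard point-to-point interface plumbing:
 * SIOCSIFADDR/SIOCAIFADDR marks the interface up, SIOCSIFFLAGS
 * requests or releases the underlying ppbus, and SIOCSIFMTU swaps in a
 * receive buffer sized for the new MTU.  A typical configuration from
 * userland is therefore just (illustrative addresses):
 *
 *	ifconfig plip0 inet 10.0.0.1 10.0.0.2 up
 *
 * with the second address naming the peer at the other end of the
 * Laplink cable.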
*/ static int lpioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct lp_data *sc = ifp->if_softc; device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); struct ifaddr *ifa = (struct ifaddr *)data; struct ifreq *ifr = (struct ifreq *)data; u_char *ptr; int error; switch (cmd) { case SIOCAIFADDR: case SIOCSIFADDR: if (ifa->ifa_addr->sa_family != AF_INET) return (EAFNOSUPPORT); ifp->if_flags |= IFF_UP; /* FALLTHROUGH */ case SIOCSIFFLAGS: error = 0; ppb_lock(ppbus); if ((!(ifp->if_flags & IFF_UP)) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) lpstop(sc); else if (((ifp->if_flags & IFF_UP)) && (!(ifp->if_drv_flags & IFF_DRV_RUNNING))) error = lpinit_locked(ifp); ppb_unlock(ppbus); return (error); case SIOCSIFMTU: ppb_lock(ppbus); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ptr = malloc(ifr->ifr_mtu + MLPIPHDRLEN, M_DEVBUF, M_NOWAIT); if (ptr == NULL) { ppb_unlock(ppbus); return (ENOBUFS); } if (sc->sc_ifbuf) free(sc->sc_ifbuf, M_DEVBUF); sc->sc_ifbuf = ptr; } sc->sc_ifp->if_mtu = ifr->ifr_mtu; ppb_unlock(ppbus); break; case SIOCGIFMTU: ifr->ifr_mtu = sc->sc_ifp->if_mtu; break; case SIOCADDMULTI: case SIOCDELMULTI: - if (ifr == 0) { + if (ifr == NULL) { return (EAFNOSUPPORT); /* XXX */ } switch (ifr->ifr_addr.sa_family) { case AF_INET: break; default: return (EAFNOSUPPORT); } break; case SIOCGIFMEDIA: /* * No ifmedia support at this stage; maybe use it * in future for eg. protocol selection. */ return (EINVAL); default: lprintf("LP:ioctl(0x%lx)\n", cmd); return (EINVAL); } return (0); } static __inline int clpoutbyte(u_char byte, int spin, device_t ppbus) { ppb_wdtr(ppbus, ctxmitl[byte]); while (ppb_rstr(ppbus) & CLPIP_SHAKE) if (--spin == 0) { return (1); } ppb_wdtr(ppbus, ctxmith[byte]); while (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) if (--spin == 0) { return (1); } return (0); } static __inline int clpinbyte(int spin, device_t ppbus) { u_char c, cl; while ((ppb_rstr(ppbus) & CLPIP_SHAKE)) if (!--spin) { return (-1); } cl = ppb_rstr(ppbus); ppb_wdtr(ppbus, 0x10); while (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) if (!--spin) { return (-1); } c = ppb_rstr(ppbus); ppb_wdtr(ppbus, 0x00); return (ctrecvl[cl] | ctrecvh[c]); } static void lptap(struct ifnet *ifp, struct mbuf *m) { u_int32_t af = AF_INET; bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m); } static void lp_intr(void *arg) { struct lp_data *sc = arg; device_t ppbus = device_get_parent(sc->sc_dev); int len, j; u_char *bp; u_char c, cl; struct mbuf *top; ppb_assert_locked(ppbus); if (sc->sc_ifp->if_flags & IFF_LINK0) { /* Ack. the request */ ppb_wdtr(ppbus, 0x01); /* Get the packet length */ j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) goto err; len = j; j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) goto err; len = len + (j << 8); if (len > sc->sc_ifp->if_mtu + MLPIPHDRLEN) goto err; bp = sc->sc_ifbuf; while (len--) { j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) { goto err; } *bp++ = j; } /* Get and ignore checksum */ j = clpinbyte(LPMAXSPIN2, ppbus); if (j == -1) { goto err; } len = bp - sc->sc_ifbuf; if (len <= CLPIPHDRLEN) goto err; sc->sc_iferrs = 0; len -= CLPIPHDRLEN; if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, len); top = m_devget(sc->sc_ifbuf + CLPIPHDRLEN, len, 0, sc->sc_ifp, 0); if (top) { ppb_unlock(ppbus); if (bpf_peers_present(sc->sc_ifp->if_bpf)) lptap(sc->sc_ifp, top); M_SETFIB(top, sc->sc_ifp->if_fib); /* mbuf is free'd on failure. 
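 * netisr_queue() consumes the mbuf in either case (queued on
 * success, freed on failure), so no m_freem() is needed here; the
 * ppbus lock is dropped around the hand-off so we do not hold a
 * driver lock while entering the network stack.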
*/ netisr_queue(NETISR_IP, top); ppb_lock(ppbus); } return; } while ((ppb_rstr(ppbus) & LPIP_SHAKE)) { len = sc->sc_ifp->if_mtu + LPIPHDRLEN; bp = sc->sc_ifbuf; while (len--) { cl = ppb_rstr(ppbus); ppb_wdtr(ppbus, 8); j = LPMAXSPIN2; while ((ppb_rstr(ppbus) & LPIP_SHAKE)) if (!--j) goto err; c = ppb_rstr(ppbus); ppb_wdtr(ppbus, 0); *bp++= trecvh[cl] | trecvl[c]; j = LPMAXSPIN2; while (!((cl = ppb_rstr(ppbus)) & LPIP_SHAKE)) { if (cl != c && (((cl = ppb_rstr(ppbus)) ^ 0xb8) & 0xf8) == (c & 0xf8)) goto end; if (!--j) goto err; } } end: len = bp - sc->sc_ifbuf; if (len <= LPIPHDRLEN) goto err; sc->sc_iferrs = 0; len -= LPIPHDRLEN; if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, len); top = m_devget(sc->sc_ifbuf + LPIPHDRLEN, len, 0, sc->sc_ifp, 0); if (top) { ppb_unlock(ppbus); if (bpf_peers_present(sc->sc_ifp->if_bpf)) lptap(sc->sc_ifp, top); M_SETFIB(top, sc->sc_ifp->if_fib); /* mbuf is free'd on failure. */ netisr_queue(NETISR_IP, top); ppb_lock(ppbus); } } return; err: ppb_wdtr(ppbus, 0); lprintf("R"); if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); sc->sc_iferrs++; /* * We are not able to send or receive anything for now, * so stop wasting our time */ if (sc->sc_iferrs > LPMAXERRS) { if_printf(sc->sc_ifp, "Too many errors, Going off-line.\n"); ppb_wctr(ppbus, 0x00); sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->sc_iferrs = 0; } } static __inline int lpoutbyte(u_char byte, int spin, device_t ppbus) { ppb_wdtr(ppbus, txmith[byte]); while (!(ppb_rstr(ppbus) & LPIP_SHAKE)) if (--spin == 0) return (1); ppb_wdtr(ppbus, txmitl[byte]); while (ppb_rstr(ppbus) & LPIP_SHAKE) if (--spin == 0) return (1); return (0); } static int lpoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { struct lp_data *sc = ifp->if_softc; device_t dev = sc->sc_dev; device_t ppbus = device_get_parent(dev); int err; struct mbuf *mm; u_char *cp = "\0\0"; u_char chksum = 0; int count = 0; int i, len, spin; /* We need a sensible value if we abort */ cp++; ppb_lock(ppbus); ifp->if_drv_flags |= IFF_DRV_OACTIVE; err = 1; /* assume we're aborting because of an error */ /* Suspend (on laptops) or receive-errors might have taken us offline */ ppb_wctr(ppbus, IRQENABLE); if (ifp->if_flags & IFF_LINK0) { if (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) { lprintf("&"); lp_intr(sc); } /* Alert other end to pending packet */ spin = LPMAXSPIN1; ppb_wdtr(ppbus, 0x08); while ((ppb_rstr(ppbus) & 0x08) == 0) if (--spin == 0) { goto nend; } /* Calculate length of packet, then send that */ count += 14; /* Ethernet header len */ mm = m; for (mm = m; mm; mm = mm->m_next) { count += mm->m_len; } if (clpoutbyte(count & 0xFF, LPMAXSPIN1, ppbus)) goto nend; if (clpoutbyte((count >> 8) & 0xFF, LPMAXSPIN1, ppbus)) goto nend; /* Send dummy ethernet header */ for (i = 0; i < 12; i++) { if (clpoutbyte(i, LPMAXSPIN1, ppbus)) goto nend; chksum += i; } if (clpoutbyte(0x08, LPMAXSPIN1, ppbus)) goto nend; if (clpoutbyte(0x00, LPMAXSPIN1, ppbus)) goto nend; chksum += 0x08 + 0x00; /* Add into checksum */ mm = m; do { cp = mtod(mm, u_char *); len = mm->m_len; while (len--) { chksum += *cp; if (clpoutbyte(*cp++, LPMAXSPIN2, ppbus)) goto nend; } } while ((mm = mm->m_next)); /* Send checksum */ if (clpoutbyte(chksum, LPMAXSPIN2, ppbus)) goto nend; /* Go quiescent */ ppb_wdtr(ppbus, 0); err = 0; /* No errors */ nend: ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (err) { /* if we didn't timeout... 
*/ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lprintf("X"); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); if (bpf_peers_present(ifp->if_bpf)) lptap(ifp, m); } m_freem(m); if (!(ppb_rstr(ppbus) & CLPIP_SHAKE)) { lprintf("^"); lp_intr(sc); } ppb_unlock(ppbus); return (0); } if (ppb_rstr(ppbus) & LPIP_SHAKE) { lprintf("&"); lp_intr(sc); } if (lpoutbyte(0x08, LPMAXSPIN1, ppbus)) goto end; if (lpoutbyte(0x00, LPMAXSPIN2, ppbus)) goto end; mm = m; do { cp = mtod(mm, u_char *); len = mm->m_len; while (len--) if (lpoutbyte(*cp++, LPMAXSPIN2, ppbus)) goto end; } while ((mm = mm->m_next)); err = 0; /* no errors were encountered */ end: --cp; ppb_wdtr(ppbus, txmitl[*cp] ^ 0x17); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (err) { /* if we didn't timeout... */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); lprintf("X"); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); if (bpf_peers_present(ifp->if_bpf)) lptap(ifp, m); } m_freem(m); if (ppb_rstr(ppbus) & LPIP_SHAKE) { lprintf("^"); lp_intr(sc); } ppb_unlock(ppbus); return (0); } static device_method_t lp_methods[] = { /* device interface */ DEVMETHOD(device_identify, lp_identify), DEVMETHOD(device_probe, lp_probe), DEVMETHOD(device_attach, lp_attach), DEVMETHOD(device_detach, lp_detach), { 0, 0 } }; static driver_t lp_driver = { "plip", lp_methods, sizeof(struct lp_data), }; DRIVER_MODULE(plip, ppbus, lp_driver, lp_devclass, lp_module_handler, 0); MODULE_DEPEND(plip, ppbus, 1, 1, 1); Index: head/sys/dev/ppbus/ppbconf.c =================================================================== --- head/sys/dev/ppbus/ppbconf.c (revision 313981) +++ head/sys/dev/ppbus/ppbconf.c (revision 313982) @@ -1,603 +1,603 @@ /*- * Copyright (c) 1997, 1998, 1999 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * */ #include __FBSDID("$FreeBSD$"); #include "opt_ppb_1284.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #define DEVTOSOFTC(dev) ((struct ppb_data *)device_get_softc(dev)) static MALLOC_DEFINE(M_PPBUSDEV, "ppbusdev", "Parallel Port bus device"); static int ppbus_intr(void *arg); /* * Device methods */ static int ppbus_print_child(device_t bus, device_t dev) { struct ppb_device *ppbdev; int retval; retval = bus_print_child_header(bus, dev); ppbdev = (struct ppb_device *)device_get_ivars(dev); if (ppbdev->flags != 0) retval += printf(" flags 0x%x", ppbdev->flags); retval += bus_print_child_footer(bus, dev); return (retval); } static int ppbus_probe(device_t dev) { device_set_desc(dev, "Parallel port bus"); return (0); } /* * ppbus_add_child() * * Add a ppbus device, allocate/initialize the ivars */ static device_t ppbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct ppb_device *ppbdev; device_t child; /* allocate ivars for the new ppbus child */ ppbdev = malloc(sizeof(struct ppb_device), M_PPBUSDEV, M_NOWAIT | M_ZERO); if (!ppbdev) return (NULL); /* initialize the ivars */ ppbdev->name = name; /* add the device as a child to the ppbus bus with the allocated * ivars */ child = device_add_child_ordered(dev, order, name, unit); device_set_ivars(child, ppbdev); return (child); } static int ppbus_read_ivar(device_t bus, device_t dev, int index, uintptr_t* val) { switch (index) { case PPBUS_IVAR_MODE: /* XXX yet device mode = ppbus mode = chipset mode */ *val = (u_long)ppb_get_mode(bus); break; default: return (ENOENT); } return (0); } static int ppbus_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { switch (index) { case PPBUS_IVAR_MODE: /* XXX yet device mode = ppbus mode = chipset mode */ ppb_set_mode(bus, val); break; default: return (ENOENT); } return (0); } #define PPB_PNP_PRINTER 0 #define PPB_PNP_MODEM 1 #define PPB_PNP_NET 2 #define PPB_PNP_HDC 3 #define PPB_PNP_PCMCIA 4 #define PPB_PNP_MEDIA 5 #define PPB_PNP_FDC 6 #define PPB_PNP_PORTS 7 #define PPB_PNP_SCANNER 8 #define PPB_PNP_DIGICAM 9 #ifndef DONTPROBE_1284 static char *pnp_tokens[] = { "PRINTER", "MODEM", "NET", "HDC", "PCMCIA", "MEDIA", "FDC", "PORTS", "SCANNER", "DIGICAM", "", NULL }; #if 0 static char *pnp_classes[] = { "printer", "modem", "network device", "hard disk", "PCMCIA", "multimedia device", "floppy disk", "ports", "scanner", "digital camera", "unknown device", NULL }; #endif /* * search_token() * * Search the first occurrence of a token within a string */ static char * search_token(char *str, int slen, char *token) { int tlen, i; #define UNKNOWN_LENGTH -1 if (slen == UNKNOWN_LENGTH) /* get string's length */ slen = strlen(str); /* get token's length */ tlen = strlen(token); if (tlen == 0) return (str); for (i = 0; i <= slen-tlen; i++) { if (strncmp(str + i, token, tlen) == 0) return (&str[i]); } return (NULL); } /* * ppb_pnp_detect() * * Returns the class id. 
of the peripheral, -1 otherwise */ static int ppb_pnp_detect(device_t bus) { - char *token, *class = 0; + char *token, *class = NULL; int i, len, error; int class_id = -1; char str[PPB_PnP_STRING_SIZE+1]; device_printf(bus, "Probing for PnP devices:\n"); if ((error = ppb_1284_read_id(bus, PPB_NIBBLE, str, PPB_PnP_STRING_SIZE, &len))) goto end_detect; #ifdef DEBUG_1284 device_printf(bus, " %d characters: ", len); for (i = 0; i < len; i++) printf("%c(0x%x) ", str[i], str[i]); printf("\n"); #endif /* replace ';' characters by '\0' */ for (i = 0; i < len; i++) str[i] = (str[i] == ';') ? '\0' : str[i]; if ((token = search_token(str, len, "MFG")) != NULL || (token = search_token(str, len, "MANUFACTURER")) != NULL) device_printf(bus, "<%s", search_token(token, UNKNOWN_LENGTH, ":") + 1); else device_printf(bus, "<unknown>"); if ((token = search_token(str, len, "CLS")) != NULL) { class = search_token(token, UNKNOWN_LENGTH, ":") + 1; printf(" %s", class); } if ((token = search_token(str, len, "CMD")) != NULL || (token = search_token(str, len, "COMMAND")) != NULL) printf(" %s", search_token(token, UNKNOWN_LENGTH, ":") + 1); printf("\n"); if (class) /* identify class ident */ for (i = 0; pnp_tokens[i] != NULL; i++) { if (search_token(class, len, pnp_tokens[i]) != NULL) { class_id = i; goto end_detect; } } class_id = PPB_PnP_UNKNOWN; end_detect: return (class_id); } /* * ppb_scan_bus() * * Scan the ppbus for IEEE1284 compliant devices */ static int ppb_scan_bus(device_t bus) { struct ppb_data * ppb = (struct ppb_data *)device_get_softc(bus); int error = 0; /* try all IEEE1284 modes, for one device only * * XXX We should implement the IEEE1284.3 standard to detect * daisy chained devices */ error = ppb_1284_negociate(bus, PPB_NIBBLE, PPB_REQUEST_ID); if ((ppb->state == PPB_ERROR) && (ppb->error == PPB_NOT_IEEE1284)) goto end_scan; ppb_1284_terminate(bus); device_printf(bus, "IEEE1284 device found "); if (!(error = ppb_1284_negociate(bus, PPB_NIBBLE, 0))) { printf("/NIBBLE"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_PS2, 0))) { printf("/PS2"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, 0))) { printf("/ECP"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, PPB_USE_RLE))) { printf("/ECP_RLE"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_EPP, 0))) { printf("/EPP"); ppb_1284_terminate(bus); } /* try more IEEE1284 modes */ if (bootverbose) { if (!(error = ppb_1284_negociate(bus, PPB_NIBBLE, PPB_REQUEST_ID))) { printf("/NIBBLE_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_PS2, PPB_REQUEST_ID))) { printf("/PS2_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, PPB_REQUEST_ID))) { printf("/ECP_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, PPB_REQUEST_ID | PPB_USE_RLE))) { printf("/ECP_RLE_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_COMPATIBLE, PPB_EXTENSIBILITY_LINK))) { printf("/Extensibility Link"); ppb_1284_terminate(bus); } } printf("\n"); /* detect PnP devices */ ppb->class_id = ppb_pnp_detect(bus); return (0); end_scan: return (error); } #endif /* !DONTPROBE_1284 */ static int ppbus_attach(device_t dev) { struct ppb_data *ppb = device_get_softc(dev); int error, rid; error = BUS_READ_IVAR(device_get_parent(dev), dev, PPC_IVAR_LOCK, (uintptr_t *)&ppb->ppc_lock); if (error) { device_printf(dev, "Unable to fetch parent's lock\n"); return (error); } rid = 0; ppb->ppc_irq_res = 
bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (ppb->ppc_irq_res != NULL) { mtx_lock(ppb->ppc_lock); error = BUS_WRITE_IVAR(device_get_parent(dev), dev, PPC_IVAR_INTR_HANDLER, (uintptr_t)&ppbus_intr); mtx_unlock(ppb->ppc_lock); if (error) { device_printf(dev, "Unable to set interrupt handler\n"); return (error); } } /* Locate our children */ bus_generic_probe(dev); #ifndef DONTPROBE_1284 /* detect IEEE1284 compliant devices */ mtx_lock(ppb->ppc_lock); ppb_scan_bus(dev); mtx_unlock(ppb->ppc_lock); #endif /* !DONTPROBE_1284 */ /* launch attachment of the added children */ bus_generic_attach(dev); return (0); } static int ppbus_detach(device_t dev) { int error; error = bus_generic_detach(dev); if (error) return (error); /* detach & delete all children */ device_delete_children(dev); return (0); } static int ppbus_intr(void *arg) { struct ppb_device *ppbdev; struct ppb_data *ppb = arg; mtx_assert(ppb->ppc_lock, MA_OWNED); if (ppb->ppb_owner == NULL) return (ENOENT); ppbdev = device_get_ivars(ppb->ppb_owner); if (ppbdev->intr_hook == NULL) return (ENOENT); ppbdev->intr_hook(ppbdev->intr_arg); return (0); } static int ppbus_setup_intr(device_t bus, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct ppb_device *ppbdev = device_get_ivars(child); struct ppb_data *ppb = DEVTOSOFTC(bus); /* We do not support filters. */ if (filt != NULL || ihand == NULL) return (EINVAL); /* Can only attach handlers to the parent device's resource. */ if (ppb->ppc_irq_res != r) return (EINVAL); mtx_lock(ppb->ppc_lock); ppbdev->intr_hook = ihand; ppbdev->intr_arg = arg; *cookiep = ppbdev; mtx_unlock(ppb->ppc_lock); return (0); } static int ppbus_teardown_intr(device_t bus, device_t child, struct resource *r, void *ih) { struct ppb_device *ppbdev = device_get_ivars(child); struct ppb_data *ppb = DEVTOSOFTC(bus); mtx_lock(ppb->ppc_lock); if (ppbdev != ih || ppb->ppc_irq_res != r) { mtx_unlock(ppb->ppc_lock); return (EINVAL); } ppbdev->intr_hook = NULL; mtx_unlock(ppb->ppc_lock); return (0); } /* * ppb_request_bus() * * Allocate the device to perform transfers. * * how : PPB_WAIT or PPB_DONTWAIT */ int ppb_request_bus(device_t bus, device_t dev, int how) { struct ppb_data *ppb = DEVTOSOFTC(bus); struct ppb_device *ppbdev = (struct ppb_device *)device_get_ivars(dev); int error = 0; mtx_assert(ppb->ppc_lock, MA_OWNED); while (!error) { if (ppb->ppb_owner) { switch (how) { case PPB_WAIT | PPB_INTR: error = mtx_sleep(ppb, ppb->ppc_lock, PPBPRI | PCATCH, "ppbreq", 0); break; case PPB_WAIT | PPB_NOINTR: error = mtx_sleep(ppb, ppb->ppc_lock, PPBPRI, "ppbreq", 0); break; default: return (EWOULDBLOCK); } } else { ppb->ppb_owner = dev; /* restore the context of the device * The first time, ctx.valid is certainly false * then do not change anything. 
This is useful for * drivers that do not set there operating mode * during attachement */ if (ppbdev->ctx.valid) ppb_set_mode(bus, ppbdev->ctx.mode); return (0); } } return (error); } /* * ppb_release_bus() * * Release the device allocated with ppb_request_bus() */ int ppb_release_bus(device_t bus, device_t dev) { struct ppb_data *ppb = DEVTOSOFTC(bus); struct ppb_device *ppbdev = (struct ppb_device *)device_get_ivars(dev); mtx_assert(ppb->ppc_lock, MA_OWNED); if (ppb->ppb_owner != dev) return (EACCES); /* save the context of the device */ ppbdev->ctx.mode = ppb_get_mode(bus); /* ok, now the context of the device is valid */ ppbdev->ctx.valid = 1; ppb->ppb_owner = 0; /* wakeup waiting processes */ wakeup(ppb); return (0); } static devclass_t ppbus_devclass; static device_method_t ppbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, ppbus_probe), DEVMETHOD(device_attach, ppbus_attach), DEVMETHOD(device_detach, ppbus_detach), /* bus interface */ DEVMETHOD(bus_add_child, ppbus_add_child), DEVMETHOD(bus_print_child, ppbus_print_child), DEVMETHOD(bus_read_ivar, ppbus_read_ivar), DEVMETHOD(bus_write_ivar, ppbus_write_ivar), DEVMETHOD(bus_setup_intr, ppbus_setup_intr), DEVMETHOD(bus_teardown_intr, ppbus_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), { 0, 0 } }; static driver_t ppbus_driver = { "ppbus", ppbus_methods, sizeof(struct ppb_data), }; DRIVER_MODULE(ppbus, ppc, ppbus_driver, ppbus_devclass, 0, 0); Index: head/sys/dev/ppc/ppc.c =================================================================== --- head/sys/dev/ppc/ppc.c (revision 313981) +++ head/sys/dev/ppc/ppc.c (revision 313982) @@ -1,2003 +1,2003 @@ /*- * Copyright (c) 1997-2000 Nicolas Souchu * Copyright (c) 2001 Alcove - Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ppc.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #include #include #include #endif #include #include #include #include #include "ppbus_if.h" static void ppcintr(void *arg); #define IO_LPTSIZE_EXTENDED 8 /* "Extended" LPT controllers */ #define IO_LPTSIZE_NORMAL 4 /* "Normal" LPT controllers */ #define LOG_PPC(function, ppc, string) \ if (bootverbose) printf("%s: %s\n", function, string) #define DEVTOSOFTC(dev) ((struct ppc_data *)device_get_softc(dev)) /* * We use critical enter/exit for the simple config locking needed to * detect the devices. We just want to make sure that both of our writes * happen without someone else also writing to those config registers. Since * we just do this at startup, Giant keeps multiple threads from executing, * and critical_enter() then is all that's needed to keep us from being preempted * during the critical sequences with the hardware. * * Note: this doesn't prevent multiple threads from putting the chips into * config mode, but since we only do that to detect the type at startup the * extra overhead isn't needed since Giant protects us from multiple entry * and no other code changes these registers. */ #define PPC_CONFIG_LOCK(ppc) critical_enter() #define PPC_CONFIG_UNLOCK(ppc) critical_exit() devclass_t ppc_devclass; const char ppc_driver_name[] = "ppc"; static char *ppc_models[] = { "SMC-like", "SMC FDC37C665GT", "SMC FDC37C666GT", "PC87332", "PC87306", "82091AA", "Generic", "W83877F", "W83877AF", "Winbond", "PC87334", "SMC FDC37C935", "PC87303", 0 }; /* list of available modes */ static char *ppc_avms[] = { "COMPATIBLE", "NIBBLE-only", "PS2-only", "PS2/NIBBLE", "EPP-only", "EPP/NIBBLE", "EPP/PS2", "EPP/PS2/NIBBLE", "ECP-only", "ECP/NIBBLE", "ECP/PS2", "ECP/PS2/NIBBLE", "ECP/EPP", "ECP/EPP/NIBBLE", "ECP/EPP/PS2", "ECP/EPP/PS2/NIBBLE", 0 }; /* list of current executing modes * Note that few modes do not actually exist. */ static char *ppc_modes[] = { "COMPATIBLE", "NIBBLE", "PS/2", "PS/2", "EPP", "EPP", "EPP", "EPP", "ECP", "ECP", "ECP+PS2", "ECP+PS2", "ECP+EPP", "ECP+EPP", "ECP+EPP", "ECP+EPP", 0 }; static char *ppc_epp_protocol[] = { " (EPP 1.9)", " (EPP 1.7)", 0 }; #ifdef __i386__ /* * BIOS printer list - used by BIOS probe. 
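 *
 * The BIOS Data Area keeps up to four parallel port base addresses as
 * 16-bit words starting at offset 0x408; the defines below map that
 * table into the kernel.  A sketch of how a probe can walk it
 * (illustrative, i386 only):
 *
 *	short *bios_ports = BIOS_PORTS;
 *	int i;
 *
 *	for (i = 0; i < BIOS_MAX_PPC; i++)
 *		if (bios_ports[i] != 0)
 *			printf("BIOS reports lpt%d at 0x%x\n",
 *			    i, bios_ports[i]);
 *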
*/ #define BIOS_PPC_PORTS 0x408 #define BIOS_PORTS (short *)(KERNBASE+BIOS_PPC_PORTS) #define BIOS_MAX_PPC 4 #endif /* * ppc_ecp_sync() XXX */ int ppc_ecp_sync(device_t dev) { int i, r; struct ppc_data *ppc = DEVTOSOFTC(dev); PPC_ASSERT_LOCKED(ppc); if (!(ppc->ppc_avm & PPB_ECP) && !(ppc->ppc_dtm & PPB_ECP)) return 0; r = r_ecr(ppc); if ((r & 0xe0) != PPC_ECR_EPP) return 0; for (i = 0; i < 100; i++) { r = r_ecr(ppc); if (r & 0x1) return 0; DELAY(100); } device_printf(dev, "ECP sync failed as data still present in FIFO.\n"); return 0; } /* * ppc_detect_fifo() * * Detect parallel port FIFO */ static int ppc_detect_fifo(struct ppc_data *ppc) { char ecr_sav; char ctr_sav, ctr, cc; short i; /* save registers */ ecr_sav = r_ecr(ppc); ctr_sav = r_ctr(ppc); /* enter ECP configuration mode, no interrupt, no DMA */ w_ecr(ppc, 0xf4); /* read PWord size - transfers in FIFO mode must be PWord aligned */ ppc->ppc_pword = (r_cnfgA(ppc) & PPC_PWORD_MASK); /* XXX 16 and 32 bits implementations not supported */ if (ppc->ppc_pword != PPC_PWORD_8) { LOG_PPC(__func__, ppc, "PWord not supported"); goto error; } w_ecr(ppc, 0x34); /* byte mode, no interrupt, no DMA */ ctr = r_ctr(ppc); w_ctr(ppc, ctr | PCD); /* set direction to 1 */ /* enter ECP test mode, no interrupt, no DMA */ w_ecr(ppc, 0xd4); /* flush the FIFO */ for (i=0; i<1024; i++) { if (r_ecr(ppc) & PPC_FIFO_EMPTY) break; cc = r_fifo(ppc); } if (i >= 1024) { LOG_PPC(__func__, ppc, "can't flush FIFO"); goto error; } /* enable interrupts, no DMA */ w_ecr(ppc, 0xd0); /* determine readIntrThreshold * fill the FIFO until serviceIntr is set */ for (i=0; i<1024; i++) { w_fifo(ppc, (char)i); if (!ppc->ppc_rthr && (r_ecr(ppc) & PPC_SERVICE_INTR)) { /* readThreshold reached */ ppc->ppc_rthr = i+1; } if (r_ecr(ppc) & PPC_FIFO_FULL) { ppc->ppc_fifo = i+1; break; } } if (i >= 1024) { LOG_PPC(__func__, ppc, "can't fill FIFO"); goto error; } w_ecr(ppc, 0xd4); /* test mode, no interrupt, no DMA */ w_ctr(ppc, ctr & ~PCD); /* set direction to 0 */ w_ecr(ppc, 0xd0); /* enable interrupts */ /* determine writeIntrThreshold * empty the FIFO until serviceIntr is set */ for (i=ppc->ppc_fifo; i>0; i--) { if (r_fifo(ppc) != (char)(ppc->ppc_fifo-i)) { LOG_PPC(__func__, ppc, "invalid data in FIFO"); goto error; } if (r_ecr(ppc) & PPC_SERVICE_INTR) { /* writeIntrThreshold reached */ ppc->ppc_wthr = ppc->ppc_fifo - i+1; } /* if FIFO empty before the last byte, error */ if (i>1 && (r_ecr(ppc) & PPC_FIFO_EMPTY)) { LOG_PPC(__func__, ppc, "data lost in FIFO"); goto error; } } /* FIFO must be empty after the last byte */ if (!(r_ecr(ppc) & PPC_FIFO_EMPTY)) { LOG_PPC(__func__, ppc, "can't empty the FIFO"); goto error; } w_ctr(ppc, ctr_sav); w_ecr(ppc, ecr_sav); return (0); error: w_ctr(ppc, ctr_sav); w_ecr(ppc, ecr_sav); return (EINVAL); } static int ppc_detect_port(struct ppc_data *ppc) { w_ctr(ppc, 0x0c); /* To avoid missing PS2 ports */ w_dtr(ppc, 0xaa); if (r_dtr(ppc) != 0xaa) return (0); return (1); } /* * EPP timeout, according to the PC87332 manual * Semantics of clearing EPP timeout bit. * PC87332 - reading SPP_STR does it... * SMC - write 1 to EPP timeout bit XXX * Others - (?) 
write 0 to EPP timeout bit */ static void ppc_reset_epp_timeout(struct ppc_data *ppc) { register char r; r = r_str(ppc); w_str(ppc, r | 0x1); w_str(ppc, r & 0xfe); return; } static int ppc_check_epp_timeout(struct ppc_data *ppc) { ppc_reset_epp_timeout(ppc); return (!(r_str(ppc) & TIMEOUT)); } /* * Configure current operating mode */ static int ppc_generic_setmode(struct ppc_data *ppc, int mode) { u_char ecr = 0; /* check if mode is available */ if (mode && !(ppc->ppc_avm & mode)) return (EINVAL); /* if ECP mode, configure ecr register */ if ((ppc->ppc_avm & PPB_ECP) || (ppc->ppc_dtm & PPB_ECP)) { /* return to byte mode (keeping direction bit), * no interrupt, no DMA to be able to change to * ECP */ w_ecr(ppc, PPC_ECR_RESET); ecr = PPC_DISABLE_INTR; if (mode & PPB_EPP) return (EINVAL); else if (mode & PPB_ECP) /* select ECP mode */ ecr |= PPC_ECR_ECP; else if (mode & PPB_PS2) /* select PS2 mode with ECP */ ecr |= PPC_ECR_PS2; else /* select COMPATIBLE/NIBBLE mode */ ecr |= PPC_ECR_STD; w_ecr(ppc, ecr); } ppc->ppc_mode = mode; return (0); } /* * The ppc driver is free to choose options like FIFO or DMA * if ECP mode is available. * * The 'RAW' option allows the upper drivers to force the ppc mode * even with FIFO, DMA available. */ static int ppc_smclike_setmode(struct ppc_data *ppc, int mode) { u_char ecr = 0; /* check if mode is available */ if (mode && !(ppc->ppc_avm & mode)) return (EINVAL); /* if ECP mode, configure ecr register */ if ((ppc->ppc_avm & PPB_ECP) || (ppc->ppc_dtm & PPB_ECP)) { /* return to byte mode (keeping direction bit), * no interrupt, no DMA to be able to change to * ECP or EPP mode */ w_ecr(ppc, PPC_ECR_RESET); ecr = PPC_DISABLE_INTR; if (mode & PPB_EPP) /* select EPP mode */ ecr |= PPC_ECR_EPP; else if (mode & PPB_ECP) /* select ECP mode */ ecr |= PPC_ECR_ECP; else if (mode & PPB_PS2) /* select PS2 mode with ECP */ ecr |= PPC_ECR_PS2; else /* select COMPATIBLE/NIBBLE mode */ ecr |= PPC_ECR_STD; w_ecr(ppc, ecr); } ppc->ppc_mode = mode; return (0); } #ifdef PPC_PROBE_CHIPSET /* * ppc_pc873xx_detect * * Probe for a Natsemi PC873xx-family part. * * References in this function are to the National Semiconductor * PC87332 datasheet TL/C/11930, May 1995 revision. */ static int pc873xx_basetab[] = {0x0398, 0x026e, 0x015c, 0x002e, 0}; static int pc873xx_porttab[] = {0x0378, 0x03bc, 0x0278, 0}; static int pc873xx_irqtab[] = {5, 7, 5, 0}; static int pc873xx_regstab[] = { PC873_FER, PC873_FAR, PC873_PTR, PC873_FCR, PC873_PCR, PC873_PMC, PC873_TUP, PC873_SID, PC873_PNP0, PC873_PNP1, PC873_LPTBA, -1 }; static char *pc873xx_rnametab[] = { "FER", "FAR", "PTR", "FCR", "PCR", "PMC", "TUP", "SID", "PNP0", "PNP1", "LPTBA", NULL }; static int ppc_pc873xx_detect(struct ppc_data *ppc, int chipset_mode) /* XXX mode never forced */ { static int index = 0; int idport, irq; int ptr, pcr, val, i; while ((idport = pc873xx_basetab[index++])) { /* XXX should check first to see if this location is already claimed */ /* * Pull the 873xx through the power-on ID cycle (2.2,1.). * We can't use this to locate the chip as it may already have * been used by the BIOS. */ (void)inb(idport); (void)inb(idport); (void)inb(idport); (void)inb(idport); /* * Read the SID byte. 
Possible values are : * * 01010xxx PC87334 * 0001xxxx PC87332 * 01110xxx PC87306 * 00110xxx PC87303 */ outb(idport, PC873_SID); val = inb(idport + 1); if ((val & 0xf0) == 0x10) { ppc->ppc_model = NS_PC87332; } else if ((val & 0xf8) == 0x70) { ppc->ppc_model = NS_PC87306; } else if ((val & 0xf8) == 0x50) { ppc->ppc_model = NS_PC87334; } else if ((val & 0xf8) == 0x40) { /* Should be 0x30 by the documentation, but probing yielded 0x40... */ ppc->ppc_model = NS_PC87303; } else { if (bootverbose && (val != 0xff)) printf("PC873xx probe at 0x%x got unknown ID 0x%x\n", idport, val); continue ; /* not recognised */ } /* print registers */ if (bootverbose) { printf("PC873xx"); for (i=0; pc873xx_regstab[i] != -1; i++) { outb(idport, pc873xx_regstab[i]); printf(" %s=0x%x", pc873xx_rnametab[i], inb(idport + 1) & 0xff); } printf("\n"); } /* * We think we have one. Is it enabled and where we want it to be? */ outb(idport, PC873_FER); val = inb(idport + 1); if (!(val & PC873_PPENABLE)) { if (bootverbose) printf("PC873xx parallel port disabled\n"); continue; } outb(idport, PC873_FAR); val = inb(idport + 1); /* XXX we should create a driver instance for every port found */ if (pc873xx_porttab[val & 0x3] != ppc->ppc_base) { /* First try to change the port address to that requested... */ switch (ppc->ppc_base) { case 0x378: val &= 0xfc; break; case 0x3bc: val &= 0xfd; break; case 0x278: val &= 0xfe; break; default: val &= 0xfd; break; } outb(idport, PC873_FAR); outb(idport + 1, val); outb(idport + 1, val); /* Check for success by reading back the value we supposedly wrote and comparing...*/ outb(idport, PC873_FAR); val = inb(idport + 1) & 0x3; /* If we fail, report the failure... */ if (pc873xx_porttab[val] != ppc->ppc_base) { if (bootverbose) printf("PC873xx at 0x%x not for driver at port 0x%x\n", pc873xx_porttab[val], ppc->ppc_base); } continue; } outb(idport, PC873_PTR); ptr = inb(idport + 1); /* get irq settings */ if (ppc->ppc_base == 0x378) irq = (ptr & PC873_LPTBIRQ7) ? 
7 : 5; else irq = pc873xx_irqtab[val]; if (bootverbose) printf("PC873xx irq %d at 0x%x\n", irq, ppc->ppc_base); /* * Check if irq settings are correct */ if (irq != ppc->ppc_irq) { /* * If the chipset is not locked and base address is 0x378, * we have another chance */ if (ppc->ppc_base == 0x378 && !(ptr & PC873_CFGLOCK)) { if (ppc->ppc_irq == 7) { outb(idport + 1, (ptr | PC873_LPTBIRQ7)); outb(idport + 1, (ptr | PC873_LPTBIRQ7)); } else { outb(idport + 1, (ptr & ~PC873_LPTBIRQ7)); outb(idport + 1, (ptr & ~PC873_LPTBIRQ7)); } if (bootverbose) printf("PC873xx irq set to %d\n", ppc->ppc_irq); } else { if (bootverbose) printf("PC873xx sorry, can't change irq setting\n"); } } else { if (bootverbose) printf("PC873xx irq settings are correct\n"); } outb(idport, PC873_PCR); pcr = inb(idport + 1); if ((ptr & PC873_CFGLOCK) || !chipset_mode) { if (bootverbose) printf("PC873xx %s", (ptr & PC873_CFGLOCK)?"locked":"unlocked"); ppc->ppc_avm |= PPB_NIBBLE; if (bootverbose) printf(", NIBBLE"); if (pcr & PC873_EPPEN) { ppc->ppc_avm |= PPB_EPP; if (bootverbose) printf(", EPP"); if (pcr & PC873_EPP19) ppc->ppc_epp = EPP_1_9; else ppc->ppc_epp = EPP_1_7; if ((ppc->ppc_model == NS_PC87332) && bootverbose) { outb(idport, PC873_PTR); ptr = inb(idport + 1); if (ptr & PC873_EPPRDIR) printf(", Regular mode"); else printf(", Automatic mode"); } } else if (pcr & PC873_ECPEN) { ppc->ppc_avm |= PPB_ECP; if (bootverbose) printf(", ECP"); if (pcr & PC873_ECPCLK) { /* XXX */ ppc->ppc_avm |= PPB_PS2; if (bootverbose) printf(", PS/2"); } } else { outb(idport, PC873_PTR); ptr = inb(idport + 1); if (ptr & PC873_EXTENDED) { ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(", SPP"); } } } else { if (bootverbose) printf("PC873xx unlocked"); if (chipset_mode & PPB_ECP) { if ((chipset_mode & PPB_EPP) && bootverbose) printf(", ECP+EPP not supported"); pcr &= ~PC873_EPPEN; pcr |= (PC873_ECPEN | PC873_ECPCLK); /* XXX */ outb(idport + 1, pcr); outb(idport + 1, pcr); if (bootverbose) printf(", ECP"); } else if (chipset_mode & PPB_EPP) { pcr &= ~(PC873_ECPEN | PC873_ECPCLK); pcr |= (PC873_EPPEN | PC873_EPP19); outb(idport + 1, pcr); outb(idport + 1, pcr); ppc->ppc_epp = EPP_1_9; /* XXX */ if (bootverbose) printf(", EPP1.9"); /* enable automatic direction turnover */ if (ppc->ppc_model == NS_PC87332) { outb(idport, PC873_PTR); ptr = inb(idport + 1); ptr &= ~PC873_EPPRDIR; outb(idport + 1, ptr); outb(idport + 1, ptr); if (bootverbose) printf(", Automatic mode"); } } else { pcr &= ~(PC873_ECPEN | PC873_ECPCLK | PC873_EPPEN); outb(idport + 1, pcr); outb(idport + 1, pcr); /* configure extended bit in PTR */ outb(idport, PC873_PTR); ptr = inb(idport + 1); if (chipset_mode & PPB_PS2) { ptr |= PC873_EXTENDED; if (bootverbose) printf(", PS/2"); } else { /* default to NIBBLE mode */ ptr &= ~PC873_EXTENDED; if (bootverbose) printf(", NIBBLE"); } outb(idport + 1, ptr); outb(idport + 1, ptr); } ppc->ppc_avm = chipset_mode; } if (bootverbose) printf("\n"); ppc->ppc_type = PPC_TYPE_GENERIC; ppc_generic_setmode(ppc, chipset_mode); return(chipset_mode); } return(-1); } /* * ppc_smc37c66xgt_detect * * SMC FDC37C66xGT configuration. */ static int ppc_smc37c66xgt_detect(struct ppc_data *ppc, int chipset_mode) { int i; u_char r; int type = -1; int csr = SMC66x_CSR; /* initial value is 0x3F0 */ int port_address[] = { -1 /* disabled */ , 0x3bc, 0x378, 0x278 }; #define cio csr+1 /* config IO port is either 0x3F1 or 0x371 */ /* * Detection: enter configuration mode and read CRD register. 
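* The handshake below: write the part-specific key twice in a row to the * CSR to enter configuration mode (interrupts are masked by PPC_CONFIG_LOCK() * so the two writes are not split), then select configuration register 0xD * (CRD) and read the device ID back from CSR+1; 0x65 identifies a 37C665GT * and 0x66 a 37C666GT.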
*/ PPC_CONFIG_LOCK(ppc); outb(csr, SMC665_iCODE); outb(csr, SMC665_iCODE); PPC_CONFIG_UNLOCK(ppc); outb(csr, 0xd); if (inb(cio) == 0x65) { type = SMC_37C665GT; goto config; } for (i = 0; i < 2; i++) { PPC_CONFIG_LOCK(ppc); outb(csr, SMC666_iCODE); outb(csr, SMC666_iCODE); PPC_CONFIG_UNLOCK(ppc); outb(csr, 0xd); if (inb(cio) == 0x66) { type = SMC_37C666GT; break; } /* Another chance, CSR may be hard-configured to be at 0x370 */ csr = SMC666_CSR; } config: /* * If chipset not found, do not continue. */ if (type == -1) { outb(csr, 0xaa); /* end config mode */ return (-1); } /* select CR1 */ outb(csr, 0x1); /* read the port's address: bits 0 and 1 of CR1 */ r = inb(cio) & SMC_CR1_ADDR; if (port_address[(int)r] != ppc->ppc_base) { outb(csr, 0xaa); /* end config mode */ return (-1); } ppc->ppc_model = type; /* * CR1 and CR4 registers bits 3 and 0/1 for mode configuration * If SPP mode is detected, try to set ECP+EPP mode */ if (bootverbose) { outb(csr, 0x1); device_printf(ppc->ppc_dev, "SMC registers CR1=0x%x", inb(cio) & 0xff); outb(csr, 0x4); printf(" CR4=0x%x", inb(cio) & 0xff); } /* select CR1 */ outb(csr, 0x1); if (!chipset_mode) { /* autodetect mode */ /* 666GT is ~certainly~ hardwired to an extended ECP+EPP mode */ if (type == SMC_37C666GT) { ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; if (bootverbose) printf(" configuration hardwired, supposing " \ "ECP+EPP SPP"); } else if ((inb(cio) & SMC_CR1_MODE) == 0) { /* already in extended parallel port mode, read CR4 */ outb(csr, 0x4); r = (inb(cio) & SMC_CR4_EMODE); switch (r) { case SMC_SPP: ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(" SPP"); break; case SMC_EPPSPP: ppc->ppc_avm |= PPB_EPP | PPB_SPP; if (bootverbose) printf(" EPP SPP"); break; case SMC_ECP: ppc->ppc_avm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP SPP"); break; case SMC_ECPEPP: ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; if (bootverbose) printf(" ECP+EPP SPP"); break; } } else { /* not an extended port mode */ ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(" SPP"); } } else { /* mode forced */ ppc->ppc_avm = chipset_mode; /* 666GT is ~certainly~ hardwired to an extended ECP+EPP mode */ if (type == SMC_37C666GT) goto end_detect; r = inb(cio); if ((chipset_mode & (PPB_ECP | PPB_EPP)) == 0) { /* do not use ECP when the mode is not forced to */ outb(cio, r | SMC_CR1_MODE); if (bootverbose) printf(" SPP"); } else { /* an extended mode is selected */ outb(cio, r & ~SMC_CR1_MODE); /* read CR4 register and reset mode field */ outb(csr, 0x4); r = inb(cio) & ~SMC_CR4_EMODE; if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { outb(cio, r | SMC_ECPEPP); if (bootverbose) printf(" ECP+EPP"); } else { outb(cio, r | SMC_ECP); if (bootverbose) printf(" ECP"); } } else { /* PPB_EPP is set */ outb(cio, r | SMC_EPPSPP); if (bootverbose) printf(" EPP SPP"); } } ppc->ppc_avm = chipset_mode; } /* set FIFO threshold to 16 */ if (ppc->ppc_avm & PPB_ECP) { /* select CRA */ outb(csr, 0xa); outb(cio, 16); } end_detect: if (bootverbose) printf ("\n"); if (ppc->ppc_avm & PPB_EPP) { /* select CR4 */ outb(csr, 0x4); r = inb(cio); /* * Set the EPP protocol... 
* Low=EPP 1.9 (1284 standard) and High=EPP 1.7 */ if (ppc->ppc_epp == EPP_1_9) outb(cio, (r & ~SMC_CR4_EPPTYPE)); else outb(cio, (r | SMC_CR4_EPPTYPE)); } outb(csr, 0xaa); /* end config mode */ ppc->ppc_type = PPC_TYPE_SMCLIKE; ppc_smclike_setmode(ppc, chipset_mode); return (chipset_mode); } /* * SMC FDC37C935 configuration * Found on many Alpha machines */ static int ppc_smc37c935_detect(struct ppc_data *ppc, int chipset_mode) { int type = -1; PPC_CONFIG_LOCK(ppc); outb(SMC935_CFG, 0x55); /* enter config mode */ outb(SMC935_CFG, 0x55); PPC_CONFIG_UNLOCK(ppc); outb(SMC935_IND, SMC935_ID); /* check device id */ if (inb(SMC935_DAT) == 0x2) type = SMC_37C935; if (type == -1) { outb(SMC935_CFG, 0xaa); /* exit config mode */ return (-1); } ppc->ppc_model = type; outb(SMC935_IND, SMC935_LOGDEV); /* select parallel port, */ outb(SMC935_DAT, 3); /* which is logical device 3 */ /* set io port base */ outb(SMC935_IND, SMC935_PORTHI); outb(SMC935_DAT, (u_char)((ppc->ppc_base & 0xff00) >> 8)); outb(SMC935_IND, SMC935_PORTLO); outb(SMC935_DAT, (u_char)(ppc->ppc_base & 0xff)); if (!chipset_mode) ppc->ppc_avm = PPB_COMPATIBLE; /* default mode */ else { ppc->ppc_avm = chipset_mode; outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_CENT); /* start in compatible mode */ /* SPP + EPP or just plain SPP */ if (chipset_mode & (PPB_SPP)) { if (chipset_mode & PPB_EPP) { if (ppc->ppc_epp == EPP_1_9) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_EPP19SPP); } if (ppc->ppc_epp == EPP_1_7) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_EPP17SPP); } } else { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_SPP); } } /* ECP + EPP or just plain ECP */ if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { if (ppc->ppc_epp == EPP_1_9) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECPEPP19); } if (ppc->ppc_epp == EPP_1_7) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECPEPP17); } } else { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECP); } } } outb(SMC935_CFG, 0xaa); /* exit config mode */ ppc->ppc_type = PPC_TYPE_SMCLIKE; ppc_smclike_setmode(ppc, chipset_mode); return (chipset_mode); } /* * Winbond W83877F stuff * * EFER: extended function enable register * EFIR: extended function index register * EFDR: extended function data register */ #define efir ((efer == 0x250) ? 0x251 : 0x3f0) #define efdr ((efer == 0x250) ? 
0x252 : 0x3f1) static int w83877f_efers[] = { 0x250, 0x3f0, 0x3f0, 0x250 }; static int w83877f_keys[] = { 0x89, 0x86, 0x87, 0x88 }; static int w83877f_keyiter[] = { 1, 2, 2, 1 }; static int w83877f_hefs[] = { WINB_HEFERE, WINB_HEFRAS, WINB_HEFERE | WINB_HEFRAS, 0 }; static int ppc_w83877f_detect(struct ppc_data *ppc, int chipset_mode) { int i, j, efer; unsigned char r, hefere, hefras; for (i = 0; i < 4; i ++) { /* first try to enable configuration registers */ efer = w83877f_efers[i]; /* write the key to the EFER */ for (j = 0; j < w83877f_keyiter[i]; j ++) outb (efer, w83877f_keys[i]); /* then check HEFERE and HEFRAS bits */ outb (efir, 0x0c); hefere = inb(efdr) & WINB_HEFERE; outb (efir, 0x16); hefras = inb(efdr) & WINB_HEFRAS; /* * HEFRAS HEFERE * 0 1 write 89h to 250h (power-on default) * 1 0 write 86h twice to 3f0h * 1 1 write 87h twice to 3f0h * 0 0 write 88h to 250h */ if ((hefere | hefras) == w83877f_hefs[i]) goto found; } return (-1); /* failed */ found: /* check base port address - read from CR23 */ outb(efir, 0x23); if (ppc->ppc_base != inb(efdr) * 4) /* 4 bytes boundaries */ return (-1); /* read CHIP ID from CR9/bits0-3 */ outb(efir, 0x9); switch (inb(efdr) & WINB_CHIPID) { case WINB_W83877F_ID: ppc->ppc_model = WINB_W83877F; break; case WINB_W83877AF_ID: ppc->ppc_model = WINB_W83877AF; break; default: ppc->ppc_model = WINB_UNKNOWN; } if (bootverbose) { /* dump of registers */ device_printf(ppc->ppc_dev, "0x%x - ", w83877f_keys[i]); for (i = 0; i <= 0xd; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } for (i = 0x10; i <= 0x17; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } outb(efir, 0x1e); printf("0x%x ", inb(efdr)); for (i = 0x20; i <= 0x29; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } printf("\n"); } ppc->ppc_type = PPC_TYPE_GENERIC; if (!chipset_mode) { /* autodetect mode */ /* select CR0 */ outb(efir, 0x0); r = inb(efdr) & (WINB_PRTMODS0 | WINB_PRTMODS1); /* select CR9 */ outb(efir, 0x9); r |= (inb(efdr) & WINB_PRTMODS2); switch (r) { case WINB_W83757: if (bootverbose) device_printf(ppc->ppc_dev, "W83757 compatible mode\n"); return (-1); /* generic or SMC-like */ case WINB_EXTFDC: case WINB_EXTADP: case WINB_EXT2FDD: case WINB_JOYSTICK: if (bootverbose) device_printf(ppc->ppc_dev, "not in parallel port mode\n"); return (-1); case (WINB_PARALLEL | WINB_EPP_SPP): ppc->ppc_avm |= PPB_EPP | PPB_SPP; if (bootverbose) device_printf(ppc->ppc_dev, "EPP SPP\n"); break; case (WINB_PARALLEL | WINB_ECP): ppc->ppc_avm |= PPB_ECP | PPB_SPP; if (bootverbose) device_printf(ppc->ppc_dev, "ECP SPP\n"); break; case (WINB_PARALLEL | WINB_ECP_EPP): ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; ppc->ppc_type = PPC_TYPE_SMCLIKE; if (bootverbose) device_printf(ppc->ppc_dev, "ECP+EPP SPP\n"); break; default: printf("%s: unknown case (0x%x)!\n", __func__, r); } } else { /* mode forced */ /* select CR9 and set PRTMODS2 bit */ outb(efir, 0x9); outb(efdr, inb(efdr) & ~WINB_PRTMODS2); /* select CR0 and reset PRTMODSx bits */ outb(efir, 0x0); outb(efdr, inb(efdr) & ~(WINB_PRTMODS0 | WINB_PRTMODS1)); if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { outb(efdr, inb(efdr) | WINB_ECP_EPP); if (bootverbose) device_printf(ppc->ppc_dev, "ECP+EPP\n"); ppc->ppc_type = PPC_TYPE_SMCLIKE; } else { outb(efdr, inb(efdr) | WINB_ECP); if (bootverbose) device_printf(ppc->ppc_dev, "ECP\n"); } } else { /* select EPP_SPP otherwise */ outb(efdr, inb(efdr) | WINB_EPP_SPP); if (bootverbose) device_printf(ppc->ppc_dev, "EPP SPP\n"); } ppc->ppc_avm = chipset_mode; } /* exit configuration mode */ 
outb(efer, 0xaa); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: ppc_smclike_setmode(ppc, chipset_mode); break; default: ppc_generic_setmode(ppc, chipset_mode); break; } return (chipset_mode); } #endif /* * ppc_generic_detect */ static int ppc_generic_detect(struct ppc_data *ppc, int chipset_mode) { /* default to generic */ ppc->ppc_type = PPC_TYPE_GENERIC; if (bootverbose) device_printf(ppc->ppc_dev, "SPP"); /* first, check for ECP */ w_ecr(ppc, PPC_ECR_PS2); if ((r_ecr(ppc) & 0xe0) == PPC_ECR_PS2) { ppc->ppc_dtm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP "); /* search for SMC style ECP+EPP mode */ w_ecr(ppc, PPC_ECR_EPP); } /* try to reset EPP timeout bit */ if (ppc_check_epp_timeout(ppc)) { ppc->ppc_dtm |= PPB_EPP; if (ppc->ppc_dtm & PPB_ECP) { /* SMC-like chipset found */ ppc->ppc_model = SMC_LIKE; ppc->ppc_type = PPC_TYPE_SMCLIKE; if (bootverbose) printf(" ECP+EPP"); } else { if (bootverbose) printf(" EPP"); } } else { /* restore to standard mode */ w_ecr(ppc, PPC_ECR_STD); } /* XXX try to detect NIBBLE and PS2 modes */ ppc->ppc_dtm |= PPB_NIBBLE; if (chipset_mode) ppc->ppc_avm = chipset_mode; else ppc->ppc_avm = ppc->ppc_dtm; if (bootverbose) printf("\n"); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: ppc_smclike_setmode(ppc, chipset_mode); break; default: ppc_generic_setmode(ppc, chipset_mode); break; } return (chipset_mode); } /* * ppc_detect() * * mode is the mode suggested at boot */ static int ppc_detect(struct ppc_data *ppc, int chipset_mode) { #ifdef PPC_PROBE_CHIPSET int i, mode; /* list of supported chipsets */ int (*chipset_detect[])(struct ppc_data *, int) = { ppc_pc873xx_detect, ppc_smc37c66xgt_detect, ppc_w83877f_detect, ppc_smc37c935_detect, ppc_generic_detect, NULL }; #endif /* if we can't find the port and the mode is not forced, return an error */ if (!ppc_detect_port(ppc) && chipset_mode == 0) return (EIO); /* failed, port not present */ /* assume centronics compatible mode is supported */ ppc->ppc_avm = PPB_COMPATIBLE; #ifdef PPC_PROBE_CHIPSET /* we have to differentiate available chipset modes, * chipset running modes and IEEE-1284 operating modes * * after detection, the port must support running in compatible mode */ if (ppc->ppc_flags & 0x40) { if (bootverbose) printf("ppc: chipset forced to generic\n"); #endif ppc->ppc_mode = ppc_generic_detect(ppc, chipset_mode); #ifdef PPC_PROBE_CHIPSET } else { for (i=0; chipset_detect[i] != NULL; i++) { if ((mode = chipset_detect[i](ppc, chipset_mode)) != -1) { ppc->ppc_mode = mode; break; } } } #endif /* configure/detect ECP FIFO */ if ((ppc->ppc_avm & PPB_ECP) && !(ppc->ppc_flags & 0x80)) ppc_detect_fifo(ppc); return (0); } /* * ppc_exec_microseq() * * Execute a microsequence. * The microsequence mechanism is intended to handle fast I/O operations.
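* A microsequence is an array of struct ppb_microseq instructions that an * upper layer typically builds with the MS_* constructor macros and hands * down through the ppbus layer; a purely illustrative sequence such as * MS_DASS(0x55), MS_DELAY(10), MS_RET(0) * would assert 0x55 on the data lines, wait 10 us and return. The macro * names here are only a sketch; the opcode handlers below define the * actual contract.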
*/ int ppc_exec_microseq(device_t dev, struct ppb_microseq **p_msq) { struct ppc_data *ppc = DEVTOSOFTC(dev); struct ppb_microseq *mi; char cc, *p; int i, iter, len; int error; register int reg; register char mask; register int accum = 0; - register char *ptr = 0; + register char *ptr = NULL; - struct ppb_microseq *stack = 0; + struct ppb_microseq *stack = NULL; /* microsequence registers are equivalent to PC-like port registers */ #define r_reg(reg,ppc) (bus_read_1((ppc)->res_ioport, reg)) #define w_reg(reg, ppc, byte) (bus_write_1((ppc)->res_ioport, reg, byte)) #define INCR_PC (mi ++) /* increment program counter */ PPC_ASSERT_LOCKED(ppc); mi = *p_msq; for (;;) { switch (mi->opcode) { case MS_OP_RSET: cc = r_reg(mi->arg[0].i, ppc); cc &= (char)mi->arg[2].i; /* clear mask */ cc |= (char)mi->arg[1].i; /* assert mask */ w_reg(mi->arg[0].i, ppc, cc); INCR_PC; break; case MS_OP_RASSERT_P: reg = mi->arg[1].i; ptr = ppc->ppc_ptr; if ((len = mi->arg[0].i) == MS_ACCUM) { accum = ppc->ppc_accum; for (; accum; accum--) w_reg(reg, ppc, *ptr++); ppc->ppc_accum = accum; } else for (i=0; i<len; i++) w_reg(reg, ppc, *ptr++); ppc->ppc_ptr = ptr; INCR_PC; break; case MS_OP_RFETCH_P: reg = mi->arg[1].i; mask = (char)mi->arg[2].i; ptr = ppc->ppc_ptr; if ((len = mi->arg[0].i) == MS_ACCUM) { accum = ppc->ppc_accum; for (; accum; accum--) *ptr++ = r_reg(reg, ppc) & mask; ppc->ppc_accum = accum; } else for (i=0; i<len; i++) *ptr++ = r_reg(reg, ppc) & mask; ppc->ppc_ptr = ptr; INCR_PC; break; case MS_OP_RFETCH: *((char *) mi->arg[2].p) = r_reg(mi->arg[0].i, ppc) & (char)mi->arg[1].i; INCR_PC; break; case MS_OP_RASSERT: case MS_OP_DELAY: /* let's suppose the next instr. is the same */ prefetch: for (;mi->opcode == MS_OP_RASSERT; INCR_PC) w_reg(mi->arg[0].i, ppc, (char)mi->arg[1].i); if (mi->opcode == MS_OP_DELAY) { DELAY(mi->arg[0].i); INCR_PC; goto prefetch; } break; case MS_OP_ADELAY: if (mi->arg[0].i) { PPC_UNLOCK(ppc); pause("ppbdelay", mi->arg[0].i * (hz/1000)); PPC_LOCK(ppc); } INCR_PC; break; case MS_OP_TRIG: reg = mi->arg[0].i; iter = mi->arg[1].i; p = (char *)mi->arg[2].p; /* XXX delay limited to 255 us */ for (i=0; i<iter; i++) { w_reg(reg, ppc, *p++); DELAY((unsigned char)*p++); } INCR_PC; break; case MS_OP_SET: ppc->ppc_accum = mi->arg[0].i; INCR_PC; break; case MS_OP_DBRA: if (--ppc->ppc_accum > 0) mi += mi->arg[0].i; INCR_PC; break; case MS_OP_BRSET: cc = r_str(ppc); if ((cc & (char)mi->arg[0].i) == (char)mi->arg[0].i) mi += mi->arg[1].i; INCR_PC; break; case MS_OP_BRCLEAR: cc = r_str(ppc); if ((cc & (char)mi->arg[0].i) == 0) mi += mi->arg[1].i; INCR_PC; break; case MS_OP_BRSTAT: cc = r_str(ppc); if ((cc & ((char)mi->arg[0].i | (char)mi->arg[1].i)) == (char)mi->arg[0].i) mi += mi->arg[2].i; INCR_PC; break; case MS_OP_C_CALL: /* * If the C call returns !0 then end the microseq.
* The current state of ptr is passed to the C function */ if ((error = mi->arg[0].f(mi->arg[1].p, ppc->ppc_ptr))) return (error); INCR_PC; break; case MS_OP_PTR: ppc->ppc_ptr = (char *)mi->arg[0].p; INCR_PC; break; case MS_OP_CALL: if (stack) panic("%s: too many calls", __func__); if (mi->arg[0].p) { /* store the state of the actual * microsequence */ stack = mi; /* jump to the new microsequence */ mi = (struct ppb_microseq *)mi->arg[0].p; } else INCR_PC; break; case MS_OP_SUBRET: /* retrieve microseq and pc state before the call */ mi = stack; /* reset the stack */ - stack = 0; + stack = NULL; /* XXX return code */ INCR_PC; break; case MS_OP_PUT: case MS_OP_GET: case MS_OP_RET: /* can't return to ppb level during the execution * of a submicrosequence */ if (stack) panic("%s: can't return to ppb level", __func__); /* update pc for ppb level of execution */ *p_msq = mi; /* return to ppb level of execution */ return (0); default: panic("%s: unknown microsequence opcode 0x%x", __func__, mi->opcode); } } /* unreached */ } static void ppcintr(void *arg) { struct ppc_data *ppc = arg; u_char ctr, ecr, str; /* * If we have any child interrupt handlers registered, let * them handle this interrupt. * * XXX: If DMA is in progress should we just complete that w/o * doing this? */ PPC_LOCK(ppc); if (ppc->ppc_intr_hook != NULL && ppc->ppc_intr_hook(ppc->ppc_intr_arg) == 0) { PPC_UNLOCK(ppc); return; } str = r_str(ppc); ctr = r_ctr(ppc); ecr = r_ecr(ppc); #if defined(PPC_DEBUG) && PPC_DEBUG > 1 printf("![%x/%x/%x]", ctr, ecr, str); #endif /* don't use ecp mode with IRQENABLE set */ if (ctr & IRQENABLE) { PPC_UNLOCK(ppc); return; } /* interrupts are generated by the nFault signal * only in ECP mode */ if ((str & nFAULT) && (ppc->ppc_mode & PPB_ECP)) { /* check if the ppc driver has programmed the * nFault interrupt */ if (ppc->ppc_irqstat & PPC_IRQ_nFAULT) { w_ecr(ppc, ecr | PPC_nFAULT_INTR); ppc->ppc_irqstat &= ~PPC_IRQ_nFAULT; } else { /* shall be handled by underlying layers XXX */ PPC_UNLOCK(ppc); return; } } if (ppc->ppc_irqstat & PPC_IRQ_DMA) { /* disable interrupts (should be done by hardware though) */ w_ecr(ppc, ecr | PPC_SERVICE_INTR); ppc->ppc_irqstat &= ~PPC_IRQ_DMA; ecr = r_ecr(ppc); /* check if DMA completed */ if ((ppc->ppc_avm & PPB_ECP) && (ecr & PPC_ENABLE_DMA)) { #ifdef PPC_DEBUG printf("a"); #endif /* stop DMA */ w_ecr(ppc, ecr & ~PPC_ENABLE_DMA); ecr = r_ecr(ppc); if (ppc->ppc_dmastat == PPC_DMA_STARTED) { #ifdef PPC_DEBUG printf("d"); #endif ppc->ppc_dmadone(ppc); ppc->ppc_dmastat = PPC_DMA_COMPLETE; /* wake up the waiting process */ wakeup(ppc); } } } else if (ppc->ppc_irqstat & PPC_IRQ_FIFO) { /* classic interrupt I/O */ ppc->ppc_irqstat &= ~PPC_IRQ_FIFO; } PPC_UNLOCK(ppc); return; } int ppc_read(device_t dev, char *buf, int len, int mode) { return (EINVAL); } int ppc_write(device_t dev, char *buf, int len, int how) { return (EINVAL); } int ppc_reset_epp(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); PPC_ASSERT_LOCKED(ppc); ppc_reset_epp_timeout(ppc); return 0; } int ppc_setmode(device_t dev, int mode) { struct ppc_data *ppc = DEVTOSOFTC(dev); PPC_ASSERT_LOCKED(ppc); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: return (ppc_smclike_setmode(ppc, mode)); break; case PPC_TYPE_GENERIC: default: return (ppc_generic_setmode(ppc, mode)); break; } /* not reached */ return (ENXIO); } int ppc_probe(device_t dev, int rid) { #ifdef __i386__ static short next_bios_ppc = 0; #endif struct ppc_data *ppc; int error; rman_res_t port; /* * Allocate the ppc_data structure.
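* (newbus allocated the softc when the device was created; DEVTOSOFTC() * merely fetches it, and it is zeroed below before being filled in) */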
*/ ppc = DEVTOSOFTC(dev); bzero(ppc, sizeof(struct ppc_data)); ppc->rid_ioport = rid; /* retrieve ISA parameters */ error = bus_get_resource(dev, SYS_RES_IOPORT, rid, &port, NULL); #ifdef __i386__ /* * If port not specified, use bios list. */ if (error) { if ((next_bios_ppc < BIOS_MAX_PPC) && (*(BIOS_PORTS + next_bios_ppc) != 0)) { port = *(BIOS_PORTS + next_bios_ppc++); if (bootverbose) device_printf(dev, "parallel port found at 0x%jx\n", port); } else { device_printf(dev, "parallel port not found.\n"); return (ENXIO); } bus_set_resource(dev, SYS_RES_IOPORT, rid, port, IO_LPTSIZE_EXTENDED); } #endif /* IO port is mandatory */ /* Try "extended" IO port range...*/ ppc->res_ioport = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ppc->rid_ioport, IO_LPTSIZE_EXTENDED, RF_ACTIVE); if (ppc->res_ioport != 0) { if (bootverbose) device_printf(dev, "using extended I/O port range\n"); } else { /* Failed? If so, then try the "normal" IO port range... */ ppc->res_ioport = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ppc->rid_ioport, IO_LPTSIZE_NORMAL, RF_ACTIVE); if (ppc->res_ioport != 0) { if (bootverbose) device_printf(dev, "using normal I/O port range\n"); } else { device_printf(dev, "cannot reserve I/O port range\n"); goto error; } } ppc->ppc_base = rman_get_start(ppc->res_ioport); ppc->ppc_flags = device_get_flags(dev); if (!(ppc->ppc_flags & 0x20)) { ppc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ppc->rid_irq, RF_SHAREABLE); ppc->res_drq = bus_alloc_resource_any(dev, SYS_RES_DRQ, &ppc->rid_drq, RF_ACTIVE); } if (ppc->res_irq) ppc->ppc_irq = rman_get_start(ppc->res_irq); if (ppc->res_drq) ppc->ppc_dmachan = rman_get_start(ppc->res_drq); ppc->ppc_dev = dev; ppc->ppc_model = GENERIC; ppc->ppc_mode = PPB_COMPATIBLE; ppc->ppc_epp = (ppc->ppc_flags & 0x10) >> 4; ppc->ppc_type = PPC_TYPE_GENERIC; /* * Try to detect the chipset and its mode. */ if (ppc_detect(ppc, ppc->ppc_flags & 0xf)) goto error; return (0); error: if (ppc->res_irq != 0) { bus_release_resource(dev, SYS_RES_IRQ, ppc->rid_irq, ppc->res_irq); } if (ppc->res_ioport != 0) { bus_release_resource(dev, SYS_RES_IOPORT, ppc->rid_ioport, ppc->res_ioport); } if (ppc->res_drq != 0) { bus_release_resource(dev, SYS_RES_DRQ, ppc->rid_drq, ppc->res_drq); } return (ENXIO); } int ppc_attach(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); int error; mtx_init(&ppc->ppc_lock, device_get_nameunit(dev), "ppc", MTX_DEF); device_printf(dev, "%s chipset (%s) in %s mode%s\n", ppc_models[ppc->ppc_model], ppc_avms[ppc->ppc_avm], ppc_modes[ppc->ppc_mode], (PPB_IS_EPP(ppc->ppc_mode)) ? ppc_epp_protocol[ppc->ppc_epp] : ""); if (ppc->ppc_fifo) device_printf(dev, "FIFO with %d/%d/%d bytes threshold\n", ppc->ppc_fifo, ppc->ppc_wthr, ppc->ppc_rthr); if (ppc->res_irq) { /* default to the tty mask for registration */ /* XXX */ error = bus_setup_intr(dev, ppc->res_irq, INTR_TYPE_TTY | INTR_MPSAFE, NULL, ppcintr, ppc, &ppc->intr_cookie); if (error) { device_printf(dev, "failed to register interrupt handler: %d\n", error); mtx_destroy(&ppc->ppc_lock); return (error); } } /* add ppbus as a child of this isa to parallel bridge */ ppc->ppbus = device_add_child(dev, "ppbus", -1); /* * Probe the ppbus and attach devices found. 
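* (the children probed and attached here are the usual ppbus consumers, * e.g. lpt(4), plip(4) and ppi(4), whichever are configured in)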
*/ device_probe_and_attach(ppc->ppbus); return (0); } int ppc_detach(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); if (ppc->res_irq == 0) { return (ENXIO); } /* detach & delete all children */ device_delete_children(dev); if (ppc->res_irq != 0) { bus_teardown_intr(dev, ppc->res_irq, ppc->intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, ppc->rid_irq, ppc->res_irq); } if (ppc->res_ioport != 0) { bus_release_resource(dev, SYS_RES_IOPORT, ppc->rid_ioport, ppc->res_ioport); } if (ppc->res_drq != 0) { bus_release_resource(dev, SYS_RES_DRQ, ppc->rid_drq, ppc->res_drq); } mtx_destroy(&ppc->ppc_lock); return (0); } u_char ppc_io(device_t ppcdev, int iop, u_char *addr, int cnt, u_char byte) { struct ppc_data *ppc = DEVTOSOFTC(ppcdev); PPC_ASSERT_LOCKED(ppc); switch (iop) { case PPB_OUTSB_EPP: bus_write_multi_1(ppc->res_ioport, PPC_EPP_DATA, addr, cnt); break; case PPB_OUTSW_EPP: bus_write_multi_2(ppc->res_ioport, PPC_EPP_DATA, (u_int16_t *)addr, cnt); break; case PPB_OUTSL_EPP: bus_write_multi_4(ppc->res_ioport, PPC_EPP_DATA, (u_int32_t *)addr, cnt); break; case PPB_INSB_EPP: bus_read_multi_1(ppc->res_ioport, PPC_EPP_DATA, addr, cnt); break; case PPB_INSW_EPP: bus_read_multi_2(ppc->res_ioport, PPC_EPP_DATA, (u_int16_t *)addr, cnt); break; case PPB_INSL_EPP: bus_read_multi_4(ppc->res_ioport, PPC_EPP_DATA, (u_int32_t *)addr, cnt); break; case PPB_RDTR: return (r_dtr(ppc)); case PPB_RSTR: return (r_str(ppc)); case PPB_RCTR: return (r_ctr(ppc)); case PPB_REPP_A: return (r_epp_A(ppc)); case PPB_REPP_D: return (r_epp_D(ppc)); case PPB_RECR: return (r_ecr(ppc)); case PPB_RFIFO: return (r_fifo(ppc)); case PPB_WDTR: w_dtr(ppc, byte); break; case PPB_WSTR: w_str(ppc, byte); break; case PPB_WCTR: w_ctr(ppc, byte); break; case PPB_WEPP_A: w_epp_A(ppc, byte); break; case PPB_WEPP_D: w_epp_D(ppc, byte); break; case PPB_WECR: w_ecr(ppc, byte); break; case PPB_WFIFO: w_fifo(ppc, byte); break; default: panic("%s: unknown I/O operation", __func__); break; } return (0); /* return value not significant */ } int ppc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val) { struct ppc_data *ppc = (struct ppc_data *)device_get_softc(bus); switch (index) { case PPC_IVAR_EPP_PROTO: PPC_ASSERT_LOCKED(ppc); *val = (u_long)ppc->ppc_epp; break; case PPC_IVAR_LOCK: *val = (uintptr_t)&ppc->ppc_lock; break; default: return (ENOENT); } return (0); } int ppc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { struct ppc_data *ppc = (struct ppc_data *)device_get_softc(bus); switch (index) { case PPC_IVAR_INTR_HANDLER: PPC_ASSERT_LOCKED(ppc); if (dev != ppc->ppbus) return (EINVAL); if (val == 0) { ppc->ppc_intr_hook = NULL; break; } if (ppc->ppc_intr_hook != NULL) return (EBUSY); ppc->ppc_intr_hook = (void *)val; ppc->ppc_intr_arg = device_get_softc(dev); break; default: return (ENOENT); } return (0); } /* * We allow child devices to allocate an IRQ resource at rid 0 for their * interrupt handlers.
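* In other words, a child asking for SYS_RES_IRQ with *rid == 0 is handed * the ppc-owned resource below rather than a fresh allocation, and * ppc_release_resource() asserts that the very same resource comes back. */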
struct resource * ppc_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ppc_data *ppc = DEVTOSOFTC(bus); switch (type) { case SYS_RES_IRQ: if (*rid == 0) return (ppc->res_irq); break; } return (NULL); } int ppc_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { #ifdef INVARIANTS struct ppc_data *ppc = DEVTOSOFTC(bus); #endif switch (type) { case SYS_RES_IRQ: if (rid == 0) { KASSERT(r == ppc->res_irq, ("ppc child IRQ resource mismatch")); return (0); } break; } return (EINVAL); } MODULE_DEPEND(ppc, ppbus, 1, 1, 1); Index: head/sys/dev/sbni/if_sbni_isa.c =================================================================== --- head/sys/dev/sbni/if_sbni_isa.c (revision 313981) +++ head/sys/dev/sbni/if_sbni_isa.c (revision 313982) @@ -1,168 +1,168 @@ /*- * Copyright (c) 1997-2001 Granch, Ltd. All rights reserved. * Author: Denis I.Timofeev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int sbni_probe_isa(device_t); static int sbni_attach_isa(device_t); static device_method_t sbni_isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sbni_probe_isa), DEVMETHOD(device_attach, sbni_attach_isa), { 0, 0 } }; static driver_t sbni_isa_driver = { "sbni", sbni_isa_methods, sizeof(struct sbni_softc) }; static devclass_t sbni_isa_devclass; static struct isa_pnp_id sbni_ids[] = { { 0, NULL } /* we have no pnp sbni cards atm.
*/ }; DRIVER_MODULE(sbni, isa, sbni_isa_driver, sbni_isa_devclass, 0, 0); MODULE_DEPEND(sbni, isa, 1, 1, 1); static int sbni_probe_isa(device_t dev) { struct sbni_softc *sc; int error; error = ISA_PNP_PROBE(device_get_parent(dev), dev, sbni_ids); if (error && error != ENOENT) return (error); sc = device_get_softc(dev); sc->io_res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &sc->io_rid, SBNI_PORTS, RF_ACTIVE); if (!sc->io_res) { printf("sbni: cannot allocate io ports!\n"); return (ENOENT); } if (sbni_probe(sc) != 0) { sbni_release_resources(sc); return (ENXIO); } device_set_desc(dev, "Granch SBNI12/ISA adapter"); return (0); } static int sbni_attach_isa(device_t dev) { struct sbni_softc *sc; struct sbni_flags flags; int error; sc = device_get_softc(dev); sc->dev = dev; sc->irq_res = bus_alloc_resource_any( dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); #ifndef SBNI_DUAL_COMPOUND if (sc->irq_res == NULL) { device_printf(dev, "irq conflict!\n"); sbni_release_resources(sc); return (ENOENT); } #else /* SBNI_DUAL_COMPOUND */ if (sc->irq_res) { sbni_add(sc); } else { struct sbni_softc *master; - if ((master = connect_to_master(sc)) == 0) { + if ((master = connect_to_master(sc)) == NULL) { device_printf(dev, "failed to alloc irq\n"); sbni_release_resources(sc); return (ENXIO); } else { device_printf(dev, "shared irq with %s\n", master->ifp->if_xname); } } #endif /* SBNI_DUAL_COMPOUND */ *(u_int32_t*)&flags = device_get_flags(dev); error = sbni_attach(sc, device_get_unit(dev) * 2, flags); if (error) { device_printf(dev, "cannot initialize driver\n"); sbni_release_resources(sc); return (error); } if (sc->irq_res) { error = bus_setup_intr( dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, sbni_intr, sc, &sc->irq_handle); if (error) { device_printf(dev, "bus_setup_intr\n"); sbni_detach(sc); sbni_release_resources(sc); return (error); } } return (0); } Index: head/sys/dev/sn/if_sn.c =================================================================== --- head/sys/dev/sn/if_sn.c (revision 313981) +++ head/sys/dev/sn/if_sn.c (revision 313982) @@ -1,1436 +1,1436 @@ /*- * Copyright (c) 1996 Gardner Buchanan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Gardner Buchanan. * 4. The name of Gardner Buchanan may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This is a driver for SMC's 9000 series of Ethernet adapters. * * This FreeBSD driver is derived from the smc9194 Linux driver by * Erik Stahlman and is Copyright (C) 1996 by Erik Stahlman. * This driver also shamelessly borrows from the FreeBSD ep driver * which is Copyright (C) 1994 Herb Peyerl * All rights reserved. * * It is set up for my SMC91C92 equipped Ampro LittleBoard embedded * PC. It is adapted from Erik Stahlman's Linux driver which worked * with his EFA Info*Express SVC VLB adaptor. According to SMC's databook, * it will work for the entire SMC 9xxx series. (Ha Ha) * * "Features" of the SMC chip: * 4608 byte packet memory. (for the 91C92. Others have more) * EEPROM for configuration * AUI/TP selection * * Authors: * Erik Stahlman erik@vt.edu * Herb Peyerl hpeyerl@novatel.ca * Andres Vega Garcia avega@sophia.inria.fr * Serge Babkin babkin@hq.icb.chel.su * Gardner Buchanan gbuchanan@shl.com * * Sources: * o SMC databook * o "smc9194.c:v0.10(FIXED) 02/15/96 by Erik Stahlman (erik@vt.edu)" * o "if_ep.c,v 1.19 1995/01/24 20:53:45 davidg Exp" * * Known Bugs: * o Setting of the hardware address isn't supported. * o Hardware padding isn't used. */ /* * Modifications for Megahertz X-Jack Ethernet Card (XJ-10BT) * * Copyright (c) 1996 by Tatsumi Hosokawa * BSD-nomads, Tokyo, Japan. */ /* * Multicast support by Kei TANAKA * Special thanks to itojun@itojun.org */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include /* Exported variables */ devclass_t sn_devclass; static int snioctl(struct ifnet * ifp, u_long, caddr_t); static void snresume(struct ifnet *); static void snintr_locked(struct sn_softc *); static void sninit_locked(void *); static void snstart_locked(struct ifnet *); static void sninit(void *); static void snread(struct ifnet *); static void snstart(struct ifnet *); static void snstop(struct sn_softc *); static void snwatchdog(void *); static void sn_setmcast(struct sn_softc *); static int sn_getmcf(struct ifnet *ifp, u_char *mcf); /* I (GB) have been unlucky getting the hardware padding * to work properly. 
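* Defining SW_PAD (below) therefore pads short frames to ETHER_MIN_LEN in * software in snstart_locked()/snresume() instead of setting TCR_PAD_ENABLE * and letting the chip pad in hardware.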
*/ #define SW_PAD static const char *chip_ids[15] = { NULL, NULL, NULL, /* 3 */ "SMC91C90/91C92", /* 4 */ "SMC91C94/91C96", /* 5 */ "SMC91C95", NULL, /* 7 */ "SMC91C100", /* 8 */ "SMC91C100FD", /* 9 */ "SMC91C110", NULL, NULL, NULL, NULL, NULL }; int sn_attach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); struct ifnet *ifp; uint16_t i; uint8_t *p; int rev; uint16_t address; int err; u_char eaddr[6]; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } SN_LOCK_INIT(sc); callout_init_mtx(&sc->watchdog, &sc->sc_mtx, 0); snstop(sc); sc->pages_wanted = -1; if (bootverbose || 1) { SMC_SELECT_BANK(sc, 3); rev = (CSR_READ_2(sc, REVISION_REG_W) >> 4) & 0xf; if (chip_ids[rev]) device_printf(dev, " %s ", chip_ids[rev]); else device_printf(dev, " unsupported chip: rev %d ", rev); SMC_SELECT_BANK(sc, 1); i = CSR_READ_2(sc, CONFIG_REG_W); printf("%s\n", i & CR_AUI_SELECT ? "AUI" : "UTP"); } /* * Read the station address from the chip. The MAC address is bank 1, * regs 4 - 9 */ SMC_SELECT_BANK(sc, 1); p = (uint8_t *) eaddr; for (i = 0; i < 6; i += 2) { address = CSR_READ_2(sc, IAR_ADDR0_REG_W + i); p[i + 1] = address >> 8; p[i] = address & 0xFF; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = snstart; ifp->if_ioctl = snioctl; ifp->if_init = sninit; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); ether_ifattach(ifp, eaddr); /* * Activate the interrupt so we can get card interrupts. This * needs to be done last so that we don't have/hold the lock * during startup to avoid LORs in the network layer. */ if ((err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, sn_intr, sc, &sc->intrhand)) != 0) { sn_detach(dev); return err; } return 0; } int sn_detach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; ether_ifdetach(ifp); SN_LOCK(sc); snstop(sc); SN_UNLOCK(sc); callout_drain(&sc->watchdog); sn_deactivate(dev); if_free(ifp); SN_LOCK_DESTROY(sc); return 0; } static void sninit(void *xsc) { struct sn_softc *sc = xsc; SN_LOCK(sc); sninit_locked(sc); SN_UNLOCK(sc); } /* * Reset and initialize the chip */ static void sninit_locked(void *xsc) { struct sn_softc *sc = xsc; struct ifnet *ifp = sc->ifp; int flags; int mask; SN_ASSERT_LOCKED(sc); /* * This resets the registers mostly to defaults, but doesn't affect * EEPROM. After the reset cycle, we pause briefly for the chip to * be happy. */ SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, RCR_SOFTRESET); SMC_DELAY(sc); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, 0x0000); SMC_DELAY(sc); SMC_DELAY(sc); CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, 0x0000); /* * Set the control register to automatically release successfully * transmitted packets (making the best use out of our limited * memory) and to enable the EPH interrupt on certain TX errors. */ SMC_SELECT_BANK(sc, 1); CSR_WRITE_2(sc, CONTROL_REG_W, (CTR_AUTO_RELEASE | CTR_TE_ENABLE | CTR_CR_ENABLE | CTR_LE_ENABLE)); /* Set squelch level to 240mV (default 480mV) */ flags = CSR_READ_2(sc, CONFIG_REG_W); flags |= CR_SET_SQLCH; CSR_WRITE_2(sc, CONFIG_REG_W, flags); /* * Reset the MMU and wait for it to be un-busy. 
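* (MMUCR_BUSY must be polled until clear before another MMU command may be * issued; the same busy-wait pattern recurs before MMUCR_FREEPKT and * MMUCR_RELEASE elsewhere in this driver) */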
*/ SMC_SELECT_BANK(sc, 2); CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_RESET); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; /* * Disable all interrupts */ CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); sn_setmcast(sc); /* * Set the transmitter control. We want it enabled. */ flags = TCR_ENABLE; #ifndef SW_PAD /* * I (GB) have been unlucky getting this to work. */ flags |= TCR_PAD_ENABLE; #endif /* SW_PAD */ CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, flags); /* * Now, enable interrupts */ SMC_SELECT_BANK(sc, 2); mask = IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT | IM_TX_INT; CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; sc->pages_wanted = -1; /* * Mark the interface running but not active. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->watchdog, hz, snwatchdog, sc); /* * Attempt to push out any waiting packets. */ snstart_locked(ifp); } static void snstart(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; SN_LOCK(sc); snstart_locked(ifp); SN_UNLOCK(sc); } static void snstart_locked(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; uint16_t length; uint16_t numPages; uint8_t packet_no; int time_out; int junk = 0; SN_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (sc->pages_wanted != -1) { if_printf(ifp, "snstart() while memory allocation pending\n"); return; } startagain: /* * Sneak a peek at the next packet */ m = ifp->if_snd.ifq_head; - if (m == 0) + if (m == NULL) return; /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded (A)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); IFQ_DRV_DEQUEUE(&ifp->if_snd, m); m_freem(m); goto readcheck; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256 byte * 'pages', minus 1 (A packet can't ever have 0 pages. We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; /* * Now, try to allocate the memory */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ALLOC | numPages); /* * Wait a short amount of time to see if the allocation request * completes. Otherwise, I enable the interrupt and wait for * completion asynchronously. */ time_out = MEMORY_WAIT_TIME; do { if (CSR_READ_1(sc, INTR_STAT_REG_B) & IM_ALLOC_INT) break; } while (--time_out); if (!time_out || junk > 10) { /* * No memory now. Oh well, wait until the chip finds memory * later. Remember how many pages we were asking for and * enable the allocation completion interrupt. Also set a * watchdog in case we miss the interrupt. We mark the * interface active since there is no point in attempting an * snstart() until after the memory is available. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | IM_ALLOC_INT; CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; sc->timer = 1; ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->pages_wanted = numPages; return; } /* * The memory allocation completed. 
Check the results. */ packet_no = CSR_READ_1(sc, ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { if (junk++ > 10) if_printf(ifp, "Memory allocation failed\n"); goto startagain; } /* * We have a packet number, so tell the card to use it. */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Point to the beginning of the packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ CSR_WRITE_2(sc, DATA_REG_W, 0); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) & 0xFF); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) >> 8); /* * Get the packet from the kernel. This will include the Ethernet * frame header, MAC Addresses etc. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* * Push out the data to the card. */ - for (top = m; m != 0; m = m->m_next) { + for (top = m; m != NULL; m = m->m_next) { /* * Push out words. */ CSR_WRITE_MULTI_2(sc, DATA_REG_W, mtod(m, uint16_t *), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) CSR_WRITE_1(sc, DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { CSR_WRITE_2(sc, DATA_REG_W, 0); pad -= 2; } if (pad) CSR_WRITE_1(sc, DATA_REG_B, 0); /* * Push out control byte and unused packet byte The control byte is 0 * meaning the packet is even lengthed and no special CRC handling is * desired. */ CSR_WRITE_2(sc, DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it Also set a * watchdog in case we miss the interrupt. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ENQUEUE); ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->timer = 1; BPF_MTAP(ifp, top); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(top); readcheck: /* * Is another packet coming in? We don't want to overflow the tiny * RX FIFO. If nothing has arrived then attempt to queue another * transmit packet. */ if (CSR_READ_2(sc, FIFO_PORTS_REG_W) & FIFO_REMPTY) goto startagain; return; } /* Resume a packet transmit operation after a memory allocation * has completed. * * This is basically a hacked up copy of snstart() which handles * a completed memory allocation the same way snstart() does. * It then passes control to snstart to handle any other queued * packets. */ static void snresume(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; uint16_t length; uint16_t numPages; uint16_t pages_wanted; uint8_t packet_no; if (sc->pages_wanted < 0) return; pages_wanted = sc->pages_wanted; sc->pages_wanted = -1; /* * Sneak a peek at the next packet */ m = ifp->if_snd.ifq_head; - if (m == 0) { + if (m == NULL) { if_printf(ifp, "snresume() with nothing to send\n"); return; } /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded (B)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); IFQ_DRV_DEQUEUE(&ifp->if_snd, m); m_freem(m); return; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. 
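* (in that case the pad bytes are pushed out explicitly after the mbuf * data further below)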
*/ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256 byte * 'pages', minus 1 (A packet can't ever have 0 pages. We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; SMC_SELECT_BANK(sc, 2); /* * The memory allocation completed. Check the results. If it failed, * we simply set a watchdog timer and hope for the best. */ packet_no = CSR_READ_1(sc, ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { if_printf(ifp, "Memory allocation failed. Weird.\n"); sc->timer = 1; goto try_start; } /* * We have a packet number, so tell the card to use it. */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Now, numPages should match the pages_wanted recorded when the * memory allocation was initiated. */ if (pages_wanted != numPages) { if_printf(ifp, "memory allocation wrong size. Weird.\n"); /* * If the allocation was the wrong size we simply release the * memory once it is granted. Wait for the MMU to be un-busy. */ while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_FREEPKT); return; } /* * Point to the beginning of the packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ CSR_WRITE_2(sc, DATA_REG_W, 0); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) & 0xFF); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) >> 8); /* * Get the packet from the kernel. This will include the Ethernet * frame header, MAC Addresses etc. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* * Push out the data to the card. */ - for (top = m; m != 0; m = m->m_next) { + for (top = m; m != NULL; m = m->m_next) { /* * Push out words. */ CSR_WRITE_MULTI_2(sc, DATA_REG_W, mtod(m, uint16_t *), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) CSR_WRITE_1(sc, DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { CSR_WRITE_2(sc, DATA_REG_W, 0); pad -= 2; } if (pad) CSR_WRITE_1(sc, DATA_REG_B, 0); /* * Push out control byte and unused packet byte The control byte is 0 * meaning the packet is even lengthed and no special CRC handling is * desired. */ CSR_WRITE_2(sc, DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it Also set a * watchdog in case we miss the interrupt. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ENQUEUE); BPF_MTAP(ifp, top); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(top); try_start: /* * Now pass control to snstart() to queue any additional packets */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snstart_locked(ifp); /* * We've sent something, so we're active. Set a watchdog in case the * TX_EMPTY interrupt is lost. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->timer = 1; return; } void sn_intr(void *arg) { struct sn_softc *sc = (struct sn_softc *) arg; SN_LOCK(sc); snintr_locked(sc); SN_UNLOCK(sc); } static void snintr_locked(struct sn_softc *sc) { int status, interrupts; struct ifnet *ifp = sc->ifp; /* * Chip state registers */ uint8_t mask; uint8_t packet_no; uint16_t tx_status; uint16_t card_stats; /* * Clear the watchdog. 
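* (snwatchdog() re-runs this service routine when sc->timer counts down to * zero, so a lost interrupt is eventually serviced; zeroing the timer here * records that an interrupt did arrive)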
*/ sc->timer = 0; SMC_SELECT_BANK(sc, 2); /* * Obtain the current interrupt mask and clear the hardware mask * while servicing interrupts. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B); CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); /* * Get the set of interrupts which occurred and eliminate any which * are masked. */ interrupts = CSR_READ_1(sc, INTR_STAT_REG_B); status = interrupts & mask; /* * Now, process each of the interrupt types. */ /* * Receive Overrun. */ if (status & IM_RX_OVRN_INT) { /* * Acknowledge Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_RX_OVRN_INT); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* * Got a packet. */ if (status & IM_RCV_INT) { int packet_number; SMC_SELECT_BANK(sc, 2); packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { /* * we got called, but nothing was on the FIFO */ printf("sn: Receive interrupt with nothing on FIFO\n"); goto out; } snread(ifp); } /* * An on-card memory allocation came through. */ if (status & IM_ALLOC_INT) { /* * Disable this interrupt. */ mask &= ~IM_ALLOC_INT; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snresume(ifp); } /* * TX Completion. Handle a transmit error message. This will only be * called when there is an error, because of the AUTO_RELEASE mode. */ if (status & IM_TX_INT) { /* * Acknowledge Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_TX_INT); packet_no = CSR_READ_2(sc, FIFO_PORTS_REG_W); packet_no &= FIFO_TX_MASK; /* * select this as the packet to read from */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Position the pointer to the first word from this packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | PTR_READ | 0x0000); /* * Fetch the TX status word. The value found here will be a * copy of the EPH_STATUS_REG_W at the time the transmit * failed. */ tx_status = CSR_READ_2(sc, DATA_REG_W); if (tx_status & EPHSR_TX_SUC) { device_printf(sc->dev, "Successful packet caused interrupt\n"); } else { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } if (tx_status & EPHSR_LATCOL) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); /* * Some of these errors will have disabled transmit. * Re-enable transmit now. */ SMC_SELECT_BANK(sc, 0); #ifdef SW_PAD CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, TCR_ENABLE); #else CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, TCR_ENABLE | TCR_PAD_ENABLE); #endif /* SW_PAD */ /* * kill the failed packet. Wait for the MMU to be un-busy. */ SMC_SELECT_BANK(sc, 2); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_FREEPKT); /* * Attempt to queue more transmits. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snstart_locked(ifp); } /* * Transmit underrun. We use this opportunity to update transmit * statistics from the card. */ if (status & IM_TX_EMPTY_INT) { /* * Acknowledge Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_TX_EMPTY_INT); /* * Disable this interrupt. */ mask &= ~IM_TX_EMPTY_INT; SMC_SELECT_BANK(sc, 0); card_stats = CSR_READ_2(sc, COUNTER_REG_W); /* * Single collisions */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, card_stats & ECR_COLN_MASK); /* * Multiple collisions */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (card_stats & ECR_MCOLN_MASK) >> 4); SMC_SELECT_BANK(sc, 2); /* * Attempt to enqueue some more stuff. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; snstart_locked(ifp); } /* * Some other error. Try to fix it by resetting the adapter. */ if (status & IM_EPH_INT) { snstop(sc); sninit_locked(sc); } out: /* * Handled all interrupt sources. */
SMC_SELECT_BANK(sc, 2); /* * Reestablish interrupts from mask which have not been deselected * during this interrupt. Note that the hardware mask, which was set * to 0x00 at the start of this service routine, may have been * updated by one or more of the interrupt handlers and we must let * those new interrupts stay enabled here. */ mask |= CSR_READ_1(sc, INTR_MASK_REG_B); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; } static void snread(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; struct ether_header *eh; struct mbuf *m; short status; int packet_number; uint16_t packet_length; uint8_t *data; SMC_SELECT_BANK(sc, 2); #if 0 packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { /* * we got called, but nothing was on the FIFO */ printf("sn: Receive interrupt with nothing on FIFO\n"); return; } #endif read_another: /* * Start reading from the start of the packet. Since PTR_RCV is set, * packet number is found in FIFO_PORTS_REG_W, FIFO_RX_MASK. */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_READ | PTR_RCV | PTR_AUTOINC | 0x0000); /* * First two words are status and packet_length */ status = CSR_READ_2(sc, DATA_REG_W); packet_length = CSR_READ_2(sc, DATA_REG_W) & RLEN_MASK; /* * The packet length contains 3 extra words: status, length, and an * extra word with the control byte. */ packet_length -= 6; /* * Account for receive errors and discard. */ if (status & RS_ERRORS) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto out; } /* * A packet is received. */ /* * Adjust for odd-length packet. */ if (status & RS_ODDFRAME) packet_length++; /* * Allocate a header mbuf from the kernel. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) goto out; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = packet_length; /* * Attach an mbuf cluster. */ if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); printf("sn: snread() kernel memory allocation problem\n"); goto out; } eh = mtod(m, struct ether_header *); /* * Get packet, including link layer address, from interface. */ data = (uint8_t *) eh; CSR_READ_MULTI_2(sc, DATA_REG_W, (uint16_t *) data, packet_length >> 1); if (packet_length & 1) { data += packet_length & ~1; *data = CSR_READ_1(sc, DATA_REG_B); } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* * Remove link layer addresses and whatnot. */ m->m_pkthdr.len = m->m_len = packet_length; /* * Drop locks before calling if_input() since it may re-enter * snstart() in the netisr case. This would result in a * lock reversal. Better performance might be obtained by * chaining all packets received, dropping the lock, and then * calling if_input() on each one. */ SN_UNLOCK(sc); (*ifp->if_input)(ifp, m); SN_LOCK(sc); out: /* * Error or good, tell the card to get rid of this packet. Wait for * the MMU to be un-busy. */ SMC_SELECT_BANK(sc, 2); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_RELEASE); /* * Check whether another packet is ready */ packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { return; } goto read_another; } /* * Handle IOCTLS. This function is completely stolen from if_ep.c * As with its progenitor, it does not handle hardware address * changes.
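* Only SIOCSIFFLAGS, SIOCADDMULTI and SIOCDELMULTI are handled locally; * everything else is passed through to ether_ioctl(). */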
*/ static int snioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct sn_softc *sc = ifp->if_softc; int error = 0; switch (cmd) { case SIOCSIFFLAGS: SN_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) { snstop(sc); } else { /* reinitialize card on any parameter change */ sninit_locked(sc); } SN_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* update multicast filter list. */ SN_LOCK(sc); sn_setmcast(sc); error = 0; SN_UNLOCK(sc); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void snwatchdog(void *arg) { struct sn_softc *sc; sc = arg; SN_ASSERT_LOCKED(sc); callout_reset(&sc->watchdog, hz, snwatchdog, sc); if (sc->timer == 0 || --sc->timer > 0) return; snintr_locked(sc); } /* 1. zero the interrupt mask * 2. clear the enable receive flag * 3. clear the enable xmit flags */ static void snstop(struct sn_softc *sc) { struct ifnet *ifp = sc->ifp; /* * Clear interrupt mask; disable all interrupts. */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); /* * Disable transmitter and Receiver */ SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, 0x0000); CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, 0x0000); /* * Cancel watchdog. */ sc->timer = 0; callout_stop(&sc->watchdog); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } int sn_activate(device_t dev) { struct sn_softc *sc = device_get_softc(dev); sc->port_rid = 0; sc->port_res = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &sc->port_rid, SMC_IO_EXTENT, RF_ACTIVE); if (!sc->port_res) { if (bootverbose) device_printf(dev, "Cannot allocate ioport\n"); return ENOMEM; } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { if (bootverbose) device_printf(dev, "Cannot allocate irq\n"); sn_deactivate(dev); return ENOMEM; } return (0); } void sn_deactivate(device_t dev) { struct sn_softc *sc = device_get_softc(dev); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = 0; if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; if (sc->modem_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->modem_rid, sc->modem_res); sc->modem_res = 0; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; return; } /* * Function: sn_probe(device_t dev) * * Purpose: * Tests to see if a given ioaddr points to an SMC9xxx chip. * Tries to cause as little damage as possible if it's not a SMC chip. * Returns a 0 on success * * Algorithm: * (1) see if the high byte of BANK_SELECT is 0x33 * (2) compare the ioaddr with the base register's address * (3) see if I recognize the chip ID in the appropriate register * * */ int sn_probe(device_t dev) { struct sn_softc *sc = device_get_softc(dev); uint16_t bank; uint16_t revision_register; uint16_t base_address_register; int err; if ((err = sn_activate(dev)) != 0) return err; /* * First, see if the high byte is 0x33 */ bank = CSR_READ_2(sc, BANK_SELECT_REG_W); if ((bank & BSR_DETECT_MASK) != BSR_DETECT_VALUE) { #ifdef SN_DEBUG device_printf(dev, "test1 failed\n"); #endif goto error; } /* * The above MIGHT indicate a device, but I need to write to further * test this. Go to bank 0, then test that the register still * reports the high byte is 0x33. 
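 *
 * (Background for this check, added for clarity: on SMC9xxx parts the
 * upper byte of BANK_SELECT_REG_W reads back as 0x33 no matter which
 * bank is currently selected, which is why the same BSR_DETECT test
 * can be repeated after each bank switch below.)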
 */
	CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x0000);
	bank = CSR_READ_2(sc, BANK_SELECT_REG_W);
	if ((bank & BSR_DETECT_MASK) != BSR_DETECT_VALUE) {
#ifdef SN_DEBUG
		device_printf(dev, "test2 failed\n");
#endif
		goto error;
	}

	/*
	 * Well, we've already written once, so hopefully another time won't
	 * hurt.  This time, I need to switch the bank register to bank 1, so
	 * I can access the base address register.  The contents of the
	 * BASE_ADDR_REG_W register, after some jiggery pokery, are expected
	 * to match the I/O port address where the adapter is being probed.
	 */
	CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x0001);
	base_address_register = (CSR_READ_2(sc, BASE_ADDR_REG_W) >> 3) & 0x3e0;

	if (rman_get_start(sc->port_res) != base_address_register) {
		/*
		 * Well, the base address register didn't match.  Must not
		 * have been an SMC chip after all.
		 */
#ifdef SN_DEBUG
		device_printf(dev, "test3 failed ioaddr = 0x%x, "
		    "base_address_register = 0x%x\n",
		    rman_get_start(sc->port_res), base_address_register);
#endif
		goto error;
	}

	/*
	 * Check if the revision register is something that I recognize.
	 * More entries may need to be added here as future chip revisions
	 * appear.
	 */
	CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x3);
	revision_register = CSR_READ_2(sc, REVISION_REG_W);
	if (!chip_ids[(revision_register >> 4) & 0xF]) {
		/*
		 * I don't recognize this chip, so...
		 */
#ifdef SN_DEBUG
		device_printf(dev, "test4 failed\n");
#endif
		goto error;
	}

	/*
	 * At this point I'll assume that the chip is an SMC9xxx.  It might
	 * be prudent to check a listing of MAC addresses against the
	 * hardware address, or do some other tests.
	 */
	sn_deactivate(dev);
	return 0;
error:
	sn_deactivate(dev);
	return ENXIO;
}

#define MCFSZ 8

static void
sn_setmcast(struct sn_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	int flags;
	uint8_t mcf[MCFSZ];

	SN_ASSERT_LOCKED(sc);

	/*
	 * Set the receiver filter.  We want receive enabled and auto strip
	 * of CRC from received packets.  If we are promiscuous then set that
	 * bit too.
	 */
	flags = RCR_ENABLE | RCR_STRIP_CRC;

	if (ifp->if_flags & IFF_PROMISC) {
		flags |= RCR_PROMISC | RCR_ALMUL;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		flags |= RCR_ALMUL;
	} else {
		if (sn_getmcf(ifp, mcf)) {
			/* set filter */
			SMC_SELECT_BANK(sc, 3);
			CSR_WRITE_2(sc, MULTICAST1_REG_W,
			    ((uint16_t)mcf[1] << 8) | mcf[0]);
			CSR_WRITE_2(sc, MULTICAST2_REG_W,
			    ((uint16_t)mcf[3] << 8) | mcf[2]);
			CSR_WRITE_2(sc, MULTICAST3_REG_W,
			    ((uint16_t)mcf[5] << 8) | mcf[4]);
			CSR_WRITE_2(sc, MULTICAST4_REG_W,
			    ((uint16_t)mcf[7] << 8) | mcf[6]);
		} else {
			flags |= RCR_ALMUL;
		}
	}
	SMC_SELECT_BANK(sc, 0);
	CSR_WRITE_2(sc, RECV_CONTROL_REG_W, flags);
}

static int
sn_getmcf(struct ifnet *ifp, uint8_t *mcf)
{
	int i;
	uint32_t index, index2;
	uint8_t *af = mcf;
	struct ifmultiaddr *ifma;

	bzero(mcf, MCFSZ);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK) {
			if_maddr_runlock(ifp);
			return 0;
		}
		/*
		 * Take the low 6 bits of the CRC of the multicast address
		 * and bit-reverse them to form the index into the 64-bit
		 * filter.
		 */
		index = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3f;
		index2 = 0;
		for (i = 0; i < 6; i++) {
			index2 <<= 1;
			index2 |= (index & 0x01);
			index >>= 1;
		}
		af[index2 >> 3] |= 1 << (index2 & 7);
	}
	if_maddr_runlock(ifp);
	return 1;  /* use multicast filter */
}
Index: head/sys/dev/sym/sym_hipd.c
===================================================================
--- head/sys/dev/sym/sym_hipd.c	(revision 313981)
+++ head/sys/dev/sym/sym_hipd.c	(revision 313982)
@@ -1,9620 +1,9620 @@
/*-
 *  Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010
 *  PCI-SCSI controllers.
* * Copyright (C) 1999-2001 Gerard Roudier * * This driver also supports the following Symbios/LSI PCI-SCSI chips: * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895, * 53C810, 53C815, 53C825 and the 53C1510D is 53C8XX mode. * * * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-1999 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier * Stefan Esser * Copyright (C) 1994 Wolfgang Stanglmeier * * The initialisation code, and part of the code that addresses * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM * written by Justin T. Gibbs. * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham * *----------------------------------------------------------------------------- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define SYM_DRIVER_NAME "sym-1.6.5-20000902" /* #define SYM_DEBUG_GENERIC_SUPPORT */ #include /* * Driver configuration options. */ #include "opt_sym.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __sparc64__ #include #include #endif #include #include #include #include #include #include #include #include /* Short and quite clear integer types */ typedef int8_t s8; typedef int16_t s16; typedef int32_t s32; typedef u_int8_t u8; typedef u_int16_t u16; typedef u_int32_t u32; /* * Driver definitions. */ #include #include /* * IA32 architecture does not reorder STORES and prevents * LOADS from passing STORES. It is called `program order' * by Intel and allows device drivers to deal with memory * ordering by only ensuring that the code is not reordered * by the compiler when ordering is required. * Other architectures implement a weaker ordering that * requires memory barriers (and also IO barriers when they * make sense) to be used. 
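 *
 * As an illustration of the pattern this enables (a sketch; see the
 * OUTL_DSP wrapper further down): the driver updates a data structure
 * in host memory, issues MEMORY_BARRIER(), and only then writes the
 * DSP register that makes the SCRIPTS processor consume that
 * structure, so the device can never observe the register write
 * before the memory stores it depends on.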
 */
#if defined __i386__ || defined __amd64__
#define MEMORY_BARRIER()	do { ; } while(0)
#elif defined __powerpc__
#define MEMORY_BARRIER()	__asm__ volatile("eieio; sync" : : : "memory")
#elif defined __sparc64__
#define MEMORY_BARRIER()	__asm__ volatile("membar #Sync" : : : "memory")
#elif defined __arm__
#define MEMORY_BARRIER()	dmb()
#elif defined __aarch64__
#define MEMORY_BARRIER()	dmb(sy)
#elif defined __riscv__
#define MEMORY_BARRIER()	fence()
#else
#error	"Not supported platform"
#endif

/*
 * A la VMS/CAM-3 queue management.
 */
typedef struct sym_quehead {
	struct sym_quehead *flink;	/* Forward  pointer */
	struct sym_quehead *blink;	/* Backward pointer */
} SYM_QUEHEAD;

#define sym_que_init(ptr) do { \
	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
} while (0)

static __inline void __sym_que_add(struct sym_quehead * new,
	struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink	= new;
	new->flink	= flink;
	new->blink	= blink;
	blink->flink	= new;
}

static __inline void __sym_que_del(struct sym_quehead * blink,
	struct sym_quehead * flink)
{
	flink->blink = blink;
	blink->flink = flink;
}

static __inline int sym_que_empty(struct sym_quehead *head)
{
	return head->flink == head;
}

static __inline void sym_que_splice(struct sym_quehead *list,
	struct sym_quehead *head)
{
	struct sym_quehead *first = list->flink;

	if (first != list) {
		struct sym_quehead *last = list->blink;
		struct sym_quehead *at   = head->flink;

		first->blink = head;
		head->flink  = first;

		last->flink = at;
		at->blink   = last;
	}
}

#define sym_que_entry(ptr, type, member) \
	((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))

#define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)

#define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)

#define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)

static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
{
	struct sym_quehead *elem = head->flink;

	if (elem != head)
		__sym_que_del(head, elem->flink);
	else
		elem = NULL;
	return elem;
}

#define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)

/*
 * This one may be useful.
 */
#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for (qp = (head)->flink; qp != (head); qp = qp->flink)

/*
 * FreeBSD does not offer our kind of queue in the CAM CCB.
 * So, we have to cast.
 */
#define sym_qptr(p)	((struct sym_quehead *) (p))

/*
 * Simple bitmap operations.
 */
#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))

/*
 * Number of tasks per device we want to handle.
 */
#if	SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)

#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 * This one means 'NO TAG for this job'.
 */
#define NO_TAG	(256)

/*
 * Number of SCSI targets.
 */
#if	SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 * Number of logical units per target.
 */
#if	SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 * Asynchronous pre-scaler (ns).  Shall be 40 for
 * the SCSI timings to be compliant.
 */
#define	SYM_CONF_MIN_ASYNC (40)

/*
 * Number of entries in the START and DONE queues.
 *
 * We limit to 1 PAGE in order to succeed allocation of
 * these queues.  Each entry is 8 bytes long (2 DWORDS).
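 *
 * As a worked example (assuming 4 KB pages): PAGE_SIZE/8 = 512
 * entries is the hard cap enforced below, so a queue sized from
 * 7*SYM_CONF_MAX_TASK+2 entries is clipped to 512 entries
 * (4096 bytes) and SYM_CONF_MAX_START then becomes 510.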
*/ #ifdef SYM_CONF_MAX_START #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2) #else #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2) #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) #endif #if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8 #undef SYM_CONF_MAX_QUEUE #define SYM_CONF_MAX_QUEUE PAGE_SIZE/8 #undef SYM_CONF_MAX_START #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) #endif /* * For this one, we want a short name :-) */ #define MAX_QUEUE SYM_CONF_MAX_QUEUE /* * Active debugging tags and verbosity. */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_POINTER (0x0800) #if 0 static int sym_debug = 0; #define DEBUG_FLAGS sym_debug #else /* #define DEBUG_FLAGS (0x0631) */ #define DEBUG_FLAGS (0x0000) #endif #define sym_verbose (np->verbose) /* * Insert a delay in micro-seconds and milli-seconds. */ static void UDELAY(int us) { DELAY(us); } static void MDELAY(int ms) { while (ms--) UDELAY(1000); } /* * Simple power of two buddy-like allocator. * * This simple code is not intended to be fast, but to * provide power of 2 aligned memory allocations. * Since the SCRIPTS processor only supplies 8 bit arithmetic, * this allocator allows simple and fast address calculations * from the SCRIPTS code. In addition, cache line alignment * is guaranteed for power of 2 cache line size. * * This allocator has been developed for the Linux sym53c8xx * driver, since this O/S does not provide naturally aligned * allocations. * It has the advantage of allowing the driver to use private * pages of memory that will be useful if we ever need to deal * with IO MMUs for PCI. 
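 *
 * A worked example of the buddy behaviour (sizes are assumptions,
 * matching MEMO_SHIFT = 4 and one 4 KB cluster below): a request for
 * 24 bytes is rounded up to the 32-byte free list; if only the
 * 4096-byte cluster is free, it is split into
 * 2048+1024+512+256+128+64+32+32, one 32-byte chunk is returned and
 * the remaining power-of-2 fragments are threaded on their free
 * lists.  Freeing a chunk later coalesces it with its `buddy' (the
 * block whose address differs only in that size bit, hence the
 * a ^ s computation in ___sym_mfree) whenever the buddy is free too.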
*/ #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */ #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */ #if 0 #define MEMO_FREE_UNUSED /* Free unused pages immediately */ #endif #define MEMO_WARN 1 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER) #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT) #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1) #define get_pages() malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT) #define free_pages(p) free((p), M_DEVBUF) typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */ typedef struct m_link { /* Link between free memory chunks */ struct m_link *next; } m_link_s; typedef struct m_vtob { /* Virtual to Bus address translation */ struct m_vtob *next; bus_dmamap_t dmamap; /* Map for this chunk */ m_addr_t vaddr; /* Virtual address */ m_addr_t baddr; /* Bus physical address */ } m_vtob_s; /* Hash this stuff a bit to speed up translations */ #define VTOB_HASH_SHIFT 5 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) #define VTOB_HASH_CODE(m) \ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK) typedef struct m_pool { /* Memory pool of a given kind */ bus_dma_tag_t dev_dmat; /* Identifies the pool */ bus_dma_tag_t dmat; /* Tag for our fixed allocations */ m_addr_t (*getp)(struct m_pool *); #ifdef MEMO_FREE_UNUSED void (*freep)(struct m_pool *, m_addr_t); #endif #define M_GETP() mp->getp(mp) #define M_FREEP(p) mp->freep(mp, p) int nump; m_vtob_s *(vtob[VTOB_HASH_SIZE]); struct m_pool *next; struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1]; } m_pool_s; static void *___sym_malloc(m_pool_s *mp, int size) { int i = 0; int s = (1 << MEMO_SHIFT); int j; m_addr_t a; m_link_s *h = mp->h; if (size > MEMO_CLUSTER_SIZE) return NULL; while (size > s) { s <<= 1; ++i; } j = i; while (!h[j].next) { if (s == MEMO_CLUSTER_SIZE) { h[j].next = (m_link_s *) M_GETP(); if (h[j].next) h[j].next->next = NULL; break; } ++j; s <<= 1; } a = (m_addr_t) h[j].next; if (a) { h[j].next = h[j].next->next; while (j > i) { j -= 1; s >>= 1; h[j].next = (m_link_s *) (a+s); h[j].next->next = NULL; } } #ifdef DEBUG printf("___sym_malloc(%d) = %p\n", size, (void *) a); #endif return (void *) a; } static void ___sym_mfree(m_pool_s *mp, void *ptr, int size) { int i = 0; int s = (1 << MEMO_SHIFT); m_link_s *q; m_addr_t a, b; m_link_s *h = mp->h; #ifdef DEBUG printf("___sym_mfree(%p, %d)\n", ptr, size); #endif if (size > MEMO_CLUSTER_SIZE) return; while (size > s) { s <<= 1; ++i; } a = (m_addr_t) ptr; while (1) { #ifdef MEMO_FREE_UNUSED if (s == MEMO_CLUSTER_SIZE) { M_FREEP(a); break; } #endif b = a ^ s; q = &h[i]; while (q->next && q->next != (m_link_s *) b) { q = q->next; } if (!q->next) { ((m_link_s *) a)->next = h[i].next; h[i].next = (m_link_s *) a; break; } q->next = q->next->next; a = a & b; s <<= 1; ++i; } } static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags) { void *p; p = ___sym_malloc(mp, size); if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("new %-10s[%4d] @%p.\n", name, size, p); if (p) bzero(p, size); else if (uflags & MEMO_WARN) printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size); return p; } #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, MEMO_WARN) static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name) { if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr); ___sym_mfree(mp, ptr, size); } /* * Default memory pool we donnot need to involve in DMA. 
 */
/*
 * With the `bus dma abstraction', we use a separate pool for
 * memory we donnot need to involve in DMA.
 */
static m_addr_t ___mp0_getp(m_pool_s *mp)
{
	m_addr_t m = (m_addr_t) get_pages();
	if (m)
		++mp->nump;
	return m;
}

#ifdef	MEMO_FREE_UNUSED
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
	free_pages(m);
	--mp->nump;
}
#endif

#ifdef	MEMO_FREE_UNUSED
static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
#else
static m_pool_s mp0 = {0, 0, ___mp0_getp};
#endif

/*
 * Actual memory allocation routine for non-DMAed memory.
 */
static void *sym_calloc(int size, char *name)
{
	void *m;
	/* Lock */
	m = __sym_calloc(&mp0, size, name);
	/* Unlock */
	return m;
}

/*
 * Actual memory free routine for non-DMAed memory.
 */
static void sym_mfree(void *ptr, int size, char *name)
{
	/* Lock */
	__sym_mfree(&mp0, ptr, size, name);
	/* Unlock */
}

/*
 * DMAable pools.
 */
/*
 * With `bus dma abstraction', we use a separate pool per parent
 * BUS handle.  A reverse table (hashed) is maintained for virtual
 * to BUS address translation.
 */
static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg __unused,
    int error)
{
	bus_addr_t *baddr;

	KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg));
	baddr = (bus_addr_t *)arg;
	if (error)
		*baddr = 0;
	else
		*baddr = segs->ds_addr;
}

static m_addr_t ___dma_getp(m_pool_s *mp)
{
	m_vtob_s *vbp;
	void *vaddr = NULL;
	bus_addr_t baddr = 0;

	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (!vbp)
		goto out_err;

	if (bus_dmamem_alloc(mp->dmat, &vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &vbp->dmamap))
		goto out_err;
	bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr,
	    MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, BUS_DMA_NOWAIT);
	if (baddr) {
		int hc = VTOB_HASH_CODE(vaddr);
		vbp->vaddr = (m_addr_t) vaddr;
		vbp->baddr = (m_addr_t) baddr;
		vbp->next = mp->vtob[hc];
		mp->vtob[hc] = vbp;
		++mp->nump;
		return (m_addr_t) vaddr;
	}
out_err:
	if (baddr)
		bus_dmamap_unload(mp->dmat, vbp->dmamap);
	if (vaddr)
		bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap);
	if (vbp)
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
	return 0;
}

#ifdef	MEMO_FREE_UNUSED
static void ___dma_freep(m_pool_s *mp, m_addr_t m)
{
	m_vtob_s **vbpp, *vbp;
	int hc = VTOB_HASH_CODE(m);

	vbpp = &mp->vtob[hc];
	while (*vbpp && (*vbpp)->vaddr != m)
		vbpp = &(*vbpp)->next;
	if (*vbpp) {
		vbp = *vbpp;
		*vbpp = (*vbpp)->next;
		bus_dmamap_unload(mp->dmat, vbp->dmamap);
		bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap);
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
		--mp->nump;
	}
}
#endif

static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat)
{
	m_pool_s *mp;
	for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next);
	return mp;
}

static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat)
{
	m_pool_s *mp = NULL;

	mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
	if (mp) {
		mp->dev_dmat = dev_dmat;
		if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
		    NULL, NULL, MEMO_CLUSTER_SIZE, 1,
		    MEMO_CLUSTER_SIZE, 0, NULL, NULL, &mp->dmat)) {
			mp->getp = ___dma_getp;
#ifdef	MEMO_FREE_UNUSED
			mp->freep = ___dma_freep;
#endif
			mp->next = mp0.next;
			mp0.next = mp;
			return mp;
		}
	}
	if (mp)
		__sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
	return NULL;
}

#ifdef	MEMO_FREE_UNUSED
static void ___del_dma_pool(m_pool_s *p)
{
	struct m_pool **pp = &mp0.next;

	while (*pp && *pp != p)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = (*pp)->next;
		bus_dma_tag_destroy(p->dmat);
		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
	}
}
#endif

static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name)
{
	struct m_pool *mp;
	void
*m = NULL; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (!mp) mp = ___cre_dma_pool(dev_dmat); if (mp) m = __sym_calloc(mp, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ return m; } static void __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name) { struct m_pool *mp; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) __sym_mfree(mp, m, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ } static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m) { m_pool_s *mp; int hc = VTOB_HASH_CODE(m); m_vtob_s *vp = NULL; m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) { vp = mp->vtob[hc]; while (vp && (m_addr_t) vp->vaddr != a) vp = vp->next; } /* Unlock */ if (!vp) panic("sym: VTOBUS FAILED!\n"); return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; } /* * Verbs for DMAable memory handling. * The _uvptv_ macro avoids a nasty warning about pointer to volatile * being discarded. */ #define _uvptv_(p) ((void *)((vm_offset_t)(p))) #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n) #define _sym_mfree_dma(np, p, s, n) \ __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n) #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n) #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n) #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p)) #define vtobus(p) _vtobus(np, p) /* * Print a buffer in hexadecimal format. */ static void sym_printb_hex (u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); } /* * Same with a label at beginning and .\n at end. */ static void sym_printl_hex (char *label, u_char *p, int n) { printf ("%s", label); sym_printb_hex (p, n); printf (".\n"); } /* * Return a string for SCSI BUS mode. */ static const char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Some poor and bogus sync table that refers to Tekram NVRAM layout. */ #ifdef SYM_CONF_NVRAM_SUPPORT static const u_char Tekram_sync[16] = {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; #endif /* * Union of supported NVRAM formats. */ struct sym_nvram { int type; #define SYM_SYMBIOS_NVRAM (1) #define SYM_TEKRAM_NVRAM (2) #ifdef SYM_CONF_NVRAM_SUPPORT union { Symbios_nvram Symbios; Tekram_nvram Tekram; } data; #endif }; /* * This one is hopefully useless, but actually useful. :-) */ #ifndef assert #define assert(expression) { \ if (!(expression)) { \ (void)panic( \ "assertion \"%s\" failed: file \"%s\", line %d\n", \ #expression, \ __FILE__, __LINE__); \ } \ } #endif /* * Some provision for a possible big endian mode supported by * Symbios chips (never seen, by the way). * For now, this stuff does not deserve any comments. :) */ #define sym_offb(o) (o) #define sym_offw(o) (o) /* * Some provision for support for BIG ENDIAN CPU. */ #define cpu_to_scr(dw) htole32(dw) #define scr_to_cpu(dw) le32toh(dw) /* * Access to the chip IO registers and on-chip RAM. * We use the `bus space' interface under FreeBSD-4 and * later kernel versions. 
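 *
 * For example (an illustrative use of the wrappers defined below,
 * not a quote from the driver): reading the chip ISTAT register and
 * setting its SEM bit would be written
 *
 *	istat = INB(nc_istat);
 *	OUTB(nc_istat, istat | SEM);
 *
 * with the actual bus_space access routed through io_res or mmio_res
 * depending on SYM_CONF_IOMAPPED.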
*/ #if defined(SYM_CONF_IOMAPPED) #define INB_OFF(o) bus_read_1(np->io_res, (o)) #define INW_OFF(o) bus_read_2(np->io_res, (o)) #define INL_OFF(o) bus_read_4(np->io_res, (o)) #define OUTB_OFF(o, v) bus_write_1(np->io_res, (o), (v)) #define OUTW_OFF(o, v) bus_write_2(np->io_res, (o), (v)) #define OUTL_OFF(o, v) bus_write_4(np->io_res, (o), (v)) #else /* Memory mapped IO */ #define INB_OFF(o) bus_read_1(np->mmio_res, (o)) #define INW_OFF(o) bus_read_2(np->mmio_res, (o)) #define INL_OFF(o) bus_read_4(np->mmio_res, (o)) #define OUTB_OFF(o, v) bus_write_1(np->mmio_res, (o), (v)) #define OUTW_OFF(o, v) bus_write_2(np->mmio_res, (o), (v)) #define OUTL_OFF(o, v) bus_write_4(np->mmio_res, (o), (v)) #endif /* SYM_CONF_IOMAPPED */ #define OUTRAM_OFF(o, a, l) \ bus_write_region_1(np->ram_res, (o), (a), (l)) /* * Common definitions for both bus space and legacy IO methods. */ #define INB(r) INB_OFF(offsetof(struct sym_reg,r)) #define INW(r) INW_OFF(offsetof(struct sym_reg,r)) #define INL(r) INL_OFF(offsetof(struct sym_reg,r)) #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v)) #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v)) #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v)) #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /* * We normally want the chip to have a consistent view * of driver internal data structures when we restart it. * Thus these macros. */ #define OUTL_DSP(v) \ do { \ MEMORY_BARRIER(); \ OUTL (nc_dsp, (v)); \ } while (0) #define OUTONB_STD() \ do { \ MEMORY_BARRIER(); \ OUTONB (nc_dcntl, (STD|NOCOM)); \ } while (0) /* * Command control block states. */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_WAIT (4) /* waiting for resource */ #define HS_DONEMASK (0x80) #define HS_COMPLETE (4|HS_DONEMASK) #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */ #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */ /* * Software Interrupt Codes */ #define SIR_BAD_SCSI_STATUS (1) #define SIR_SEL_ATN_NO_MSG_OUT (2) #define SIR_MSG_RECEIVED (3) #define SIR_MSG_WEIRD (4) #define SIR_NEGO_FAILED (5) #define SIR_NEGO_PROTO (6) #define SIR_SCRIPT_STOPPED (7) #define SIR_REJECT_TO_SEND (8) #define SIR_SWIDE_OVERRUN (9) #define SIR_SODL_UNDERRUN (10) #define SIR_RESEL_NO_MSG_IN (11) #define SIR_RESEL_NO_IDENTIFY (12) #define SIR_RESEL_BAD_LUN (13) #define SIR_TARGET_SELECTED (14) #define SIR_RESEL_BAD_I_T_L (15) #define SIR_RESEL_BAD_I_T_L_Q (16) #define SIR_ABORT_SENT (17) #define SIR_RESEL_ABORTED (18) #define SIR_MSG_OUT_DONE (19) #define SIR_COMPLETE_ERROR (20) #define SIR_DATA_OVERRUN (21) #define SIR_BAD_PHASE (22) #define SIR_MAX (22) /* * Extended error bit codes. * xerr_status field of struct sym_ccb. */ #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */ #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */ #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */ #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */ /* * Negotiation status. * nego_status field of struct sym_ccb. 
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 * A CCB hashed table is used to retrieve CCB address
 * from DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)

/*
 * Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 * Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)
#define SYM_SCAN_TARGETS_HILO	(1<<1)

/*
 * Device quirks.
 * Some devices, for example the CHEETAH 2 LVD, disconnect without
 * saving the DATA POINTER then reselect and terminate the IO.
 * On reselection, the automatic RESTORE DATA POINTER makes the
 * CURRENT DATA POINTER not point at the end of the IO.
 * This behaviour just breaks our calculation of the residual.
 * For now, we just force an AUTO SAVE on disconnection and will
 * fix that in a further driver version.
 */
#define SYM_QUIRK_AUTOSAVE 1

/*
 * Misc.
 */
#define	SYM_LOCK()		mtx_lock(&np->mtx)
#define	SYM_LOCK_ASSERT(_what)	mtx_assert(&np->mtx, (_what))
#define	SYM_LOCK_DESTROY()	mtx_destroy(&np->mtx)
#define	SYM_LOCK_INIT()		mtx_init(&np->mtx, "sym_lock", NULL, MTX_DEF)
#define	SYM_LOCK_INITIALIZED()	mtx_initialized(&np->mtx)
#define	SYM_UNLOCK()		mtx_unlock(&np->mtx)

#define SYM_SNOOP_TIMEOUT (10000000)
#define SYM_PCI_IO	PCIR_BAR(0)
#define SYM_PCI_MMIO	PCIR_BAR(1)
#define SYM_PCI_RAM	PCIR_BAR(2)
#define SYM_PCI_RAM64	PCIR_BAR(3)

/*
 * Back-pointer from the CAM CCB to our data structures.
 */
#define sym_hcb_ptr	spriv_ptr0
/* #define sym_ccb_ptr	spriv_ptr1 */

/*
 * We mostly have to deal with pointers.
 * Thus these typedef's.
 */
typedef struct sym_tcb *tcb_p;
typedef struct sym_lcb *lcb_p;
typedef struct sym_ccb *ccb_p;
typedef struct sym_hcb *hcb_p;

/*
 * Gather negotiable parameters values.
 */
struct sym_trans {
	u8 scsi_version;
	u8 spi_version;
	u8 period;
	u8 offset;
	u8 width;
	u8 options;	/* PPR options */
};

struct sym_tinfo {
	struct sym_trans current;
	struct sym_trans goal;
	struct sym_trans user;
};

#define BUS_8_BIT	MSG_EXT_WDTR_BUS_8_BIT
#define BUS_16_BIT	MSG_EXT_WDTR_BUS_16_BIT

/*
 * Global TCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the TCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_tcbh {
	/*
	 * Scripts bus addresses of LUN table accessed from scripts.
	 * LUN #0 is a special case, since multi-lun devices are rare,
	 * and we want to speed up the general case and not waste
	 * resources.
	 */
	u32	luntbl_sa;	/* bus address of this table	*/
	u32	lun0_sa;	/* bus address of LCB #0	*/
	/*
	 * Actual SYNC/WIDE IO registers value for this target.
	 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
	 * so have alignment constraints.
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register		*/
/*1*/	u_char	sval;		/* -> SXFER  io register	*/
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register	*/
};

/*
 * Target Control Block
 */
struct sym_tcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_tcbh head;

	/*
	 * LUN table used by the SCRIPTS processor.
	 * An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table	*/

	/*
	 * LUN table used by the C code.
	 */
	lcb_p	lun0p;		/* LCB of LUN #0 (usual case)	*/
#if SYM_CONF_MAX_LUN > 1
	lcb_p	*lunmp;		/* Other LCBs [1..MAX_LUN]	*/
#endif

	/*
	 * Bitmap that tells about LUNs that succeeded at least
	 * one IO and are therefore assumed to be real devices.
	 * Avoids useless allocation of the LCB structure.
	 */
	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Bitmap that tells about LUNs that do not yet have an LCB
	 * allocated (not discovered or LCB allocation failed).
	 */
	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Transfer capabilities (SIP)
	 */
	struct sym_tinfo tinfo;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	ccb_p   nego_cp;	/* CCB used for the nego	*/

	/*
	 * Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 * Other user settable limits and options.
	 * These limits are read from the NVRAM if present.
	 */
	u_char	usrflags;
	u_short	usrtags;
};

/*
 * Assert some alignments required by the chip.
 */
CTASSERT(((offsetof(struct sym_reg, nc_sxfer) ^
    offsetof(struct sym_tcb, head.sval)) &3) == 0);
CTASSERT(((offsetof(struct sym_reg, nc_scntl3) ^
    offsetof(struct sym_tcb, head.wval)) &3) == 0);

/*
 * Global LCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the LCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 * SCRIPTS address jumped by SCRIPTS on reselection.
	 * For not probed logical units, this address points to
	 * SCRIPTS that deal with bad LU handling (must be at
	 * offset zero of the LCB for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 * Task (bus address of a CCB) read from SCRIPTS that points
	 * to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 * Task table bus address (read from SCRIPTS).
	 */
	u32	itlq_tbl_sa;
};

/*
 * Logical Unit Control Block
 */
struct sym_lcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_lcbh head;

	/*
	 * Task table read from SCRIPTS that contains pointers to
	 * ITLQ nexuses.  The bus address read from SCRIPTS is
	 * inside the header.
	 */
	u32	*itlq_tbl;	/* Kernel virtual address	*/

	/*
	 * Busy CCBs management.
	 */
	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
	u_short	busy_itl;	/* Number of busy untagged CCBs	*/

	/*
	 * Circular tag allocation buffer.
	 */
	u_short	ia_tag;		/* Tag allocation index		*/
	u_short	if_tag;		/* Tag release index		*/
	u_char	*cb_tags;	/* Circular tags buffer		*/

	/*
	 * Set when we want to clear all tasks.
	 */
	u_char to_clear;

	/*
	 * Capabilities.
	 */
	u_char	user_flags;
	u_char	current_flags;
};

/*
 * Action from SCRIPTS on a task.
 * Is part of the CCB, but is also used separately to plug
 * error handling action to perform from SCRIPTS.
 */
struct sym_actscr {
	u32	start;		/* Jumped by SCRIPTS after selection	*/
	u32	restart;	/* Jumped by SCRIPTS on reselection	*/
};

/*
 * Phase mismatch context.
 *
 * It is part of the CCB and is used as parameters for the
 * DATA pointer.  We need two contexts to handle correctly the
 * SAVED DATA POINTER.
 */
struct sym_pmc {
	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
	u32	ret;		/* SCRIPT return address	*/
};

/*
 * LUN control block lookup.
 * We use a direct pointer for LUN #0, and a table of
 * pointers which is only allocated for devices that support
 * LUN(s) > 0.
 */
#if SYM_CONF_MAX_LUN <= 1
#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : 0
#else
#define sym_lp(tp, lun) \
	(!lun) ? (tp)->lun0p : (tp)->lunmp ? \
(tp)->lunmp[(lun)] : 0 #endif /* * Status are used by the host and the script processor. * * The last four bytes (status[4]) are copied to the * scratchb register (declared as scr0..scr3) just after the * select/reselect, and copied back just after disconnecting. * Inside the script the XX_REG are used. */ /* * Last four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define SS_PRT nc_scr2 #define HF_REG scr3 #define HF_PRT nc_scr3 /* * Last four bytes (host) */ #define actualquirks phys.head.status[0] #define host_status phys.head.status[1] #define ssss_status phys.head.status[2] #define host_flags phys.head.status[3] /* * Host flags */ #define HF_IN_PM0 1u #define HF_IN_PM1 (1u<<1) #define HF_ACT_PM (1u<<2) #define HF_DP_SAVED (1u<<3) #define HF_SENSE (1u<<4) #define HF_EXT_ERR (1u<<5) #define HF_DATA_IN (1u<<6) #ifdef SYM_CONF_IARB_SUPPORT #define HF_HINT_IARB (1u<<7) #endif /* * Global CCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the ccb to a global * address after selection (or reselection) and copied back * before disconnect. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_ccbh { /* * Start and restart SCRIPTS addresses (must be at 0). */ /*0*/ struct sym_actscr go; /* * SCRIPTS jump address that deal with data pointers. * 'savep' points to the position in the script responsible * for the actual transfer of data. * It's written on reception of a SAVE_DATA_POINTER message. */ u32 savep; /* Jump address to saved data pointer */ u32 lastp; /* SCRIPTS address at end of data */ u32 goalp; /* Not accessed for now from SCRIPTS */ /* * Status fields. */ u8 status[4]; }; /* * Data Structure Block * * During execution of a ccb by the script processor, the * DSA (data structure address) register points to this * substructure of the ccb. */ struct sym_dsb { /* * CCB header. * Also assumed at offset 0 of the sym_ccb structure. */ /*0*/ struct sym_ccbh head; /* * Phase mismatch contexts. * We need two to handle correctly the SAVED DATA POINTER. * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic * for address calculation from SCRIPTS. */ struct sym_pmc pm0; struct sym_pmc pm1; /* * Table data for Script */ struct sym_tblsel select; struct sym_tblmove smsg; struct sym_tblmove smsg_ext; struct sym_tblmove cmd; struct sym_tblmove sense; struct sym_tblmove wresid; struct sym_tblmove data [SYM_CONF_MAX_SG]; }; /* * Our Command Control Block */ struct sym_ccb { /* * This is the data structure which is pointed by the DSA * register when it is executed by the script processor. * It must be the first entry. */ struct sym_dsb phys; /* * Pointer to CAM ccb and related stuff. */ struct callout ch; /* callout handle */ union ccb *cam_ccb; /* CAM scsiio ccb */ u8 cdb_buf[16]; /* Copy of CDB */ u8 *sns_bbuf; /* Bounce buffer for sense data */ #define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data) int data_len; /* Total data length */ int segments; /* Number of SG segments */ /* * Miscellaneous status'. */ u_char nego_status; /* Negotiation status */ u_char xerr_status; /* Extended error flags */ u32 extra_bytes; /* Extraneous bytes transferred */ /* * Message areas. * We prepare a message to be sent after selection. * We may use a second one if the command is rescheduled * due to CHECK_CONDITION or COMMAND TERMINATED. * Contents are IDENTIFY and SIMPLE_TAG. 
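	 * (Layout sketch, for illustration only; the exact message
	 * codes are built elsewhere in the driver: a tagged command
	 * would start as scsi_smsg[0] = IDENTIFY message,
	 * scsi_smsg[1] = SIMPLE QUEUE TAG message code,
	 * scsi_smsg[2] = tag number.)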
	 * While negotiating sync or wide transfer,
	 * a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 * Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command	*/
	u_char	sv_scsi_status;	/* Saved SCSI status 		*/
	u_char	sv_xerr_status;	/* Saved extended status	*/
	int	sv_resid;	/* Saved residual		*/

	/*
	 * Map for the DMA of user data.
	 */
	void		*arg;	/* Argument for some callback	*/
	bus_dmamap_t	dmamap;	/* DMA map for user data	*/
	u_char		dmamapped;
#define SYM_DMA_NONE	0
#define SYM_DMA_READ	1
#define SYM_DMA_WRITE	2

	/*
	 * Other fields.
	 */
	u32	ccb_ba;		/* BUS address of this CCB	*/
	u_short	tag;		/* Tag for this transfer	*/
				/*  NO_TAG means no tag		*/
	u_char	target;
	u_char	lun;
	ccb_p	link_ccbh;	/* Host adapter CCB hash chain	*/
	SYM_QUEHEAD
		link_ccbq;	/* Link to free/busy CCB queue	*/
	u32	startp;		/* Initial data pointer		*/
	int	ext_sg;		/* Extreme data pointer, used	*/
	int	ext_ofs;	/*  to calculate the residual.	*/
	u_char	to_abort;	/* Want this IO to be aborted	*/
};

#define CCB_BA(cp,lbl)	(cp->ccb_ba + offsetof(struct sym_ccb, lbl))

/*
 * Host Control Block
 */
struct sym_hcb {
	struct mtx	mtx;

	/*
	 * Global headers.
	 * Due to poorness of addressing capabilities, earlier
	 * chips (810, 815, 825) copy part of the data structures
	 * (CCB, TCB and LCB) in fixed areas.
	 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 * Idle task and invalid task actions and
	 * their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 * Dummy lun table to protect us against target
	 * returning bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address	*/
	u32	badlun_sa;	/* SCRIPT handler BUS address	*/

	/*
	 * Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 * Bit 32-63 of the on-chip RAM bus address in LE format.
	 * The START_RAM64 script loads the MMRS and MMWS from this
	 * field.
	 */
	u32	scr_ram_seg;

	/*
	 * Chip and controller identification.
	 */
	device_t device;

	/*
	 * Initial value of some IO register bits.
	 * These values are assumed to have been set by BIOS, and may
	 * be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 * Actual initial value of IO register bits used by the
	 * driver.  They are loaded at initialisation according to
	 * features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 * Target data.
	 */
#ifdef __amd64__
	struct sym_tcb	*target;
#else
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];
#endif

	/*
	 * Target control block bus address array used by the SCRIPT
	 * on reselection.
	 */
	u32		*targtbl;
	u32		targtbl_ba;

	/*
	 * CAM SIM information for this instance.
	 */
	struct cam_sim	*sim;
	struct cam_path	*path;

	/*
	 * Allocated hardware resources.
	 */
	struct resource	*irq_res;
	struct resource	*io_res;
	struct resource	*mmio_res;
	struct resource	*ram_res;
	int		ram_id;
	void *intr;

	/*
	 * Bus stuff.
	 *
	 * My understanding of PCI is that all agents must share the
	 * same addressing range and model.
	 * But some hardware architecture guys provide complex and
	 * brain-deaded stuff that makes shit.
	 * This driver only supports PCI compliant implementations and
	 * deals with part of the BUS stuff complexity only to fit O/S
	 * requirements.
	 */

	/*
	 * DMA stuff.
*/ bus_dma_tag_t bus_dmat; /* DMA tag from parent BUS */ bus_dma_tag_t data_dmat; /* DMA tag for user data */ /* * BUS addresses of the chip */ vm_offset_t mmio_ba; /* MMIO BUS address */ int mmio_ws; /* MMIO Window size */ vm_offset_t ram_ba; /* RAM BUS address */ int ram_ws; /* RAM window size */ /* * SCRIPTS virtual and physical bus addresses. * 'script' is loaded in the on-chip RAM if present. * 'scripth' stays in main memory for all chips except the * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM. */ u_char *scripta0; /* Copies of script and scripth */ u_char *scriptb0; /* Copies of script and scripth */ vm_offset_t scripta_ba; /* Actual script and scripth */ vm_offset_t scriptb_ba; /* bus addresses. */ vm_offset_t scriptb0_ba; u_short scripta_sz; /* Actual size of script A */ u_short scriptb_sz; /* Actual size of script B */ /* * Bus addresses, setup and patch methods for * the selected firmware. */ struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */ struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ void (*fw_setup)(hcb_p np, const struct sym_fw *fw); void (*fw_patch)(hcb_p np); const char *fw_name; /* * General controller parameters and configuration. */ u_short device_id; /* PCI device id */ u_char revision_id; /* PCI device revision id */ u_int features; /* Chip features map */ u_char myaddr; /* SCSI id of the adapter */ u_char maxburst; /* log base 2 of dwords burst */ u_char maxwide; /* Maximum transfer width */ u_char minsync; /* Min sync period factor (ST) */ u_char maxsync; /* Max sync period factor (ST) */ u_char maxoffs; /* Max scsi offset (ST) */ u_char minsync_dt; /* Min sync period factor (DT) */ u_char maxsync_dt; /* Max sync period factor (DT) */ u_char maxoffs_dt; /* Max scsi offset (DT) */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char clock_divn; /* Number of clock divisors */ u32 clock_khz; /* SCSI clock frequency in KHz */ u32 pciclk_khz; /* Estimated PCI clock in KHz */ /* * Start queue management. * It is filled up by the host processor and accessed by the * SCRIPTS processor in order to start SCSI commands. */ volatile /* Prevent code optimizations */ u32 *squeue; /* Start queue virtual address */ u32 squeue_ba; /* Start queue BUS address */ u_short squeueput; /* Next free slot of the queue */ u_short actccbs; /* Number of allocated CCBs */ /* * Command completion queue. * It is the same size as the start queue to avoid overflow. */ u_short dqueueget; /* Next position to scan */ volatile /* Prevent code optimizations */ u32 *dqueue; /* Completion (done) queue */ u32 dqueue_ba; /* Done queue BUS address */ /* * Miscellaneous buffers accessed by the scripts-processor. * They shall be DWORD aligned, because they may be read or * written with a script command. */ u_char msgout[8]; /* Buffer for MESSAGE OUT */ u_char msgin [8]; /* Buffer for MESSAGE IN */ u32 lastmsg; /* Last SCSI message sent */ u_char scratch; /* Scratch for SCSI receive */ /* * Miscellaneous configuration and status parameters. */ u_char usrflags; /* Miscellaneous user flags */ u_char scsi_mode; /* Current SCSI BUS mode */ u_char verbose; /* Verbosity for this controller*/ u32 cache; /* Used for cache test at init. */ /* * CCB lists and queue. 
*/ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */ SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */ SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ /* * During error handling and/or recovery, * active CCBs that are to be completed with * error or requeued are moved from the busy_ccbq * to the comp_ccbq prior to completion. */ SYM_QUEHEAD comp_ccbq; /* * CAM CCB pending queue. */ SYM_QUEHEAD cam_ccbq; /* * IMMEDIATE ARBITRATION (IARB) control. * * We keep track in 'last_cp' of the last CCB that has been * queued to the SCRIPTS processor and clear 'last_cp' when * this CCB completes. If last_cp is not zero at the moment * we queue a new CCB, we set a flag in 'last_cp' that is * used by the SCRIPTS as a hint for setting IARB. * We donnot set more than 'iarb_max' consecutive hints for * IARB in order to leave devices a chance to reselect. * By the way, any non zero value of 'iarb_max' is unfair. :) */ #ifdef SYM_CONF_IARB_SUPPORT u_short iarb_max; /* Max. # consecutive IARB hints*/ u_short iarb_count; /* Actual # of these hints */ ccb_p last_cp; #endif /* * Command abort handling. * We need to synchronize tightly with the SCRIPTS * processor in order to handle things correctly. */ u_char abrt_msg[4]; /* Message to send buffer */ struct sym_tblmove abrt_tbl; /* Table for the MOV of it */ struct sym_tblsel abrt_sel; /* Sync params for selection */ u_char istat_sem; /* Tells the chip to stop (SEM) */ }; #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) /* * Return the name of the controller. */ static __inline const char *sym_name(hcb_p np) { return device_get_nameunit(np->device); } /*--------------------------------------------------------------------------*/ /*------------------------------ FIRMWARES ---------------------------------*/ /*--------------------------------------------------------------------------*/ /* * This stuff will be moved to a separate source file when * the driver will be broken into several source modules. */ /* * Macros used for all firmwares. */ #define SYM_GEN_A(s, label) ((short) offsetof(s, label)), #define SYM_GEN_B(s, label) ((short) offsetof(s, label)), #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label) #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label) #ifdef SYM_CONF_GENERIC_SUPPORT /* * Allocate firmware #1 script area. */ #define SYM_FWA_SCR sym_fw1a_scr #define SYM_FWB_SCR sym_fw1b_scr #include static const struct sym_fwa_ofs sym_fw1a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static const struct sym_fwb_ofs sym_fw1b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Allocate firmware #2 script area. */ #define SYM_FWA_SCR sym_fw2a_scr #define SYM_FWB_SCR sym_fw2b_scr #include static const struct sym_fwa_ofs sym_fw2a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static const struct sym_fwb_ofs sym_fw2b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) SYM_GEN_B(struct SYM_FWB_SCR, start64) SYM_GEN_B(struct SYM_FWB_SCR, pm_handle) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #undef SYM_GEN_A #undef SYM_GEN_B #undef PADDR_A #undef PADDR_B #ifdef SYM_CONF_GENERIC_SUPPORT /* * Patch routine for firmware #1. */ static void sym_fw1_patch(hcb_p np) { struct sym_fw1a_scr *scripta0; struct sym_fw1b_scr *scriptb0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; /* * Remove LED support if not needed. 
*/ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some data in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Patch routine for firmware #2. */ static void sym_fw2_patch(hcb_p np) { struct sym_fw2a_scr *scripta0; struct sym_fw2b_scr *scriptb0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; /* * Remove LED support if not needed. */ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some variable in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); /* * Remove the load of SCNTL4 on reselection if not a C10. */ if (!(np->features & FE_C10)) { scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP); scripta0->resel_scntl4[1] = cpu_to_scr(0); } /* * Remove a couple of work-arounds specific to C1010 if * they are not desirable. See `sym_fw2.h' for more details. */ if (!(np->device_id == PCI_ID_LSI53C1010_2 && np->revision_id < 0x1 && np->pciclk_khz < 60000)) { scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); scripta0->datao_phase[1] = cpu_to_scr(0); } if (!(np->device_id == PCI_ID_LSI53C1010 && /* np->revision_id < 0xff */ 1)) { scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); scripta0->sel_done[1] = cpu_to_scr(0); } /* * Patch some other variables in SCRIPTS. * These ones are loaded by the SCRIPTS processor. */ scriptb0->pm0_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm0_data)); scriptb0->pm1_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm1_data)); } /* * Fill the data area in scripts. * To be done for all firmwares. */ static void sym_fw_fill_data (u32 *in, u32 *out) { int i; for (i = 0; i < SYM_CONF_MAX_SG; i++) { *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; *in++ = offsetof (struct sym_dsb, data[i]); *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; *out++ = offsetof (struct sym_dsb, data[i]); } } /* * Setup useful script bus addresses. * To be done for all firmwares. */ static void sym_fw_setup_bus_addresses(hcb_p np, const struct sym_fw *fw) { u32 *pa; const u_short *po; int i; /* * Build the bus address table for script A * from the script A offset table. */ po = (const u_short *) fw->a_ofs; pa = (u32 *) &np->fwa_bas; for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) pa[i] = np->scripta_ba + po[i]; /* * Same for script B. 
 */
	po = (const u_short *) fw->b_ofs;
	pa = (u32 *) &np->fwb_bas;
	for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
		pa[i] = np->scriptb_ba + po[i];
}

#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Setup routine for firmware #1.
 */
static void
sym_fw1_setup(hcb_p np, const struct sym_fw *fw)
{
	struct sym_fw1a_scr *scripta0;

	scripta0 = (struct sym_fw1a_scr *) np->scripta0;

	/*
	 * Fill variable parts in scripts.
	 */
	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);

	/*
	 * Setup bus addresses used from the C code.
	 */
	sym_fw_setup_bus_addresses(np, fw);
}
#endif	/* SYM_CONF_GENERIC_SUPPORT */

/*
 * Setup routine for firmware #2.
 */
static void
sym_fw2_setup(hcb_p np, const struct sym_fw *fw)
{
	struct sym_fw2a_scr *scripta0;

	scripta0 = (struct sym_fw2a_scr *) np->scripta0;

	/*
	 * Fill variable parts in scripts.
	 */
	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);

	/*
	 * Setup bus addresses used from the C code.
	 */
	sym_fw_setup_bus_addresses(np, fw);
}

/*
 * Allocate firmware descriptors.
 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
static const struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
#endif	/* SYM_CONF_GENERIC_SUPPORT */
static const struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");

/*
 * Find the most appropriate firmware for a chip.
 */
static const struct sym_fw *
sym_find_firmware(const struct sym_pci_chip *chip)
{
	if (chip->features & FE_LDSTR)
		return &sym_fw2;
#ifdef	SYM_CONF_GENERIC_SUPPORT
	else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
		return &sym_fw1;
#endif
	else
		return NULL;
}

/*
 * Bind a script to physical addresses.
 */
static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
{
	u32 opcode, new, old, tmp1, tmp2;
	u32 *end, *cur;
	int relocs;

	cur = start;
	end = start + len/4;

	while (cur < end) {
		opcode = *cur;

		/*
		 * If we forget to change the length
		 * in scripts, a field will be
		 * padded with 0.  This is an illegal
		 * command.
		 */
		if (opcode == 0) {
			printf ("%s: ERROR0 IN SCRIPT at %d.\n",
			    sym_name(np), (int) (cur-start));
			MDELAY (10000);
			++cur;
			continue;
		}

		/*
		 * We use the bogus value 0xf00ff00f ;-)
		 * to reserve data area in SCRIPTS.
		 */
		if (opcode == SCR_DATA_ZERO) {
			*cur++ = 0;
			continue;
		}

		if (DEBUG_FLAGS & DEBUG_SCRIPT)
			printf ("%d:  <%x>\n", (int) (cur-start),
			    (unsigned)opcode);

		/*
		 * We don't have to decode ALL commands.
		 */
		switch (opcode >> 28) {
		case 0xf:
			/*
			 * LOAD / STORE DSA relative, don't relocate.
			 */
			relocs = 0;
			break;
		case 0xe:
			/*
			 * LOAD / STORE absolute.
			 */
			relocs = 1;
			break;
		case 0xc:
			/*
			 * COPY has TWO arguments.
			 */
			relocs = 2;
			tmp1 = cur[1];
			tmp2 = cur[2];
			if ((tmp1 ^ tmp2) & 3) {
				printf ("%s: ERROR1 IN SCRIPT at %d.\n",
				    sym_name(np), (int) (cur-start));
				MDELAY (10000);
			}
			/*
			 * If PREFETCH feature not enabled, remove
			 * the NO FLUSH bit if present.
			 */
			if ((opcode & SCR_NO_FLUSH) &&
			    !(np->features & FE_PFEN)) {
				opcode = (opcode & ~SCR_NO_FLUSH);
			}
			break;
		case 0x0:
			/*
			 * MOVE/CHMOV (absolute address)
			 */
			if (!(np->features & FE_WIDE))
				opcode = (opcode | OPC_MOVE);
			relocs = 1;
			break;
		case 0x1:
			/*
			 * MOVE/CHMOV (table indirect)
			 */
			if (!(np->features & FE_WIDE))
				opcode = (opcode | OPC_MOVE);
			relocs = 0;
			break;
		case 0x8:
			/*
			 * JUMP / CALL
			 * don't relocate if relative :-)
			 */
			if (opcode & 0x00800000)
				relocs = 0;
			else if ((opcode & 0xf8400000) == 0x80400000) /*JUMP64*/
				relocs = 2;
			else
				relocs = 1;
			break;
		case 0x4:
		case 0x5:
		case 0x6:
		case 0x7:
			relocs = 1;
			break;
		default:
			relocs = 0;
			break;
		}

		/*
		 * Scriptify:) the opcode.
		 */
		*cur++ = cpu_to_scr(opcode);

		/*
		 * If no relocation, assume 1 argument
		 * and just scriptize:) it.
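		 *
		 * (Relocation example, for illustration only: a JUMP to a
		 * label in script A is emitted by the script assembler as
		 * an address word tagged RELOC_LABEL_A; the loop below
		 * strips the tag and adds np->scripta_ba, turning the
		 * word into the absolute bus address of that label.)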
*/ if (!relocs) { *cur = cpu_to_scr(*cur); ++cur; continue; } /* * Otherwise performs all needed relocations. */ while (relocs--) { old = *cur; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + np->mmio_ba; break; case RELOC_LABEL_A: new = (old & ~RELOC_MASK) + np->scripta_ba; break; case RELOC_LABEL_B: new = (old & ~RELOC_MASK) + np->scriptb_ba; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + np->hcb_ba; break; case 0: /* * Don't relocate a 0 address. * They are mostly used for patched or * script self-modified areas. */ if (old == 0) { new = old; break; } /* fall through */ default: new = 0; panic("sym_fw_bind_script: " "weird relocation %x\n", old); break; } *cur++ = cpu_to_scr(new); } } } /*---------------------------------------------------------------------------*/ /*--------------------------- END OF FIRMWARES -----------------------------*/ /*---------------------------------------------------------------------------*/ /* * Function prototypes. */ static void sym_save_initial_setting (hcb_p np); static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); static void sym_put_start_queue (hcb_p np, ccb_p cp); static void sym_chip_reset (hcb_p np); static void sym_soft_reset (hcb_p np); static void sym_start_reset (hcb_p np); static int sym_reset_scsi_bus (hcb_p np, int enab_int); static int sym_wakeup_done (hcb_p np); static void sym_flush_busy_queue (hcb_p np, int cam_status); static void sym_flush_comp_queue (hcb_p np, int cam_status); static void sym_init (hcb_p np, int reason); static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp); static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak); static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); static void sym_intr (void *arg); static void sym_poll (struct cam_sim *sim); static void sym_recover_scsi_int (hcb_p np, u_char hsts); static void sym_int_sto (hcb_p np); static void sym_int_udc (hcb_p np); static void sym_int_sbmc (hcb_p np); static void sym_int_par (hcb_p np, u_short sist); static void sym_int_ma (hcb_p np); static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task); static void sym_sir_bad_scsi_status (hcb_p np, ccb_p cp); static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task); static void sym_sir_task_recovery (hcb_p np, int num); static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs); static int sym_compute_residual (hcb_p np, ccb_p cp); static int sym_show_msg (u_char * msg); static void sym_print_msg (ccb_p cp, char *label, u_char *msg); static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); static void sym_int_sir (hcb_p np); static void sym_free_ccb (hcb_p np, ccb_p cp); static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); static ccb_p sym_alloc_ccb (hcb_p 
np); static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa); static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); static int sym_snooptest (hcb_p np); static void sym_selectclock(hcb_p np, u_char scntl3); static void sym_getclock (hcb_p np, int mult); static int sym_getpciclock (hcb_p np); static void sym_complete_ok (hcb_p np, ccb_p cp); static void sym_complete_error (hcb_p np, ccb_p cp); static void sym_callout (void *arg); static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out); static void sym_reset_dev (hcb_p np, union ccb *ccb); static void sym_action (struct cam_sim *sim, union ccb *ccb); static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static int sym_scatter_sg_physical (hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static void sym_action2 (struct cam_sim *sim, union ccb *ccb); static void sym_update_trans(hcb_p np, struct sym_trans *tip, struct ccb_trans_settings *cts); static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts); static const struct sym_pci_chip *sym_find_pci_chip (device_t dev); static int sym_pci_probe (device_t dev); static int sym_pci_attach (device_t dev); static void sym_pci_free (hcb_p np); static int sym_cam_attach (hcb_p np); static void sym_cam_free (hcb_p np); static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram); static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp); static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp); /* * Print something which allows to retrieve the controller type, * unit, target, lun concerned by a kernel message. */ static void PRINT_TARGET (hcb_p np, int target) { printf ("%s:%d:", sym_name(np), target); } static void PRINT_LUN(hcb_p np, int target, int lun) { printf ("%s:%d:%d:", sym_name(np), target, lun); } static void PRINT_ADDR (ccb_p cp) { if (cp && cp->cam_ccb) xpt_print_path(cp->cam_ccb->ccb_h.path); } /* * Take into account this ccb in the freeze count. */ static void sym_freeze_cam_ccb(union ccb *ccb) { if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) { if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } } } /* * Set the status field of a CAM CCB. */ static __inline void sym_set_cam_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } /* * Get the status field of a CAM CCB. */ static __inline int sym_get_cam_status(union ccb *ccb) { return ccb->ccb_h.status & CAM_STATUS_MASK; } /* * Enqueue a CAM CCB. */ static void sym_enqueue_cam_ccb(ccb_p cp) { hcb_p np; union ccb *ccb; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED)); ccb->ccb_h.status = CAM_REQ_INPROG; callout_reset_sbt(&cp->ch, SBT_1MS * ccb->ccb_h.timeout, 0, sym_callout, (caddr_t)ccb, 0); ccb->ccb_h.status |= CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = np; sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq); } /* * Complete a pending CAM CCB. 
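 *
 *  [Editor's sketch]  Illustrative only, not driver code: how
 *  sym_set_cam_status()/sym_get_cam_status() above replace just the
 *  completion code while preserving flag bits such as CAM_DEV_QFRZN.
 *  The mask value is a stand-in, not the CAM definition.
 */
#if 0
static unsigned
example_swap_status(unsigned ccb_status, unsigned new_code)
{
	const unsigned status_mask = 0x3f;	/* stand-in for CAM_STATUS_MASK */

	return ((ccb_status & ~status_mask) | new_code);
}
#endif
/*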
*/ static void sym_xpt_done(hcb_p np, union ccb *ccb, ccb_p cp) { SYM_LOCK_ASSERT(MA_OWNED); if (ccb->ccb_h.status & CAM_SIM_QUEUED) { callout_stop(&cp->ch); sym_remque(sym_qptr(&ccb->ccb_h.sim_links)); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = NULL; } xpt_done(ccb); } static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status) { SYM_LOCK_ASSERT(MA_OWNED); sym_set_cam_status(ccb, cam_status); xpt_done(ccb); } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static __inline void sym_init_burst(hcb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Print out the list of targets that have some flag disabled by user. */ static void sym_print_targets_flag(hcb_p np, int mask, char *msg) { int cnt; int i; for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { if (i == np->myaddr) continue; if (np->target[i].usrflags & mask) { if (!cnt++) printf("%s: %s disabled for targets", sym_name(np), msg); printf(" %d", i); } } if (cnt) printf(".\n"); } /* * Save initial settings of some IO registers. * Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (hcb_p np) { np->sv_scntl0 = INB(nc_scntl0) & 0x0a; np->sv_scntl3 = INB(nc_scntl3) & 0x07; np->sv_dmode = INB(nc_dmode) & 0xce; np->sv_dcntl = INB(nc_dcntl) & 0xa8; np->sv_ctest3 = INB(nc_ctest3) & 0x01; np->sv_ctest4 = INB(nc_ctest4) & 0x80; np->sv_gpcntl = INB(nc_gpcntl); np->sv_stest1 = INB(nc_stest1); np->sv_stest2 = INB(nc_stest2) & 0x20; np->sv_stest4 = INB(nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(nc_scntl4); np->sv_ctest5 = INB(nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(nc_ctest5) & 0x24; } /* * Prepare io register values used by sym_init() according * to selected and supported features. */ static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram) { u_char burst_max; u32 period; int i; /* * Wide ? */ np->maxwide = (np->features & FE_WIDE)? 1 : 0; /* * Get the frequency of the chip's clock. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; np->clock_khz = (np->features & FE_CLK80)? 
80000 : 40000; np->clock_khz *= np->multiplier; if (np->clock_khz != 40000) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. */ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = 62; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) #ifdef __LP64__ np->rv_ccntl1 |= (XTIMOD | EXTIBMV); #else np->rv_ccntl1 |= (DDAC); #endif /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010 Errata. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x2) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((np->device_id == PCI_ID_SYM53C810 && np->revision_id >= 0x10 && np->revision_id <= 0x11) || (np->device_id == PCI_ID_SYM53C860 && np->revision_id <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. 
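 *
 *  [Editor's sketch]  Illustrative only, not driver code: one round trip
 *  through the burst encoding selected above, using the burst_code()/
 *  burst_length() formulas with made-up register values.
 */
#if 0
static void
example_burst(void)
{
	unsigned dmode = 0x80, ctest4 = 0x00, ctest5 = 0x04;
	unsigned bc, len;

	/* burst_code(): 0 if bit 0x80 of CTEST4 is set, else
	 * ((dmode & 0xc0) >> 6) + (ctest5 & 0x04) + 1 = 2 + 4 + 1 = 7 */
	bc  = (ctest4 & 0x80) ? 0 :
	    ((dmode & 0xc0) >> 6) + (ctest5 & 0x04) + 1;
	/* burst_length(): code 0 disables bursts, else 1 << code */
	len = (bc == 0) ? 0 : 1U << bc;		/* 1 << 7 = 128 transfers */
	(void)len;
}
#endif
/*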
*/ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ if (SYM_SETUP_PCI_PARITY) np->rv_ctest4 |= MPEE; /* Master parity checking */ if (SYM_SETUP_SCSI_PARITY) np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; sym_nvram_setup_host (np, nvram); #ifdef __sparc64__ np->myaddr = OF_getscsinitid(np->device); #endif /* * Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the * current BUS mode through the STEST4 IO register. * - For previous generation chips (825/825A/875), * user has to tell us how to check against HVD, * since a 100% safe algorithm is not possible. */ np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && np->device_id == PCI_ID_SYM53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tcb_p tp = &np->target[i]; tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2; tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2; tp->tinfo.user.period = np->minsync; if (np->features & FE_ULTRA3) tp->tinfo.user.period = np->minsync_dt; tp->tinfo.user.offset = np->maxoffs; tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; sym_nvram_setup_target (np, i, nvram); /* * For now, guess PPR/DT support from the period * and BUS width. */ if (np->features & FE_ULTRA3) { if (tp->tinfo.user.period <= 9 && tp->tinfo.user.width == BUS_16_BIT) { tp->tinfo.user.options |= PPR_OPT_DT; tp->tinfo.user.offset = np->maxoffs_dt; tp->tinfo.user.spi_version = 3; } } if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. 
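 *
 *  [Editor's sketch]  Illustrative only, not driver code: a worked
 *  instance of the minimum-sync-factor computation earlier in this
 *  function, with example clock rates.
 */
#if 0
static int
example_minsync(unsigned clock_khz)
{
	/* period in tenths of ns, from the first divisor (4 * 10 MHz) */
	unsigned period = (4U * 10000000U + clock_khz - 1) / clock_khz;

	if (period <= 250)	return (10);
	if (period <= 303)	return (11);
	if (period <= 500)	return (12);
	return ((period + 39) / 40);
}
/* 160 MHz core clock -> 10, 80 MHz -> 12, 40 MHz -> 25; the driver
 * then clamps the result against the chip's ULTRA feature bits. */
#endif
/*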
*/ i = nvram->type; printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np), i == SYM_SYMBIOS_NVRAM ? "Symbios" : (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose > 1) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* * Let user be aware of targets that have some disable flags set. */ sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); if (sym_verbose) sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, "SCAN FOR LUNS"); return 0; } /* * Prepare the next negotiation message if needed. * * Fill in the part of message buffer that contains the * negotiation and the nego_status field of the CCB. * Returns the size of the message in bytes. */ static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr) { tcb_p tp = &np->target[cp->target]; int msglen = 0; /* * Early C1010 chips need a work-around for DT * data transfer to work. */ if (!(np->features & FE_U3EN)) tp->tinfo.goal.options = 0; /* * negotiate using PPR ? */ if (tp->tinfo.goal.options & PPR_OPT_MASK) nego = NS_PPR; /* * negotiate wide transfers ? */ else if (tp->tinfo.current.width != tp->tinfo.goal.width) nego = NS_WIDE; /* * negotiate synchronous transfers? */ else if (tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset) nego = NS_SYNC; switch (nego) { case NS_SYNC: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 3; msgptr[msglen++] = M_X_SYNC_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; break; case NS_WIDE: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 2; msgptr[msglen++] = M_X_WIDE_REQ; msgptr[msglen++] = tp->tinfo.goal.width; break; case NS_PPR: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 6; msgptr[msglen++] = M_X_PPR_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = 0; msgptr[msglen++] = tp->tinfo.goal.offset; msgptr[msglen++] = tp->tinfo.goal.width; msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT; break; } cp->nego_status = nego; if (nego) { tp->nego_cp = cp; /* Keep track a nego will be performed */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" : nego == NS_WIDE ? "wide msgout" : "ppr msgout", msgptr); } } return msglen; } /* * Insert a job into the start queue. */ static void sym_put_start_queue(hcb_p np, ccb_p cp) { u_short qidx; #ifdef SYM_CONF_IARB_SUPPORT /* * If the previously queued CCB is not yet done, * set the IARB hint. The SCRIPTS will go with IARB * for this job when starting the previous one. 
* We leave devices a chance to win arbitration by * not using more than 'iarb_max' consecutive * immediate arbitrations. */ if (np->last_cp && np->iarb_count < np->iarb_max) { np->last_cp->host_flags |= HF_HINT_IARB; ++np->iarb_count; } else np->iarb_count = 0; np->last_cp = cp; #endif /* * Insert first the idle task and then our job. * The MB should ensure proper ordering. */ qidx = np->squeueput + 2; if (qidx >= MAX_QUEUE*2) qidx = 0; np->squeue [qidx] = cpu_to_scr(np->idletask_ba); MEMORY_BARRIER(); np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); np->squeueput = qidx; if (DEBUG_FLAGS & DEBUG_QUEUE) printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); /* * Script processor may be waiting for reselect. * Wake it up. */ MEMORY_BARRIER(); OUTB (nc_istat, SIGP|np->istat_sem); } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). * On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (hcb_p np) { OUTB (nc_istat, SRST); UDELAY (10); OUTB (nc_istat, 0); UDELAY(2000); /* For BUS MODE to settle */ } /* * Soft reset the chip. * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip. */ static void sym_soft_reset (hcb_p np) { u_char istat; int i; OUTB (nc_istat, CABRT); for (i = 1000000 ; i ; --i) { istat = INB (nc_istat); if (istat & SIP) { INW (nc_sist); continue; } if (istat & DIP) { OUTB (nc_istat, 0); INB (nc_dstat); break; } } if (!i) printf("%s: unable to abort current chip operation.\n", sym_name(np)); sym_chip_reset (np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(hcb_p np) { (void) sym_reset_scsi_bus(np, 1); } static int sym_reset_scsi_bus(hcb_p np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW (nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB (nc_stest3, TE); OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); OUTB (nc_scntl1, CRST); UDELAY (200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. */ term = INB(nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!(np->features & FE_WIDE)) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB (nc_scntl1, 0); /* MDELAY(100); */ return retv; } /* * The chip may have completed jobs. Look at the DONE QUEUE. * * On architectures that may reorder LOAD/STORE operations, * a memory barrier may be needed after the reading of the * so-called `flag' and prior to dealing with the data. 
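 *
 *  [Editor's sketch]  Illustrative only, not driver code: the queue
 *  insertion above relies on exactly that kind of store ordering.  A
 *  minimal rendition of sym_put_start_queue()'s two-step publish, with
 *  made-up names and a plain compiler barrier standing in for
 *  MEMORY_BARRIER().
 */
#if 0
static void
example_put_start_queue(unsigned *squeue, unsigned *putp, unsigned qlen2,
    unsigned job_ba, unsigned idle_ba)
{
	unsigned next = *putp + 2;

	if (next >= qlen2)
		next = 0;
	squeue[next] = idle_ba;		/* keep the queue terminated */
	__asm__ __volatile__("" ::: "memory");	/* stand-in barrier */
	squeue[*putp] = job_ba;		/* publish the new job last */
	*putp = next;
}
#endif
/*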
 */
static int
sym_wakeup_done (hcb_p np)
{
	ccb_p cp;
	int i, n;
	u32 dsa;

	SYM_LOCK_ASSERT(MA_OWNED);

	n = 0;
	i = np->dqueueget;
	while (1) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;
		np->dqueue[i] = 0;
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	return n;
}

/*
 *  Complete all active CCBs with error.
 *  Used on CHIP/SCSI RESET.
 */
static void
sym_flush_busy_queue (hcb_p np, int cam_status)
{
	/*
	 *  Move all active CCBs to the COMP queue
	 *  and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 *  Start chip.
 *
 *  'reason' means:
 *     0: initialisation.
 *     1: SCSI BUS RESET delivered or received.
 *     2: SCSI BUS MODE changed.
 */
static void
sym_init (hcb_p np, int reason)
{
	int	i;
	u32	phys;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 *  Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB (nc_stest3, TE|CSF);
		OUTONB (nc_ctest3, CLF);
	}

	/*
	 *  Clear Start Queue
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 *  Clear Done Queue
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 *  Install patches in scripts.
	 *  This also makes the start and done queue pointers
	 *  used by SCRIPTS point at their first entries.
	 */
	np->fw_patch(np);

	/*
	 *  Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);

	/*
	 *  Init chip.
	 */
	OUTB (nc_istat,  0x00);			/* Remove Reset, abort */
	UDELAY (2000);	/* The 895 needs time for the bus mode to settle */

	OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
					/* full arb., ena parity, par->ATN */
	OUTB (nc_scntl1, 0x00);		/* odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB (nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW (nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB (nc_istat , SIGP);			/* Signal Process */
	OUTB (nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
	OUTB (nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB (nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB (nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB (nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB (nc_stest2, np->rv_stest2);
	else
		OUTB (nc_stest2, EXT|np->rv_stest2);

	OUTB (nc_stest3, TE);			/* TolerANT enable */
	OUTB (nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */

	/*
	 *  For now, disable AIP generation on C1010-66.
	 */
	if (np->device_id == PCI_ID_LSI53C1010_2)
		OUTB (nc_aipcntl1, DISAIP);

	/*
	 *  C1010 Errata.
	 *  Errant SGE's when in narrow. Write bits 4 & 5 of
	 *  STEST1 register to disable SGE. We probably should do
	 *  that from SCRIPTS for each selection/reselection, but
	 *  I just don't want. :)
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    /* np->revision_id < 0xff */ 1)
		OUTB (nc_stest1, INB(nc_stest1) | 0x30);
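
	/*
	 *  [Editor's sketch]  Illustrative only, not driver code: the
	 *  start/done queue ring cleared above.  Even slots carry job
	 *  addresses (the idle task when empty), odd slots chain each
	 *  entry to the next, and the final link wraps to the ring's
	 *  base, forming the ring the SCRIPTS processor walks.
	 */
#if 0
static void
example_init_ring(unsigned *q, unsigned nentries, unsigned ring_ba,
    unsigned idle_ba)
{
	unsigned i;

	for (i = 0; i < nentries * 2; i += 2) {
		q[i]     = idle_ba;			/* job slot: idle task */
		q[i + 1] = ring_ba + (i + 2) * 4;	/* link to next entry */
	}
	q[nentries * 2 - 1] = ring_ba;			/* last link wraps */
}
#endif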
	/*
	 *  DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 *  Disable overlapped arbitration for some dual function devices,
	 *  regardless of revision id (kind of post-chip-design feature. ;-))
	 */
	if (np->device_id == PCI_ID_SYM53C875)
		OUTB (nc_ctest0, (1<<5));
	else if (np->device_id == PCI_ID_SYM53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 *  Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 *  and/or hardware phase mismatch, since only such chips
	 *  seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB (nc_ccntl0, np->rv_ccntl0);
		OUTB (nc_ccntl1, np->rv_ccntl1);
	}

	/*
	 *  If phase mismatch handled by scripts (895A/896/1010),
	 *  set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle));
		OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle));
	}

	/*
	 *  Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 *  Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);

	/*
	 *  enable ints
	 */
	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 *  For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 *  Try to eat the spurious SBMC interrupt that may occur when
	 *  we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW (nc_sien, SBMC);
		if (reason == 0) {
			MDELAY(100);
			INW (nc_sist);
		}
		np->scsi_mode = INB (nc_stest4) & SMODE;
	}

	/*
	 *  Fill in target structure.
	 *  Reinitialize usrsync.
	 *  Reinitialize usrwide.
	 *  Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
		tcb_p tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;

		tp->tinfo.current.period  = 0;
		tp->tinfo.current.offset  = 0;
		tp->tinfo.current.width   = BUS_8_BIT;
		tp->tinfo.current.options = 0;
	}

	/*
	 *  Download SCSI SCRIPTS to on-chip RAM if present,
	 *  and start script processor.
	 */
	if (np->ram_ba) {
		if (sym_verbose > 1)
			printf ("%s: Downloading SCSI SCRIPTS.\n",
				sym_name(np));
		if (np->ram_ws == 8192) {
			OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
			OUTL (nc_mmws, np->scr_ram_seg);
			OUTL (nc_mmrs, np->scr_ram_seg);
			OUTL (nc_sfs,  np->scr_ram_seg);
			phys = SCRIPTB_BA (np, start64);
		}
		else
			phys = SCRIPTA_BA (np, init);
		OUTRAM_OFF(0, np->scripta0, np->scripta_sz);
	}
	else
		phys = SCRIPTA_BA (np, init);

	np->istat_sem = 0;

	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (phys);

	/*
	 *  Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		xpt_async(AC_BUS_RESET, np->path, NULL);
}

/*
 *  Get clock factor and sync divisor for a given
 *  synchronous factor period.
 */
static int
sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
{
	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
	int	div = np->clock_divn;	/* Number of divisors supported	*/
	u32	fak;			/* Sync factor in sxfer		*/
	u32	per;			/* Period in tenths of ns	*/
	u32	kpc;			/* (per * clk)			*/
	int	ret;

	/*
	 *  Compute the synchronous period in tenths of nano-seconds
	 */
	if (dt && sfac <= 9)	per = 125;
	else if	(sfac <= 10)	per = 250;
	else if	(sfac == 11)	per = 303;
	else if	(sfac == 12)	per = 500;
	else			per = 40 * sfac;
	ret = per;

	kpc = per * clk;
	if (dt)
		kpc <<= 1;

	/*
	 *  For earliest C10 revision 0, we cannot use extra
	 *  clocks for the setting of the SCSI clocking.
	 *  Note that this limits the lowest sync data transfer
	 *  to 5 Mega-transfers per second and may result in
	 *  using higher clock divisors.
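 *
 *  [Editor's sketch]  Illustrative only, not driver code: the general
 *  (non-C10) divisor scan below, worked numerically for sync factor 10
 *  (25 ns) on an assumed 160 MHz core clock, with div_10M[] restated.
 */
#if 0
static void
example_getsync_st(void)
{
	static const unsigned div_10M[7] = {
		10000000, 15000000, 20000000, 30000000,
		40000000, 60000000, 80000000
	};
	unsigned kpc = 250 * 160000;	/* per * clk = 40e6 */
	int div = 7;
	unsigned fak;

	while (div-- > 0)		/* greatest divisor fast enough */
		if (kpc >= (div_10M[div] << 2))
			break;
	fak = (kpc - 1) / div_10M[div] + 1 - 4;	/* -> div = 0, fak = 0 */
	(void)fak;
}
#endif
/*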
*/ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period. */ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? */ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (div-- > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak > 2) {fak = 2; ret = -1;} /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * Tell the SCSI layer about the new transfer parameters. */ static void sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid) { struct ccb_trans_settings cts; struct cam_path *path; int sts; tcb_p tp = &np->target[target]; sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target, CAM_LUN_WILDCARD); if (sts != CAM_REQ_CMP) return; bzero(&cts, sizeof(cts)); #define cts__scsi (cts.proto_specific.scsi) #define cts__spi (cts.xport_specific.spi) cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; cts.protocol_version = tp->tinfo.current.scsi_version; cts.transport_version = tp->tinfo.current.spi_version; cts__spi.valid = spi_valid; if (spi_valid & CTS_SPI_VALID_SYNC_RATE) cts__spi.sync_period = tp->tinfo.current.period; if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET) cts__spi.sync_offset = tp->tinfo.current.offset; if (spi_valid & CTS_SPI_VALID_BUS_WIDTH) cts__spi.bus_width = tp->tinfo.current.width; if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS) cts__spi.ppr_options = tp->tinfo.current.options; #undef cts__spi #undef cts__scsi xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, path, &cts); xpt_free_path(path); } #define SYM_SPI_VALID_WDTR \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_SDTR \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_PPR \ CTS_SPI_VALID_PPR_OPTIONS | \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET /* * We received a WDTR. * Let everything be aware of the changes. */ static void sym_setwide(hcb_p np, ccb_p cp, u_char wide) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, 0, 0, 0, wide, 0, 0); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.current.offset = 0; tp->tinfo.current.period = 0; tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR); } /* * We received a SDTR. * Let everything be aware of the changes. */ static void sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0; sym_settrans(np, cp, 0, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. 
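 *
 *  [Editor's sketch]  Illustrative only, not driver code: the DT-mode
 *  extra-clock formula from sym_getsync() above, worked for factor 9
 *  (12.5 ns) on an assumed 160 MHz clock.
 */
#if 0
static void
example_getsync_dt(void)
{
	unsigned kpc = (125 * 160000) << 1;	/* DT doubles per * clk */
	unsigned div_10M_0 = 10000000;		/* divisor 0, as above */
	unsigned fak;

	fak = (kpc - 1) / (div_10M_0 << 1) + 1 - 2;	/* -> 0 extra clocks */
	(void)fak;	/* fak > 2 would make the driver report failure */
}
#endif
/*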
*/ tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR); } /* * We received a PPR. * Let everything be aware of the changes. */ static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, dt, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = dt; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR); } /* * Switch trans mode for current job and it's target. */ static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { SYM_QUEHEAD *qp; union ccb *ccb; tcb_p tp; u_char target = INB (nc_sdid) & 0x0f; u_char sval, wval, uval; assert (cp); if (!cp) return; ccb = cp->cam_ccb; assert (ccb); if (!ccb) return; assert (target == (cp->target & 0xf)); tp = &np->target[target]; sval = tp->head.sval; wval = tp->head.wval; uval = tp->head.uval; #if 0 printf("XXXX sval=%x wval=%x uval=%x (%x)\n", sval, wval, uval, np->rv_scntl3); #endif /* * Set the offset. */ if (!(np->features & FE_C10)) sval = (sval & ~0x1f) | ofs; else sval = (sval & ~0x3f) | ofs; /* * Set the sync divisor and extra clock factor. */ if (ofs != 0) { wval = (wval & ~0x70) | ((div+1) << 4); if (!(np->features & FE_C10)) sval = (sval & ~0xe0) | (fak << 5); else { uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); } } /* * Set the bus width. */ wval = wval & ~EWS; if (wide != 0) wval |= EWS; /* * Set misc. ultra enable bits. */ if (np->features & FE_C10) { uval = uval & ~(U3EN|AIPCKEN); if (dt) { assert(np->features & FE_U3EN); uval |= U3EN; } } else { wval = wval & ~ULTRA; if (per <= 12) wval |= ULTRA; } /* * Stop there if sync parameters are unchanged. */ if (tp->head.sval == sval && tp->head.wval == wval && tp->head.uval == uval) return; tp->head.sval = sval; tp->head.wval = wval; tp->head.uval = uval; /* * Disable extended Sreq/Sack filtering if per < 50. * Not supported on the C1010. */ if (per < 50 && !(np->features & FE_C10)) OUTOFFB (nc_stest2, EXT); /* * set actual value and sync_status */ OUTB (nc_sxfer, tp->head.sval); OUTB (nc_scntl3, tp->head.wval); if (np->features & FE_C10) { OUTB (nc_scntl4, tp->head.uval); } /* * patch ALL busy ccbs of this target. */ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->target != target) continue; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; if (np->features & FE_C10) { cp->phys.select.sel_scntl4 = tp->head.uval; } } } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sxfer: (see the manual) * scntl3: (see the manual) * * current script command: * dsp: script address (relative to start of script). 
 *  dbc:	first word of script command.
 *
 *  First 24 registers of the chip:
 *  r0..rf
 */
static void
sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
{
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp	= INL (nc_dsp);

	if	(dsp > np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs	= dsp - np->scripta_ba;
		script_size	= np->scripta_sz;
		script_base	= (u_char *) np->scripta0;
		script_name	= "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs	= dsp - np->scriptb_ba;
		script_size	= np->scriptb_sz;
		script_base	= (u_char *) np->scriptb0;
		script_name	= "scriptb";
	}
	else {
		script_ofs	= dsp;
		script_size	= 0;
-		script_base	= 0;
+		script_base	= NULL;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
		(unsigned)INB (nc_scntl3), script_name, script_ofs,
		(unsigned)INL (nc_dbc));

	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	printf ("%s: regdump:", sym_name(np));
	for (i=0; i<24;i++)
	    printf (" %02x", (unsigned)INB_OFF(i));
	printf (".\n");

	/*
	 *  PCI BUS error, read the PCI status register.
	 */
	if (dstat & (MDPE|BF)) {
		u_short pci_sts;
		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
		if (pci_sts & 0xf900) {
			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
			printf("%s: PCI STATUS = 0x%04x\n",
				sym_name(np), pci_sts & 0xf900);
		}
	}
}

/*
 *  chip interrupt handler
 *
 *  In normal situations, interrupt conditions occur one at
 *  a time. But when something bad happens on the SCSI BUS,
 *  the chip may raise several interrupt flags before
 *  stopping and interrupting the CPU. The additional
 *  interrupt flags are stacked in some extra registers
 *  after the SIP and/or DIP flag has been raised in the
 *  ISTAT. After the CPU has read the interrupt condition
 *  flag from SIST or DSTAT, the chip unstacks the other
 *  interrupt flags and sets the corresponding bits in
 *  SIST or DSTAT. Since the chip starts stacking once the
 *  SIP or DIP flag is set, there is a small window of time
 *  where the stacking does not occur.
 *
 *  Typically, multiple interrupt conditions may happen in
 *  the following situations:
 *
 *  - SCSI parity error + Phase mismatch  (PAR|MA)
 *    When a parity error is detected in input phase
 *    and the device switches to msg-in phase inside a
 *    block MOV.
 *  - SCSI parity error + Unexpected disconnect (PAR|UDC)
 *    When a stupid device does not want to handle the
 *    recovery of an SCSI parity error.
 *  - Some combinations of STO, PAR, UDC, ...
 *    When using non compliant SCSI stuff, when user is
 *    doing non compliant hot tampering on the BUS, when
 *    something really bad happens to a device, etc ...
 *
 *  The heuristic suggested by SYMBIOS to handle
 *  multiple interrupts is to try unstacking all
 *  interrupt conditions and to handle them on some
 *  priority based on error severity.
 *  This will work when the unstacking has been
 *  successful, but we cannot be 100% sure of that,
 *  since the CPU may have been faster to unstack than
 *  the chip is able to stack. Hmmm ... But it seems that
 *  such a situation is very unlikely to happen.
 *
 *  If this happens, for example STO caught by the CPU
 *  then UDC happening before the CPU has restarted
 *  the SCRIPTS, the driver may wrongly complete the
 *  same command on UDC, since the SCRIPTS didn't restart
 *  and the DSA still points to the same command.
 *  We avoid this situation by setting the DSA to an
 *  invalid value when the CCB is completed and before
 *  restarting the SCRIPTS.
 *
 *  Another issue is that we need some section of our
 *  recovery procedures to be somehow uninterruptible but
 *  the SCRIPTS processor does not provide such a
 *  feature. For this reason, we handle recovery preferably
 *  from the C code and check against some SCRIPTS critical
 *  sections from the C code.
 *
 *  Hopefully, the interrupt handling of the driver is now
 *  able to resist weird BUS error conditions, but do not
 *  ask me for any guarantee that it will never fail. :-)
 *  Use at your own decision and risk.
 */
static void
sym_intr1 (hcb_p np)
{
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 *  interrupt on the fly ?
	 *
	 *  A `dummy read' is needed to ensure that the
	 *  clear of the INTF flag reaches the device
	 *  before the scanning of the DONE queue.
	 */
	istat = INB (nc_istat);
	if (istat & INTF) {
		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
		istat = INB (nc_istat);		/* DUMMY READ */
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		(void)sym_wakeup_done (np);
	}

	if (!(istat & (SIP|DIP)))
		return;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB (nc_istat, CABRT);
#endif

	/*
	 *  PAR and MA interrupts may occur at the same time,
	 *  and we need to know of both in order to handle
	 *  this situation properly. We try to unstack SCSI
	 *  interrupts for that reason. BTW, I dislike a LOT
	 *  such a loop inside the interrupt routine.
	 *  Even if DMA interrupt stacking is very unlikely to
	 *  happen, we also try unstacking these ones, since
	 *  this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		if (istatc & SIP)
			sist  |= INW (nc_sist);
		if (istatc & DIP)
			dstat |= INB (nc_dstat);
		istatc = INB (nc_istat);
		istat |= istatc;
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(nc_scr0),
			dstat,sist,
			(unsigned)INL(nc_dsp),
			(unsigned)INL(nc_dbc));
	/*
	 *  On paper, a memory barrier may be needed here.
	 *  And since we are paranoid ... :)
	 */
	MEMORY_BARRIER();

	/*
	 *  First, interrupts we want to service cleanly.
	 *
	 *  Phase mismatch (MA) is the most frequent interrupt
	 *  for chips earlier than the 896 and so we have to service
	 *  it as quickly as possible.
	 *  A SCSI parity error (PAR) may be combined with a phase
	 *  mismatch condition (MA).
	 *  Programmed interrupts (SIR) are used to call the C code
	 *  from SCRIPTS.
	 *  The single step interrupt (SSI) is not used in this
	 *  driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir (np);
		else if (dstat & SSI)	OUTONB_STD ();
		else			goto unknown_int;
		return;
	}

	/*
	 *  Now, interrupts that do not happen in normal
	 *  situations and that we may need to recover from.
	 *
	 *  On SCSI RESET (RST), we reset everything.
	 *  On SCSI BUS MODE CHANGE (SBMC), we complete all
	 *  active CCBs with RESET status, prepare all devices
	 *  for negotiating again and restart the SCRIPTS.
	 *  On STO and UDC, we complete the CCB with the
	 *  corresponding status and restart the SCRIPTS.
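 *
 *  [Editor's sketch]  Illustrative only, not driver code: the ISTAT
 *  unstacking loop used above, with stand-in accessors and ISTAT bit
 *  values restated for the sketch.
 */
#if 0
enum { EX_SIP = 0x02, EX_DIP = 0x01 };	/* ISTAT bits, illustrative */
static unsigned read_istat(void), read_sist(void), read_dstat(void);

static void
example_unstack(unsigned *sistp, unsigned *dstatp)
{
	unsigned istatc = read_istat();

	do {
		if (istatc & EX_SIP)
			*sistp  |= read_sist();
		if (istatc & EX_DIP)
			*dstatp |= read_dstat();
		istatc = read_istat();		/* chip may have restacked */
	} while (istatc & (EX_SIP | EX_DIP));
}
#endif
/*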
*/ if (sist & RST) { xpt_print_path(np->path); printf("SCSI BUS reset detected.\n"); sym_init (np, 1); return; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ if (!(sist & (GEN|HTH|SGE)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & SBMC) sym_int_sbmc (np); else if (sist & STO) sym_int_sto (np); else if (sist & UDC) sym_int_udc (np); else goto unknown_int; return; } /* * Now, interrupts we are not able to recover cleanly. * * Log message for hard errors. * Reset everything. */ sym_log_hard_error(np, sist, dstat); if ((sist & (GEN|HTH|SGE)) || (dstat & (MDPE|BF|ABRT|IID))) { sym_start_reset(np); return; } unknown_int: /* * We just miss the cause of the interrupt. :( * Print a message. The timeout will do the real work. */ printf( "%s: unknown interrupt(s) ignored, " "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", sym_name(np), istat, dstat, sist); } static void sym_intr(void *arg) { hcb_p np = arg; SYM_LOCK(); if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); sym_intr1((hcb_p) arg); if (DEBUG_FLAGS & DEBUG_TINY) printf ("]"); SYM_UNLOCK(); } static void sym_poll(struct cam_sim *sim) { sym_intr1(cam_sim_softc(sim)); } /* * generic recovery from scsi interrupt * * The doc says that when the chip gets an SCSI interrupt, * it tries to stop in an orderly fashion, by completing * an instruction fetch that had started or by flushing * the DMA fifo for a write to memory that was executing. * Such a fashion is not enough to know if the instruction * that was just before the current DSP value has been * executed or not. * * There are some small SCRIPTS sections that deal with * the start queue and the done queue that may break any * assomption from the C code if we are interrupted * inside, so we reset if this happens. Btw, since these * SCRIPTS sections are executed while the SCRIPTS hasn't * started SCSI operations, it is very unlikely to happen. * * All the driver data structures are supposed to be * allocated from the same 4 GB memory window, so there * is a 1 to 1 relationship between DSA and driver data * structures. Since we are careful :) to invalidate the * DSA when we complete a command or when the SCRIPTS * pushes a DSA into a queue, we can trust it when it * points to a CCB. */ static void sym_recover_scsi_int (hcb_p np, u_char hsts) { u32 dsp = INL (nc_dsp); u32 dsa = INL (nc_dsa); ccb_p cp = sym_ccb_from_dsa(np, dsa); /* * If we haven't been interrupted inside the SCRIPTS * critical paths, we can safely restart the SCRIPTS * and trust the DSA value if it matches a CCB. */ if ((!(dsp > SCRIPTA_BA (np, getjob_begin) && dsp < SCRIPTA_BA (np, getjob_end) + 1)) && (!(dsp > SCRIPTA_BA (np, ungetjob) && dsp < SCRIPTA_BA (np, reselect) + 1)) && (!(dsp > SCRIPTB_BA (np, sel_for_abort) && dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) && (!(dsp > SCRIPTA_BA (np, done) && dsp < SCRIPTA_BA (np, done_end) + 1))) { OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* * If we have a CCB, let the SCRIPTS call us back for * the handling of the error with SCRATCHA filled with * STARTPOS. This way, we will be able to freeze the * device queue and requeue awaiting IOs. */ if (cp) { cp->host_status = hsts; OUTL_DSP (SCRIPTA_BA (np, complete_error)); } /* * Otherwise just restart the SCRIPTS. 
*/ else { OUTL (nc_dsa, 0xffffff); OUTL_DSP (SCRIPTA_BA (np, start)); } } else goto reset_all; return; reset_all: sym_start_reset(np); } /* * chip exception handler for selection timeout */ static void sym_int_sto (hcb_p np) { u32 dsp = INL (nc_dsp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8) sym_recover_scsi_int(np, HS_SEL_TIMEOUT); else sym_start_reset(np); } /* * chip exception handler for unexpected disconnect */ static void sym_int_udc (hcb_p np) { printf ("%s: unexpected disconnect\n", sym_name(np)); sym_recover_scsi_int(np, HS_UNEXPECTED); } /* * chip exception handler for SCSI bus mode change * * spi2-r12 11.2.3 says a transceiver mode change must * generate a reset event and a device that detects a reset * event shall initiate a hard reset. It says also that a * device that detects a mode change shall set data transfer * mode to eight bit asynchronous, etc... * So, just reinitializing all except chip should be enough. */ static void sym_int_sbmc (hcb_p np) { u_char scsi_mode = INB (nc_stest4) & SMODE; /* * Notify user. */ xpt_print_path(np->path); printf("SCSI BUS mode change from %s to %s.\n", sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); /* * Should suspend command processing for a few seconds and * reinitialize all except the chip. */ sym_init (np, 2); } /* * chip exception handler for SCSI parity error. * * When the chip detects a SCSI parity error and is * currently executing a (CH)MOV instruction, it does * not interrupt immediately, but tries to finish the * transfer of the current scatter entry before * interrupting. The following situations may occur: * * - The complete scatter entry has been transferred * without the device having changed phase. * The chip will then interrupt with the DSP pointing * to the instruction that follows the MOV. * * - A phase mismatch occurs before the MOV finished * and phase errors are to be handled by the C code. * The chip will then interrupt with both PAR and MA * conditions set. * * - A phase mismatch occurs before the MOV finished and * phase errors are to be handled by SCRIPTS. * The chip will load the DSP with the phase mismatch * JUMP address and interrupt the host processor. */ static void sym_int_par (hcb_p np, u_short sist) { u_char hsts = INB (HS_PRT); u32 dsp = INL (nc_dsp); u32 dbc = INL (nc_dbc); u32 dsa = INL (nc_dsa); u_char sbcl = INB (nc_sbcl); u_char cmd = dbc >> 24; int phase = cmd & 7; ccb_p cp = sym_ccb_from_dsa(np, dsa); printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", sym_name(np), hsts, dbc, sbcl); /* * Check that the chip is connected to the SCSI BUS. */ if (!(INB (nc_scntl1) & ISCON)) { sym_recover_scsi_int(np, HS_UNEXPECTED); return; } /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (!cp) goto reset_all; /* * Check instruction was a MOV, direction was INPUT and * ATN is asserted. */ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) goto reset_all; /* * Keep track of the parity error. */ OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_PARITY_ERR; /* * Prepare the message to send to the device. */ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; /* * If the old phase was DATA IN phase, we have to deal with * the 3 situations described above. * For other input phases (MSG IN and STATUS), the device * must resend the whole thing that failed parity checking * or signal error. So, jumping to dispatcher should be OK. 
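 *
 *  [Editor's sketch]  Illustrative only, not driver code: the recovery
 *  message chosen above, MESSAGE PARITY ERROR (0x09) when the failed
 *  phase was MSG IN, INITIATOR DETECTED ERROR (0x05) otherwise.  The
 *  phase is the low 3 bits of the instruction byte taken from DBC.
 */
#if 0
static unsigned char
example_parity_msg(unsigned dbc)
{
	unsigned phase = (dbc >> 24) & 7;	/* low 3 bits of the opcode */

	return (phase == 7 ? 0x09 : 0x05);
}
#endif
/*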
*/ if (phase == 1 || phase == 5) { /* Phase mismatch handled by SCRIPTS */ if (dsp == SCRIPTB_BA (np, pm_handle)) OUTL_DSP (dsp); /* Phase mismatch handled by the C code */ else if (sist & MA) sym_int_ma (np); /* No phase mismatch occurred */ else { OUTL (nc_temp, dsp); OUTL_DSP (SCRIPTA_BA (np, dispatch)); } } else OUTL_DSP (SCRIPTA_BA (np, clrack)); return; reset_all: sym_start_reset(np); } /* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block. */ static void sym_int_ma (hcb_p np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 *vdsp; u32 oadr, olen; u32 *tblp; u32 newcmd; u_int delta; u_char cmd; u_char hflags, hflags0; struct sym_pmc *pm; ccb_p cp; dsp = INL (nc_dsp); dbc = INL (nc_dbc); dsa = INL (nc_dsa); cmd = dbc >> 24; rest = dbc & 0xffffff; delta = 0; /* * locate matching cp if any. */ cp = sym_ccb_from_dsa(np, dsa); /* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later. */ if ((cmd & 7) != 1 && (cmd & 7) != 5) { u_char ss0, ss2; if (np->features & FE_DFBC) delta = INW (nc_dfbc); else { u32 dfifo; /* * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. */ dfifo = INL(nc_dfifo); /* * Calculate remaining bytes in DMA fifo. * (CTEST5 = dfifo >> 16) */ if (dfifo & (DFS << 16)) delta = ((((dfifo >> 8) & 0x300) | (dfifo & 0xff)) - rest) & 0x3ff; else delta = ((dfifo & 0xff) - rest) & 0x7f; } /* * The data in the dma fifo has not been transferred to * the target -> add the amount to the rest * and clear the data. * Check the sstat2 register in case of wide transfer. */ rest += delta; ss0 = INB (nc_sstat0); if (ss0 & OLF) rest++; if (!(np->features & FE_C10)) if (ss0 & ORF) rest++; if (cp && (cp->phys.select.sel_scntl3 & EWS)) { ss2 = INB (nc_sstat2); if (ss2 & OLF1) rest++; if (!(np->features & FE_C10)) if (ss2 & ORF1) rest++; } /* * Clear fifos. */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ OUTB (nc_stest3, TE|CSF); /* scsi fifo */ } /* * log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7, (unsigned) rest, (unsigned) delta); /* * try to find the interrupted script command, * and the address at which to continue. */ - vdsp = 0; + vdsp = NULL; nxtdsp = 0; if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); nxtdsp = dsp; } else if (dsp > np->scriptb_ba && dsp <= np->scriptb_ba + np->scriptb_sz) { vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); nxtdsp = dsp; } /* * log the information */ if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); } if (!vdsp) { printf ("%s: interrupted SCRIPT address not found.\n", sym_name (np)); goto reset_all; } if (!cp) { printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", sym_name (np)); goto reset_all; } /* * get old startaddress and old length. 
*/ oadr = scr_to_cpu(vdsp[1]); if (cmd & 0x10) { /* Table indirect */ tblp = (u32 *) ((char*) &cp->phys + oadr); olen = scr_to_cpu(tblp[0]); oadr = scr_to_cpu(tblp[1]); } else { tblp = (u32 *) 0; olen = scr_to_cpu(vdsp[0]) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", (unsigned) (scr_to_cpu(vdsp[0]) >> 24), tblp, (unsigned) olen, (unsigned) oadr); } /* * check cmd against assumed interrupted script command. * If dt data phase, the MOVE instruction hasn't bit 4 of * the phase. */ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { PRINT_ADDR(cp); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24); goto reset_all; } /* * if old phase not dataphase, leave here. */ if (cmd & 2) { PRINT_ADDR(cp); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, INB(nc_sbcl)&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); goto unexpected_phase; } /* * Choose the correct PM save area. * * Look at the PM_SAVE SCRIPT if you want to understand * this stuff. The equivalent code is implemented in * SCRIPTS for the 895A, 896 and 1010 that are able to * handle PM from the SCRIPTS processor. */ hflags0 = INB (HF_PRT); hflags = hflags0; if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { if (hflags & HF_IN_PM0) nxtdsp = scr_to_cpu(cp->phys.pm0.ret); else if (hflags & HF_IN_PM1) nxtdsp = scr_to_cpu(cp->phys.pm1.ret); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; } if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; newcmd = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; newcmd = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); if (hflags != hflags0) OUTB (HF_PRT, hflags); /* * fillin the phase mismatch context */ pm->sg.addr = cpu_to_scr(oadr + olen - rest); pm->sg.size = cpu_to_scr(rest); pm->ret = cpu_to_scr(nxtdsp); /* * If we have a SWIDE, * - prepare the address to write the SWIDE from SCRIPTS, * - compute the SCRIPTS address to restart from, * - move current data pointer context by one byte. */ nxtdsp = SCRIPTA_BA (np, dispatch); if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && (INB (nc_scntl2) & WSR)) { u32 tmp; /* * Set up the table indirect for the MOVE * of the residual byte and adjust the data * pointer context. */ tmp = scr_to_cpu(pm->sg.addr); cp->phys.wresid.addr = cpu_to_scr(tmp); pm->sg.addr = cpu_to_scr(tmp + 1); tmp = scr_to_cpu(pm->sg.size); cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); pm->sg.size = cpu_to_scr(tmp - 1); /* * If only the residual byte is to be moved, * no PM context is needed. */ if ((tmp&0xffffff) == 1) newcmd = pm->ret; /* * Prepare the address of SCRIPTS that will * move the residual byte to memory. */ nxtdsp = SCRIPTB_BA (np, wsr_ma_helper); } if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp); printf ("PM %x %x %x / %x %x %x.\n", hflags0, hflags, newcmd, (unsigned)scr_to_cpu(pm->sg.addr), (unsigned)scr_to_cpu(pm->sg.size), (unsigned)scr_to_cpu(pm->ret)); } /* * Restart the SCRIPTS processor. */ OUTL (nc_temp, newcmd); OUTL_DSP (nxtdsp); return; /* * Unexpected phase changes that occurs when the current phase * is not a DATA IN or DATA OUT phase are due to error conditions. * Such event may only happen when the SCRIPTS is using a * multibyte SCSI MOVE. * * Phase change Some possible cause * * COMMAND --> MSG IN SCSI parity error detected by target. * COMMAND --> STATUS Bad command or refused by target. * MSG OUT --> MSG IN Message rejected by target. 
 * MSG OUT --> COMMAND	Bogus target that discards extended
 *			negotiation messages.
 *
 * The code below does not care about the new phase and so
 * trusts the target. Why annoy it?
 * If the interrupted phase is COMMAND phase, we restart at
 * dispatcher.
 * If a target does not get all the messages after selection,
 * the code blindly assumes that the target discards extended
 * messages and clears the negotiation status.
 * If the target does not want all our response to negotiation,
 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
 * bloat for such a should_not_happen situation).
 * In all other situations, we reset the BUS.
 * Are these assumptions reasonable? (Wait and see ...)
 */
unexpected_phase:
	dsp -= 8;
	nxtdsp = 0;

	switch (cmd & 7) {
	case 2:	/* COMMAND phase */
		nxtdsp = SCRIPTA_BA (np, dispatch);
		break;
#if 0
	case 3:	/* STATUS phase */
		nxtdsp = SCRIPTA_BA (np, dispatch);
		break;
#endif
	case 6:	/* MSG OUT phase */
		/*
		 * If the device may want to use untagged when we want
		 * tagged, we prepare an IDENTIFY without disc. granted,
		 * since we will not be able to handle reselect.
		 * Otherwise, we just don't care.
		 */
		if (dsp == SCRIPTA_BA (np, send_ident)) {
			if (cp->tag != NO_TAG && olen - rest <= 3) {
				cp->host_status = HS_BUSY;
				np->msgout[0] = M_IDENTIFY | cp->lun;
				nxtdsp = SCRIPTB_BA (np, ident_break_atn);
			}
			else
				nxtdsp = SCRIPTB_BA (np, ident_break);
		}
		else if	(dsp == SCRIPTB_BA (np, send_wdtr) ||
			 dsp == SCRIPTB_BA (np, send_sdtr) ||
			 dsp == SCRIPTB_BA (np, send_ppr)) {
			nxtdsp = SCRIPTB_BA (np, nego_bad_phase);
		}
		break;
#if 0
	case 7:	/* MSG IN phase */
		nxtdsp = SCRIPTA_BA (np, clrack);
		break;
#endif
	}

	if (nxtdsp) {
		OUTL_DSP (nxtdsp);
		return;
	}

reset_all:
	sym_start_reset(np);
}

/*
 * Dequeue from the START queue all CCBs that match
 * a given target/lun/task condition (-1 means all),
 * and move them from the BUSY queue to the COMP queue
 * with CAM_REQUEUE_REQ status condition.
 * This function is used during error handling/recovery.
 * It is called with SCRIPTS not running.
 */
static int
sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task)
{
	int j;
	ccb_p cp;

	/*
	 * Make sure the starting index is within range.
	 */
	assert((i >= 0) && (i < 2*MAX_QUEUE));

	/*
	 * Walk until end of START queue and dequeue every job
	 * that matches the target/lun/task condition.
	 */
	j = i;
	while (i != np->squeueput) {
		cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
		assert(cp);
#ifdef SYM_CONF_IARB_SUPPORT
		/* Forget hints for IARB, they may be no longer relevant */
		cp->host_flags &= ~HF_HINT_IARB;
#endif
		if ((target == -1 || cp->target == target) &&
		    (lun    == -1 || cp->lun    == lun)    &&
		    (task   == -1 || cp->tag    == task)) {
			sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ);
			sym_remque(&cp->link_ccbq);
			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
		}
		else {
			if (i != j)
				np->squeue[j] = np->squeue[i];
			if ((j += 2) >= MAX_QUEUE*2) j = 0;
		}
		if ((i += 2) >= MAX_QUEUE*2) i = 0;
	}
	if (i != j)		/* Copy back the idle task if needed */
		np->squeue[j] = np->squeue[i];
	np->squeueput = j;	/* Update our current start queue pointer */

	return (i - j) / 2;
}

/*
 * Complete all CCBs queued to the COMP queue.
 *
 * These CCBs are assumed:
 * - Not to be referenced either by devices or
 *   SCRIPTS-related queues and data.
 * - To have to be completed with an error condition
 *   or requeued.
 *
 * The device queue freeze count is incremented
 * for each CCB that does not prevent this.
 * This function is called when all CCBs involved
 * in error handling/recovery have been reaped.
*/ static void sym_flush_comp_queue(hcb_p np, int cam_status) { SYM_QUEHEAD *qp; ccb_p cp; while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* Leave quiet CCBs waiting for resources */ if (cp->host_status == HS_WAIT) continue; ccb = cp->cam_ccb; if (cam_status) sym_set_cam_status(ccb, cam_status); sym_freeze_cam_ccb(ccb); sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); } } /* * chip handler for bad SCSI status condition * * In case of bad SCSI status, we unqueue all the tasks * currently queued to the controller but not yet started * and then restart the SCRIPTS processor immediately. * * QUEUE FULL and BUSY conditions are handled the same way. * Basically all the not yet started tasks are requeued in * device queue and the queue is frozen until a completion. * * For CHECK CONDITION and COMMAND TERMINATED status, we use * the CCB of the failed command to prepare a REQUEST SENSE * SCSI command and queue it to the controller queue. * * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ static void sym_sir_bad_scsi_status(hcb_p np, ccb_p cp) { tcb_p tp = &np->target[cp->target]; u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int nego; int i; SYM_LOCK_ASSERT(MA_OWNED); /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = NULL; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { PRINT_ADDR(cp); printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. */ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP (SCRIPTA_BA (np, start)); /* * Save some info of the actual IO. * Compute the data residual. */ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ /* * identify message */ cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun; msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; nego = 0; if (tp->tinfo.current.options & PPR_OPT_MASK) nego = NS_PPR; else if (tp->tinfo.current.width != BUS_8_BIT) nego = NS_WIDE; else if (tp->tinfo.current.offset != 0) nego = NS_SYNC; if (nego) msglen += sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. 
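	 * (Editor's sketch, inferred from the code below: a SCRIPTS table
	 * indirect MOVE fetches a { size, addr } pair, so requeuing for
	 * REQUEST SENSE only requires pointing smsg, cmd and sense at the
	 * rebuilt message, the 6-byte CDB and the sense bounce buffer.)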
*/ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd)); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = cp->lun << 5; if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7) cp->sensecmd[1] = 0; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf)); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA (np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.goalp = cpu_to_scr(startp + 16); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ static int sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; ccb_p cp; /* * Move the entire BUSY queue to our temporary queue. */ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. */ while ((qp = sym_remque_head(&qtmp)) != NULL) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); ccb = cp->cam_ccb; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) sym_set_cam_status(ccb, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... 
 *
 * On SIR_TARGET_SELECTED, we scan for the most
 * appropriate thing to do:
 *
 * - If nothing, we just send an M_ABORT message to the
 *   target to get rid of the useless SCSI bus ownership.
 *   According to the specs, no tasks shall be affected.
 * - If the target is to be reset, we send it an M_RESET
 *   message.
 * - If a logical UNIT is to be cleared, we send the
 *   IDENTIFY(lun) + M_ABORT.
 * - If an untagged task is to be aborted, we send the
 *   IDENTIFY(lun) + M_ABORT.
 * - If a tagged task is to be aborted, we send the
 *   IDENTIFY(lun) + task attributes + M_ABORT_TAG.
 *
 * Once our 'kiss of death' :) message has been accepted
 * by the target, the SCRIPTS interrupts again
 * (SIR_ABORT_SENT). On this interrupt, we complete
 * all the CCBs that should have been aborted by the
 * target according to our message.
 */
static void sym_sir_task_recovery(hcb_p np, int num)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	int target=-1, lun=-1, task;
	int i, k;

	switch(num) {
	/*
	 * The SCRIPTS processor stopped before starting
	 * the next command in order to allow us to perform
	 * some task recovery.
	 */
	case SIR_SCRIPT_STOPPED:
		/*
		 * Do we have any target to reset or unit to clear?
		 */
		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
			tp = &np->target[i];
			if (tp->to_reset ||
			    (tp->lun0p && tp->lun0p->to_clear)) {
				target = i;
				break;
			}
			if (!tp->lunmp)
				continue;
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					target = i;
					break;
				}
			}
			if (target != -1)
				break;
		}

		/*
		 * If not, walk the busy queue for any
		 * disconnected CCB to be aborted.
		 */
		if (target == -1) {
			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
				if (cp->host_status != HS_DISCONNECT)
					continue;
				if (cp->to_abort) {
					target = cp->target;
					break;
				}
			}
		}

		/*
		 * If some target is to be selected,
		 * prepare and start the selection.
		 */
		if (target != -1) {
			tp = &np->target[target];
			np->abrt_sel.sel_id	= target;
			np->abrt_sel.sel_scntl3 = tp->head.wval;
			np->abrt_sel.sel_sxfer  = tp->head.sval;
			OUTL(nc_dsa, np->hcb_ba);
			OUTL_DSP (SCRIPTB_BA (np, sel_for_abort));
			return;
		}

		/*
		 * Now look for a CCB to abort that hasn't started yet.
		 * Btw, the SCRIPTS processor is still stopped, so
		 * we are not in race.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_BUSY &&
			    cp->host_status != HS_NEGOTIATE)
				continue;
			if (!cp->to_abort)
				continue;
#ifdef SYM_CONF_IARB_SUPPORT
			/*
			 * If we are using IMMEDIATE ARBITRATION, we do not
			 * want to cancel the last queued CCB, since the
			 * SCRIPTS may have anticipated the selection.
			 */
			if (cp == np->last_cp) {
				cp->to_abort = 0;
				continue;
			}
#endif
			i = 1;	/* Means we have found some */
			break;
		}
		if (!i) {
			/*
			 * We are done, so we no longer need
			 * to synchronize with the SCRIPTS.
			 * Remove the SEM flag from the ISTAT.
			 */
			np->istat_sem = 0;
			OUTB (nc_istat, SIGP);
			break;
		}
		/*
		 * Compute index of next position in the start
		 * queue the SCRIPTS intends to start and dequeue
		 * all CCBs for that device that haven't been started.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

		/*
		 * Make sure at least our IO to abort has been dequeued.
		 */
		assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ);

		/*
		 * Keep track in cam status of the reason for the abort.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);

		/*
		 * Complete with error everything that we have dequeued.
		 */
		sym_flush_comp_queue(np, 0);
		break;
	/*
	 * The SCRIPTS processor has selected a target
	 * we may have some manual recovery to perform for.
	 */
	case SIR_TARGET_SELECTED:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));

		/*
		 * If the target is to be reset, prepare an
		 * M_RESET message and clear the to_reset flag
		 * since we do not expect this operation to fail.
		 */
		if (tp->to_reset) {
			np->abrt_msg[0] = M_RESET;
			np->abrt_tbl.size = 1;
			tp->to_reset = 0;
			break;
		}

		/*
		 * Otherwise, look for some logical unit to be cleared.
		 */
		if (tp->lun0p && tp->lun0p->to_clear)
			lun = 0;
		else if (tp->lunmp) {
			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
					lun = k;
					break;
				}
			}
		}

		/*
		 * If a logical unit is to be cleared, prepare
		 * an IDENTIFY(lun) + ABORT MESSAGE.
		 */
		if (lun != -1) {
			lcb_p lp = sym_lp(tp, lun);
			lp->to_clear = 0; /* We do not expect to fail here */
			np->abrt_msg[0] = M_IDENTIFY | lun;
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
			break;
		}

		/*
		 * Otherwise, look for some disconnected job to
		 * abort for this target.
		 */
		i = 0;
		cp = NULL;
		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			if (cp->host_status != HS_DISCONNECT)
				continue;
			if (cp->target != target)
				continue;
			if (!cp->to_abort)
				continue;
			i = 1;	/* Means we have some */
			break;
		}

		/*
		 * If we have none, probably because the device has
		 * completed the command before we won arbitration,
		 * send an M_ABORT message without IDENTIFY.
		 * According to the specs, the device must just
		 * disconnect the BUS and not abort any task.
		 */
		if (!i) {
			np->abrt_msg[0] = M_ABORT;
			np->abrt_tbl.size = 1;
			break;
		}

		/*
		 * We have some task to abort.
		 * Set the IDENTIFY(lun)
		 */
		np->abrt_msg[0] = M_IDENTIFY | cp->lun;

		/*
		 * If we want to abort an untagged command, we
		 * will send an IDENTIFY + M_ABORT.
		 * Otherwise (tagged command), we will send
		 * an IDENTIFY + task attributes + ABORT TAG.
		 */
		if (cp->tag == NO_TAG) {
			np->abrt_msg[1] = M_ABORT;
			np->abrt_tbl.size = 2;
		}
		else {
			np->abrt_msg[1] = cp->scsi_smsg[1];
			np->abrt_msg[2] = cp->scsi_smsg[2];
			np->abrt_msg[3] = M_ABORT_TAG;
			np->abrt_tbl.size = 4;
		}
		/*
		 * Keep track of software timeout condition, since the
		 * peripheral driver may not count retries on abort
		 * conditions not due to timeout.
		 */
		if (cp->to_abort == 2)
			sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT);
		cp->to_abort = 0; /* We do not expect to fail here */
		break;

	/*
	 * The target has accepted our message and switched
	 * to BUS FREE phase as we expected.
	 */
	case SIR_ABORT_SENT:
		target = (INB (nc_sdid) & 0xf);
		tp = &np->target[target];

		/*
		 * If we didn't abort anything, leave here.
		 */
		if (np->abrt_msg[0] == M_ABORT)
			break;

		/*
		 * If we sent an M_RESET, then a hardware reset has
		 * been performed by the target.
		 * - Reset everything to async 8 bit
		 * - Tell ourself to negotiate next time :-)
		 * - Prepare to clear all disconnected CCBs for
		 *   this target from our task list (lun=task=-1)
		 */
		lun = -1;
		task = -1;
		if (np->abrt_msg[0] == M_RESET) {
			tp->head.sval = 0;
			tp->head.wval = np->rv_scntl3;
			tp->head.uval = 0;
			tp->tinfo.current.period = 0;
			tp->tinfo.current.offset = 0;
			tp->tinfo.current.width  = BUS_8_BIT;
			tp->tinfo.current.options = 0;
		}

		/*
		 * Otherwise, check for the LUN and TASK(s)
		 * concerned by the cancellation.
		 * If it is not ABORT_TAG then it is CLEAR_QUEUE
		 * or an ABORT message :-)
		 */
		else {
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
		sym_flush_comp_queue(np, 0);

		/*
		 * If we sent a BDR, make the upper layer aware of that.
		 */
		if (np->abrt_msg[0] == M_RESET)
			xpt_async(AC_SENT_BDR, np->path, NULL);
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		PRINT_TARGET(np, target);
		sym_printl_hex("control msgout:", np->abrt_msg,
			      np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB_STD ();
}

/*
 * Gerard's alchemy:) that deals with the data
 * pointer for both MDP and the residual calculation.
 *
 * I didn't want to bloat the code by more than 200
 * lines for the handling of both MDP and the residual.
 * This has been achieved by using a data pointer
 * representation consisting of an index in the data
 * array (dp_sg) and a negative offset (dp_ofs) that
 * have the following meaning:
 *
 * - dp_sg = SYM_CONF_MAX_SG
 *   we are at the end of the data script.
 * - dp_sg < SYM_CONF_MAX_SG
 *   dp_sg points to the next entry of the scatter array
 *   we want to transfer.
 * - dp_ofs < 0
 *   dp_ofs represents the residual bytes of the
 *   previous scatter entry that we will send first.
 * - dp_ofs = 0
 *   no residual to send first.
 *
 * The function sym_evaluate_dp() accepts an arbitrary
 * offset (basically from the MDP message) and returns
 * the corresponding values of dp_sg and dp_ofs.
 */
static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct sym_pmc *pm;

	/*
	 * Compute the resulting data pointer in terms of a script
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if	(dp_scr == SCRIPTA_BA (np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA (np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = NULL;

	if (pm) {
		dp_scr  = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size);
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 * end of the data.
	 */
	tmp = scr_to_cpu(cp->phys.head.goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect the result to be:
	 *
	 * Either,
	 *     dp_ofs = 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to (or the end of the data)
	 * Or,
	 *     dp_ofs < 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
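	 * (Editor's example with made-up numbers: with SYM_CONF_MAX_SG = 96
	 * and cp->segments = 2, the valid entries are 94 and 95, so
	 * dp_sgmin = 94. A data pointer 100 bytes into entry 95 of size 512
	 * evaluates to dp_sg = 96 and dp_ofs = -412, i.e. 412 bytes of
	 * entry 95 remain to be sent before the end of the data.)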
*/ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) goto out_err; else if (dp_sg > SYM_CONF_MAX_SG || (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) goto out_err; /* * Save the extreme pointer if needed. */ if (dp_sg > cp->ext_sg || (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { cp->ext_sg = dp_sg; cp->ext_ofs = dp_ofs; } /* * Return data. */ *ofs = dp_ofs; return dp_sg; out_err: return -1; } /* * chip handler for MODIFY DATA POINTER MESSAGE * * We also call this function on IGNORE WIDE RESIDUE * messages that do not match a SWIDE full condition. * Btw, we assume in that situation that such a message * is equivalent to a MODIFY DATA POINTER (offset=-1). */ static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs) { int dp_ofs = ofs; u32 dp_scr = INL (nc_temp); u32 dp_ret; u32 tmp; u_char hflags; int dp_sg; struct sym_pmc *pm; /* * Not supported for auto-sense. */ if (cp->host_flags & HF_SENSE) goto out_reject; /* * Apply our alchemy:) (see comments in sym_evaluate_dp()), * to the resulted data pointer. */ dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); if (dp_sg < 0) goto out_reject; /* * And our alchemy:) allows to easily calculate the data * script address we want to return for the next data phase. */ dp_ret = cpu_to_scr(cp->phys.head.goalp); dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); /* * If offset / scatter entry is zero we donnot need * a context for the new current data pointer. */ if (dp_ofs == 0) { dp_scr = dp_ret; goto out_ok; } /* * Get a context for the new current data pointer. */ hflags = INB (HF_PRT); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; dp_scr = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; dp_scr = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_DP_SAVED); OUTB (HF_PRT, hflags); /* * Set up the new current data pointer. * ofs < 0 there, and for the next data phase, we * want to transfer part of the data of the sg entry * corresponding to index dp_sg-1 prior to returning * to the main data script. */ pm->ret = cpu_to_scr(dp_ret); tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; pm->sg.addr = cpu_to_scr(tmp); pm->sg.size = cpu_to_scr(-dp_ofs); out_ok: OUTL (nc_temp, dp_scr); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip calculation of the data residual. * * As I used to say, the requirement of data residual * in SCSI is broken, useless and cannot be achieved * without huge complexity. * But most OSes and even the official CAM require it. * When stupidity happens to be so widely spread inside * a community, it gets hard to convince. * * Anyway, I don't care, since I am not going to use * any software that considers this data residual as * a relevant information. :) */ static int sym_compute_residual(hcb_p np, ccb_p cp) { int dp_sg, dp_sgmin, resid = 0; int dp_ofs = 0; /* * Check for some data lost or just thrown away. * We are not required to be quite accurate in this * situation. Btw, if we are odd for output and the * device claims some more data, it may well happen * than our residual be zero. :-) */ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { if (cp->xerr_status & XE_EXTRA_DATA) resid -= cp->extra_bytes; if (cp->xerr_status & XE_SODL_UNRUN) ++resid; if (cp->xerr_status & XE_SWIDE_OVRUN) --resid; } /* * If all data has been transferred, * there is no residual. 
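	 * (Editor's example with made-up numbers: if ext_ofs = -100, i.e.
	 * 100 bytes of the previous entry were not sent, and entries ext_sg
	 * and ext_sg+1 of 512 bytes each were never reached, the loop below
	 * yields resid = 100 + 512 + 512 = 1124 bytes.)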
*/ if (cp->phys.head.lastp == cp->phys.head.goalp) return resid; /* * If no data transfer occurs, or if the data * pointer is weird, return full residual. */ if (cp->startp == cp->phys.head.lastp || sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), &dp_ofs) < 0) { return cp->data_len; } /* * If we were auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { return -dp_ofs; } /* * We are now full comfortable in the computation * of the data residual (2's complement). */ dp_sgmin = SYM_CONF_MAX_SG - cp->segments; resid = -cp->ext_ofs; for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); resid += (tmp & 0xffffff); } /* * Hopefully, the result is not too wrong. */ return resid; } /* * Print out the content of a SCSI message. */ static int sym_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==M_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void sym_print_msg (ccb_p cp, char *label, u_char *msg) { PRINT_ADDR(cp); if (label) printf ("%s: ", label); (void) sym_show_msg (msg); printf (".\n"); } /* * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. * * When we try to negotiate, we append the negotiation message * to the identify and (maybe) simple tag message. * The host status field is set to HS_NEGOTIATE to mark this * situation. * * If the target doesn't answer this message immediately * (as required by the standard), the SIR_NEGO_FAILED interrupt * will be raised eventually. * The handler removes the HS_NEGOTIATE status, and sets the * negotiated value to the default (async / nowide). * * If we receive a matching answer immediately, we check it * for validity, and set the values. * * If we receive a Reject message immediately, we assume the * negotiation has failed, and fall back to standard values. * * If we receive a negotiation message while not in HS_NEGOTIATE * state, it's a target initiated negotiation. We prepare a * (hopefully) valid answer, set our parameters, and send back * this answer to the target. * * If the target doesn't fetch the answer (no message out phase), * we assume the negotiation has failed, and fall back to default * settings (SIR_NEGO_PROTO interrupt). * * When we set the values, we adjust them in all ccbs belonging * to this target, in the controller's register, and in the "phys" * field of the controller's struct sym_hcb. */ /* * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. */ static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, div; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgin", np->msgin); } /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_SYNC) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; /* * check values against our limits. 
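	 * (Editor's note: the extended SDTR message being parsed is, per
	 * the SCSI spec, msgin[] = { M_EXTENDED, 3, M_X_SYNC_REQ, period,
	 * offset }, hence the msgin[3]/msgin[4] accesses above.)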
*/ if (ofs) { if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", ofs, per, div, fak, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setsync (np, cp, ofs, per, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. Set value and * prepare an answer message */ sym_setsync (np, cp, ofs, per, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; reject_it: sym_setsync (np, cp, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. */ static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, dt, div, wide; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgin", np->msgin); } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[5]; wide = np->msgin[6]; dt = np->msgin[7] & PPR_OPT_DT; /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_PPR) goto reject_it; req = 0; } /* * check values against our limits. */ if (wide > np->maxwide) {chg = 1; wide = np->maxwide;} if (!wide || !(np->features & FE_ULTRA3)) dt &= ~PPR_OPT_DT; if (req) { if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} } if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */ dt &= ~PPR_OPT_DT; if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1; if (ofs) { if (dt) { if (ofs > np->maxoffs_dt) {chg = 1; ofs = np->maxoffs_dt;} } else if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (dt) { if (per < np->minsync_dt) {chg = 1; per = np->minsync_dt;} } else if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("ppr: " "dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n", dt, ofs, per, wide, div, fak, chg); } /* * It was an answer. */ if (req == 0) { if (chg) /* Answer wasn't acceptable */ goto reject_it; sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. 
Set value and * prepare an answer message */ sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 6; np->msgout[2] = M_X_PPR_REQ; np->msgout[3] = per; np->msgout[4] = 0; np->msgout[5] = ofs; np->msgout[6] = wide; np->msgout[7] = dt; cp->nego_status = NS_PPR; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, ppr_resp)); return; reject_it: sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); /* * If it was a device response that should result in * ST, we may want to try a legacy negotiation later. */ if (!req && !dt) { tp->tinfo.goal.options = 0; tp->tinfo.goal.width = wide; tp->tinfo.goal.period = per; tp->tinfo.goal.offset = ofs; } } /* * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. */ static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, wide; int req = 1; /* * Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "wide msgin", np->msgin); } /* * Is it a request from the device? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_WIDE) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; wide = np->msgin[3]; /* * check values against driver limits. */ if (wide > np->maxwide) {chg = 1; wide = np->maxwide;} if (req) { if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("wdtr: wide=%d chg=%d.\n", wide, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setwide (np, cp, wide); /* * Negotiate for SYNC immediately after WIDE response. * This allows to negotiate for both WIDE and SYNC on * a single SCSI command (Suggested by Justin Gibbs). */ if (tp->tinfo.goal.offset) { np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = tp->tinfo.goal.period; np->msgout[4] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } cp->nego_status = NS_SYNC; OUTB (HS_PRT, HS_NEGOTIATE); OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; } OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request, set value and * prepare an answer message */ sym_setwide (np, cp, wide); np->msgout[0] = M_EXTENDED; np->msgout[1] = 2; np->msgout[2] = M_X_WIDE_REQ; np->msgout[3] = wide; np->msgin [0] = M_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "wide msgout", np->msgout); } OUTL_DSP (SCRIPTB_BA (np, wdtr_resp)); return; reject_it: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * Reset SYNC or WIDE to default settings. * * Called when a negotiation does not succeed either * on rejection or on protocol error. * * If it was a PPR that made problems, we may want to * try a legacy negotiation later. */ static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp) { /* * any error in negotiation: * fall back to default mode. 
 */
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
#else
		tp->tinfo.goal.options = 0;
		if (tp->tinfo.goal.period < np->minsync)
			tp->tinfo.goal.period = np->minsync;
		if (tp->tinfo.goal.offset > np->maxoffs)
			tp->tinfo.goal.offset = np->maxoffs;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp, 0);
		break;
	}
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}

/*
 * chip handler for MESSAGE REJECT received in response to
 * a WIDE or SYNCHRONOUS negotiation.
 */
static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
{
	sym_nego_default(np, tp, cp);
	OUTB (HS_PRT, HS_BUSY);
}

/*
 * chip exception handler for programmed interrupts.
 */
static void sym_int_sir (hcb_p np)
{
	u_char	num	= INB (nc_dsps);
	u32	dsa	= INL (nc_dsa);
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);
	u_char	target	= INB (nc_sdid) & 0x0f;
	tcb_p	tp	= &np->target[target];
	int	tmp;

	SYM_LOCK_ASSERT(MA_OWNED);

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

	switch (num) {
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, the user wants to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We do not want to handle
	 * that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		printf ("%s:%d: No MSG IN phase after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		printf ("%s:%d: No IDENTIFY after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device reselected a LUN we do not know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * don't have any.
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we do not
	 * have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		printf ("%s:%d: message %x sent on bad reselection.\n",
			sym_name (np), target, np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care about that? */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB (HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allowing
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
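	 * (Editor's note: MESSAGE REJECT is the single-byte message 07h
	 * in the SCSI spec.)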
*/ case SIR_REJECT_TO_SEND: sym_print_msg(cp, "M_REJECT to send for ", np->msgin); np->msgout[0] = M_REJECT; goto out; /* * We have been ODD at the end of a DATA IN * transfer and the device didn't send a * IGNORE WIDE RESIDUE message. * It is a data overrun condition. */ case SIR_SWIDE_OVERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SWIDE_OVRUN; } goto out; /* * We have been ODD at the end of a DATA OUT * transfer. * It is a data underrun condition. */ case SIR_SODL_UNDERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SODL_UNRUN; } goto out; /* * The device wants us to transfer more data than * expected or in the wrong direction. * The number of extra bytes is in scratcha. * It is a data overrun condition. */ case SIR_DATA_OVERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_EXTRA_DATA; cp->extra_bytes += INL (nc_scratcha); } goto out; /* * The device switched to an illegal phase (4/5). */ case SIR_BAD_PHASE: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_BAD_PHASE; } goto out; /* * We received a message. */ case SIR_MSG_RECEIVED: if (!cp) goto out_stuck; switch (np->msgin [0]) { /* * We received an extended message. * We handle MODIFY DATA POINTER, SDTR, WDTR * and reject all other extended messages. */ case M_EXTENDED: switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp,"modify DP",np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, cp, tmp); return; case M_X_SYNC_REQ: sym_sync_nego(np, tp, cp); return; case M_X_PPR_REQ: sym_ppr_nego(np, tp, cp); return; case M_X_WIDE_REQ: sym_wide_nego(np, tp, cp); return; default: goto out_reject; } break; /* * We received a 1/2 byte message not handled from SCRIPTS. * We are only expecting MESSAGE REJECT and IGNORE WIDE * RESIDUE messages that haven't been anticipated by * SCRIPTS on SWIDE full condition. Unanticipated IGNORE * WIDE RESIDUE messages are aliased as MODIFY DP (-1). */ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp,"ign wide residue", np->msgin); sym_modify_dp(np, cp, -1); return; case M_REJECT: if (INB (HS_PRT) == HS_NEGOTIATE) sym_nego_rejected(np, tp, cp); else { PRINT_ADDR(cp); printf ("M_REJECT received (%x:%x).\n", scr_to_cpu(np->lastmsg), np->msgout[0]); } goto out_clrack; break; default: goto out_reject; } break; /* * We received an unknown message. * Ignore all MSG IN phases and reject it. */ case SIR_MSG_WEIRD: sym_print_msg(cp, "WEIRD message received", np->msgin); OUTL_DSP (SCRIPTB_BA (np, msg_weird)); return; /* * Negotiation failed. * Target does not send us the reply. * Remove the HS_NEGOTIATE status. */ case SIR_NEGO_FAILED: OUTB (HS_PRT, HS_BUSY); /* * Negotiation failed. * Target does not want answer message. 
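	 * (Editor's note: SIR_NEGO_FAILED above intentionally falls
	 * through to this case, so both paths reset the negotiation to
	 * its defaults via sym_nego_default().)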
*/ case SIR_NEGO_PROTO: sym_nego_default(np, tp, cp); goto out; } out: OUTONB_STD (); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); return; out_clrack: OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_stuck: return; } /* * Acquire a control block */ static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order) { tcb_p tp = &np->target[tn]; lcb_p lp = sym_lp(tp, ln); u_short tag = NO_TAG; SYM_QUEHEAD *qp; ccb_p cp = (ccb_p) NULL; /* * Look for a free CCB */ if (sym_que_empty(&np->free_ccbq)) goto out; qp = sym_remque_head(&np->free_ccbq); if (!qp) goto out; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); /* * If the LCB is not yet available and the LUN * has been probed ok, try to allocate the LCB. */ if (!lp && sym_is_bit(tp->lun_map, ln)) { lp = sym_alloc_lcb(np, tn, ln); if (!lp) goto out_free; } /* * If the LCB is not available here, then the * logical unit is not yet discovered. For those * ones only accept 1 SCSI IO per logical unit, * since we cannot allow disconnections. */ if (!lp) { if (!sym_is_bit(tp->busy0_map, ln)) sym_set_bit(tp->busy0_map, ln); else goto out_free; } else { /* * If we have been asked for a tagged command. */ if (tag_order) { /* * Debugging purpose. */ assert(lp->busy_itl == 0); /* * Allocate resources for tags if not yet. */ if (!lp->cb_tags) { sym_alloc_lcb_tags(np, tn, ln); if (!lp->cb_tags) goto out_free; } /* * Get a tag for this SCSI IO and set up * the CCB bus address for reselection, * and count it for this LUN. * Toggle reselect path to tagged. */ if (lp->busy_itlq < SYM_CONF_MAX_TASK) { tag = lp->cb_tags[lp->ia_tag]; if (++lp->ia_tag == SYM_CONF_MAX_TASK) lp->ia_tag = 0; lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); ++lp->busy_itlq; lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA (np, resel_tag)); } else goto out_free; } /* * This command will not be tagged. * If we already have either a tagged or untagged * one, refuse to overlap this untagged one. */ else { /* * Debugging purpose. */ assert(lp->busy_itl == 0 && lp->busy_itlq == 0); /* * Count this nexus for this LUN. * Set up the CCB bus address for reselection. * Toggle reselect path to untagged. */ if (++lp->busy_itl == 1) { lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA (np, resel_no_tag)); } else goto out_free; } } /* * Put the CCB into the busy queue. */ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* * Remember all informations needed to free this CCB. */ cp->to_abort = 0; cp->tag = tag; cp->target = tn; cp->lun = ln; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_LUN(np, tn, ln); printf ("ccb @%p using tag %d.\n", cp, tag); } out: return cp; out_free: sym_insque_head(&cp->link_ccbq, &np->free_ccbq); return NULL; } /* * Release one control block */ static void sym_free_ccb(hcb_p np, ccb_p cp) { tcb_p tp = &np->target[cp->target]; lcb_p lp = sym_lp(tp, cp->lun); if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_LUN(np, cp->target, cp->lun); printf ("ccb @%p freeing tag %d.\n", cp, cp->tag); } /* * If LCB available, */ if (lp) { /* * If tagged, release the tag, set the relect path */ if (cp->tag != NO_TAG) { /* * Free the tag value. */ lp->cb_tags[lp->if_tag] = cp->tag; if (++lp->if_tag == SYM_CONF_MAX_TASK) lp->if_tag = 0; /* * Make the reselect path invalid, * and uncount this CCB. */ lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); --lp->busy_itlq; } else { /* Untagged */ /* * Make the reselect path invalid, * and uncount this CCB. 
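			 * (Editor's note on the bookkeeping above: cb_tags[]
			 * is used as a circular buffer of free tag numbers,
			 * ia_tag being the allocation index used by
			 * sym_get_ccb() and if_tag the free index used here,
			 * both wrapping at SYM_CONF_MAX_TASK.)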
*/ lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); --lp->busy_itl; } /* * If no JOB active, make the LUN reselect path invalid. */ if (lp->busy_itlq == 0 && lp->busy_itl == 0) lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); } /* * Otherwise, we only accept 1 IO per LUN. * Clear the bit that keeps track of this IO. */ else sym_clr_bit(tp->busy0_map, cp->lun); /* * We donnot queue more than 1 ccb per target * with negotiation at any time. If this ccb was * used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; #ifdef SYM_CONF_IARB_SUPPORT /* * If we just complete the last queued CCB, * clear this info that is no longer relevant. */ if (cp == np->last_cp) np->last_cp = NULL; #endif /* * Unmap user data from DMA map if needed. */ if (cp->dmamapped) { bus_dmamap_unload(np->data_dmat, cp->dmamap); cp->dmamapped = 0; } /* * Make this CCB available. */ cp->cam_ccb = NULL; cp->host_status = HS_IDLE; sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->free_ccbq); } /* * Allocate a CCB from memory and initialize its fixed part. */ static ccb_p sym_alloc_ccb(hcb_p np) { ccb_p cp = NULL; int hcode; SYM_LOCK_ASSERT(MA_NOTOWNED); /* * Prevent from allocating more CCBs than we can * queue to the controller. */ if (np->actccbs >= SYM_CONF_MAX_START) return NULL; /* * Allocate memory for this CCB. */ cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); if (!cp) return NULL; /* * Allocate a bounce buffer for sense data. */ cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF"); if (!cp->sns_bbuf) goto out_free; /* * Allocate a map for the DMA of user data. */ if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap)) goto out_free; /* * Count it. */ np->actccbs++; /* * Initialize the callout. */ callout_init(&cp->ch, 1); /* * Compute the bus address of this ccb. */ cp->ccb_ba = vtobus(cp); /* * Insert this ccb into the hashed list. */ hcode = CCB_HASH_CODE(cp->ccb_ba); cp->link_ccbh = np->ccbh[hcode]; np->ccbh[hcode] = cp; /* * Initialize the start and restart actions. */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, idle)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); /* * Initilialyze some other fields. */ cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); /* * Chain into free ccb queue. */ sym_insque_head(&cp->link_ccbq, &np->free_ccbq); return cp; out_free: if (cp->sns_bbuf) sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF"); sym_mfree_dma(cp, sizeof(*cp), "CCB"); return NULL; } /* * Look up a CCB from a DSA value. */ static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa) { int hcode; ccb_p cp; hcode = CCB_HASH_CODE(dsa); cp = np->ccbh[hcode]; while (cp) { if (cp->ccb_ba == dsa) break; cp = cp->link_ccbh; } return cp; } /* * Lun control block allocation and initialization. */ static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln) { tcb_p tp = &np->target[tn]; lcb_p lp = sym_lp(tp, ln); /* * Already done, just return. */ if (lp) return lp; /* * Check against some race. */ assert(!sym_is_bit(tp->busy0_map, ln)); /* * Allocate the LCB bus address array. * Compute the bus address of this table. */ if (ln && !tp->luntbl) { int i; tp->luntbl = sym_calloc_dma(256, "LUNTBL"); if (!tp->luntbl) goto fail; for (i = 0 ; i < 64 ; i++) tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); } /* * Allocate the table of pointers for LUN(s) > 0, if needed. 
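	 * (Editor's note, inferred from the code: luntbl[] is the 64-entry
	 * table of LCB bus addresses the SCRIPTS walks on reselection,
	 * while lunmp[] is the host-side array of LCB pointers for LUNs
	 * other than 0; LUN 0 keeps its own lun0p/lun0_sa shortcut.)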
 */
	if (ln && !tp->lunmp) {
		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
				   "LUNMP");
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);
	int i;

	/*
	 * If LCB not available, try to allocate it.
	 */
	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
		return;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		return;
	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
-		lp->itlq_tbl = 0;
+		lp->itlq_tbl = NULL;
		return;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * And accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
}

/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifndef SYM_CONF_IOMAPPED
static int sym_regtest (hcb_p np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	}
	return (0);
}
#endif

static int sym_snooptest (hcb_p np)
{
	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int	i, err=0;
#ifndef SYM_CONF_IOMAPPED
	err |= sym_regtest (np);
	if (err) return (err);
#endif
restart_test:
	/*
	 * Enable Master Parity Checking as we intend
	 * to enable it for normal operations.
	 */
	OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTB0_BA (np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->cache = cpu_to_scr(host_wr);
	OUTL (nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	if (i>=SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	}
	/*
	 * Check for fatal DMA errors.
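	 * (Editor's note: MDPE, BF and IID are the DSTAT bits for master
	 * data parity error, bus fault and illegal instruction detected,
	 * hence the fatal classification below.)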
*/ dstat = INB (nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n", sym_name(np)); np->rv_ctest4 &= ~MPEE; goto restart_test; } #endif if (dstat & (MDPE|BF|IID)) { printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80); } /* * Save termination position. */ pc = INL (nc_dsp); /* * Read memory and register. */ host_rd = scr_to_cpu(np->cache); sym_rd = INL (nc_scratcha); sym_bk = INL (nc_temp); /* * Check termination position. */ if (pc != SCRIPTB0_BA (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc, (u_long) SCRIPTB0_BA (np, snoopend) +8); return (0x40); } /* * Show results. */ if (host_wr != sym_rd) { printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", (int) host_wr, (int) sym_rd); err |= 1; } if (host_rd != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", (int) sym_wr, (int) host_rd); err |= 2; } if (sym_bk != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", (int) sym_wr, (int) sym_bk); err |= 4; } return (err); } /* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz). */ /* * Select SCSI clock frequency */ static void sym_selectclock(hcb_p np, u_char scntl3) { /* * If multiplier not present or not selected, leave here. */ if (np->multiplier <= 1) { OUTB(nc_scntl3, scntl3); return; } if (sym_verbose >= 2) printf ("%s: enabling clock multiplier\n", sym_name(np)); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ /* * Wait for the LCKFRQ bit to be set if supported by the chip. * Otherwise wait 20 micro-seconds. */ if (np->features & FE_LCKFRQ) { int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) UDELAY (20); if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); } else UDELAY (20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate SCSI clock frequency (in KHz) */ static unsigned getfreq (hcb_p np, int gen) { unsigned int ms = 0; unsigned int f; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). 
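 * (Editor's example with made-up numbers, using the formula in the
 * code below: with gen = 11 the timer nominally lasts
 * (1<<11) * 125us = 256ms; a measured delay of ms = 222 gives
 * f = (2048 * 4340) / 222, about 40037 KHz, i.e. a 40 MHz SCSI clock.)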
 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	/*
	 * set prescaler to divide by whatever 0 means
	 * 0 ought to choose divide by 2, but appears
	 * to set divide by 3.5 mode in my 53c810 ...
	 */
	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
			sym_name(np), gen, ms, f);

	return f;
}

static unsigned sym_getfreq (hcb_p np)
{
	u_int f1, f2;
	int gen = 11;

	(void) getfreq (np, gen);	/* throw away first result */
	f1 = getfreq (np, gen);
	f2 = getfreq (np, gen);
	if (f1 > f2) f1 = f2;		/* trust lower result */
	return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			printf ("%s: clock multiplier found\n", sym_name(np));
		np->multiplier = mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

		if	(f1 <	45000)		f1 =  40000;
		else if (f1 <	55000)		f1 =  50000;
		else				f1 =  80000;

		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				printf ("%s: clock multiplier assumed\n",
					sym_name(np));
			np->multiplier	= mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1		*= np->multiplier;
	np->clock_khz	= f1;
}

/*
 * Get/probe PCI clock frequency
 */
static int sym_getpciclock (hcb_p np)
{
	int f = 0;

	/*
	 * For the C1010-33, this doesn't work.
	 * For the C1010-66, this will be tested when I have
	 * such a beast to play with.
	 */
	if (!(np->features & FE_C10)) {
		OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
		f = (int) sym_getfreq (np);
		OUTB (nc_stest1, 0);
	}
	np->pciclk_khz = f;

	return f;
}

/*============= DRIVER ACTION/COMPLETION ====================*/

/*
 * Print something that tells about extended errors.
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
	if (x_status & XE_PARITY_ERR) {
		PRINT_ADDR(cp);
		printf ("unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		PRINT_ADDR(cp);
		printf ("extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		PRINT_ADDR(cp);
		printf ("illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Choose the most appropriate CAM status if
 * the IO encountered an extended error.
*/ static int sym_xerr_cam_status(int cam_status, int x_status) { if (x_status) { if (x_status & XE_PARITY_ERR) cam_status = CAM_UNCOR_PARITY; else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) cam_status = CAM_DATA_RUN_ERR; else if (x_status & XE_BAD_PHASE) cam_status = CAM_REQ_CMP_ERR; else cam_status = CAM_REQ_CMP_ERR; } return cam_status; } /* * Complete execution of a SCSI command with extented * error, SCSI status error, or having been auto-sensed. * * The SCRIPTS processor is not running there, so we * can safely access IO registers and remove JOBs from * the START queue. * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ static void sym_complete_error (hcb_p np, ccb_p cp) { struct ccb_scsiio *csio; u_int cam_status; int i, sense_returned; SYM_LOCK_ASSERT(MA_OWNED); /* * Paranoid check. :) */ if (!cp || !cp->cam_ccb) return; if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp, cp->host_status, cp->ssss_status, cp->host_flags, cp->target, cp->lun); MDELAY(100); } /* * Get CAM command pointer. */ csio = &cp->cam_ccb->csio; /* * Check for extended errors. */ if (cp->xerr_status) { if (sym_verbose) sym_print_xerr(cp, cp->xerr_status); if (cp->host_status == HS_COMPLETE) cp->host_status = HS_COMP_ERR; } /* * Calculate the residual. */ csio->sense_resid = 0; csio->resid = sym_compute_residual(np, cp); if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */ csio->resid = 0; /* throw them away. :) */ cp->sv_resid = 0; } if (cp->host_flags & HF_SENSE) { /* Auto sense */ csio->scsi_status = cp->sv_scsi_status; /* Restore status */ csio->sense_resid = csio->resid; /* Swap residuals */ csio->resid = cp->sv_resid; cp->sv_resid = 0; if (sym_verbose && cp->sv_xerr_status) sym_print_xerr(cp, cp->sv_xerr_status); if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_GOOD && cp->xerr_status == 0) { cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR, cp->sv_xerr_status); cam_status |= CAM_AUTOSNS_VALID; /* * Bounce back the sense data to user and * fix the residual. */ bzero(&csio->sense_data, sizeof(csio->sense_data)); sense_returned = SYM_SNS_BBUF_LEN - csio->sense_resid; if (sense_returned < csio->sense_len) csio->sense_resid = csio->sense_len - sense_returned; else csio->sense_resid = 0; bcopy(cp->sns_bbuf, &csio->sense_data, MIN(csio->sense_len, sense_returned)); #if 0 /* * If the device reports a UNIT ATTENTION condition * due to a RESET condition, we should consider all * disconnect CCBs for this unit as aborted. */ if (1) { u_char *p; p = (u_char *) csio->sense_data; if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) sym_clear_tasks(np, CAM_REQ_ABORTED, cp->target,cp->lun, -1); } #endif } else cam_status = CAM_AUTOSENSE_FAIL; } else if (cp->host_status == HS_COMPLETE) { /* Bad SCSI status */ csio->scsi_status = cp->ssss_status; cam_status = CAM_SCSI_STATUS_ERROR; } else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ cam_status = CAM_SEL_TIMEOUT; else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ cam_status = CAM_UNEXP_BUSFREE; else { /* Extended error */ if (sym_verbose) { PRINT_ADDR(cp); printf ("COMMAND FAILED (%x %x %x).\n", cp->host_status, cp->ssss_status, cp->xerr_status); } csio->scsi_status = cp->ssss_status; /* * Set the most appropriate value for CAM status. */ cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR, cp->xerr_status); } /* * Dequeue all queued CCBs for that device * not yet started by SCRIPTS. 
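	 * (Editor's note: SCRATCHA holds the bus address of the next start
	 * queue slot the SCRIPTS would have processed, so the computation
	 * below recovers the slot index as (SCRATCHA - squeue_ba) / 4, the
	 * queue being an array of 32-bit entries.)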
 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL_DSP (SCRIPTA_BA (np, start));

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}

/*
 * Complete execution of a successful SCSI command.
 *
 * Only successful commands go to the DONE queue,
 * since we need to have the SCRIPTS processor
 * stopped on any error condition.
 * The SCRIPTS processor is running while we are
 * completing successful commands.
 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given that no
	 * extended error occurred, there is no residual.
	 */
	csio->resid = 0;
	if (cp->phys.head.lastp != cp->phys.head.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid  = 0;

	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_xpt_done(np, (union ccb *) csio, cp);
	sym_free_ccb(np, cp);
}

/*
 * Our callout handler
 */
static void sym_callout(void *arg)
{
	union ccb *ccb = (union ccb *) arg;
	hcb_p np = ccb->ccb_h.sym_hcb_ptr;

	/*
	 * Check that the CAM CCB is still queued.
	 */
	if (!np)
		return;

	SYM_LOCK();

	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		(void) sym_abort_scsiio(np, ccb, 1);
		break;
	default:
		break;
	}

	SYM_UNLOCK();
}

/*
 * Abort a SCSI IO.
 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Look up our CCB control block.
	 */
	cp = NULL;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	if (!cp || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for the abort
	 * to complete.
	 */
	cp->to_abort = timed_out ? 2 : 1;
	callout_reset(&cp->ch, 10 * hz, sym_callout, (caddr_t) ccb);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}

/*
 * Reset a SCSI device (all LUNs of a target).
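 */

/*
 * The abort path above and the reset path below share one pattern:
 * set a flag in the per-CCB or per-target state, then raise SIGP
 * together with the SEM semaphore in ISTAT so the SCRIPTS processor
 * stops and lets the C code act on the flag. A minimal sketch of
 * that kick (hypothetical helper name; same registers as used here):
 */
#if 0
static void
kick_scripts_sketch(hcb_p np)
{
	np->istat_sem = SEM;		/* remember we own the semaphore */
	OUTB (nc_istat, SIGP|SEM);	/* signal; chip synchronizes with us */
}
#endif

/*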
*/ static void sym_reset_dev(hcb_p np, union ccb *ccb) { tcb_p tp; struct ccb_hdr *ccb_h = &ccb->ccb_h; SYM_LOCK_ASSERT(MA_OWNED); if (ccb_h->target_id == np->myaddr || ccb_h->target_id >= SYM_CONF_MAX_TARGET || ccb_h->target_lun >= SYM_CONF_MAX_LUN) { sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } tp = &np->target[ccb_h->target_id]; tp->to_reset = 1; sym_xpt_done2(np, ccb, CAM_REQ_CMP); np->istat_sem = SEM; OUTB (nc_istat, SIGP|SEM); } /* * SIM action entry point. */ static void sym_action(struct cam_sim *sim, union ccb *ccb) { hcb_p np; tcb_p tp; lcb_p lp; ccb_p cp; int tmp; u_char idmsg, *msgptr; u_int msglen; struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n")); /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); /* * The common case is SCSI IO. * We deal with other ones elsewhere. */ if (ccb->ccb_h.func_code != XPT_SCSI_IO) { sym_action2(sim, ccb); return; } csio = &ccb->csio; ccb_h = &csio->ccb_h; /* * Work around races. */ if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } /* * Minimal checkings, so that we will not * go outside our tables. */ if (ccb_h->target_id == np->myaddr || ccb_h->target_id >= SYM_CONF_MAX_TARGET || ccb_h->target_lun >= SYM_CONF_MAX_LUN) { sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } /* * Retrieve the target and lun descriptors. */ tp = &np->target[ccb_h->target_id]; lp = sym_lp(tp, ccb_h->target_lun); /* * Complete the 1st INQUIRY command with error * condition if the device is flagged NOSCAN * at BOOT in the NVRAM. This may speed up * the boot and maintain coherency with BIOS * device numbering. Clearing the flag allows * user to rescan skipped devices later. * We also return error for devices not flagged * for SCAN LUNS in the NVRAM since some mono-lun * devices behave badly when asked for some non * zero LUN. Btw, this is an absolute hack.:-) */ if (!(ccb_h->flags & CAM_CDB_PHYS) && (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ? csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) { if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) || ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && ccb_h->target_lun != 0)) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } } /* * Get a control block for this IO. */ tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0); cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp); if (!cp) { sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL); return; } /* * Keep track of the IO in our CCB. */ cp->cam_ccb = ccb; /* * Build the IDENTIFY message. */ idmsg = M_IDENTIFY | cp->lun; if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED))) idmsg |= 0x40; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = csio->tag_action; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. 
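 */

/*
 * A sketch of the trigger used just below: negotiation is (re)started
 * whenever any of the four agreed transfer parameters differs from
 * the goal. Illustration only (hypothetical helper; struct sym_trans
 * is this driver's type):
 */
#if 0
static int
need_nego_sketch(const struct sym_trans *cur, const struct sym_trans *goal)
{
	return	cur->width   != goal->width  ||
		cur->period  != goal->period ||
		cur->offset  != goal->offset ||
		cur->options != goal->options;
}
#endif

/*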
* (nego_status is filled by sym_prepare_nego()) */ cp->nego_status = 0; if (tp->tinfo.current.width != tp->tinfo.goal.width || tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset || tp->tinfo.current.options != tp->tinfo.goal.options) { if (!tp->nego_cp && lp) msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen); } /* * Fill in our ccb */ /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * command */ if (sym_setup_cdb(np, csio, cp) < 0) { sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); return; } /* * status */ #if 0 /* Provision */ cp->actualquirks = tp->quirks; #endif cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. * shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the data descriptor block * and start the IO. */ sym_setup_data_and_start(np, csio, cp); } /* * Setup buffers and pointers that address the CDB. * I bet, physical CDBs will never be used on the planet, * since they can be bounced without significant overhead. */ static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; u32 cmd_ba; int cmd_len; SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &csio->ccb_h; /* * CDB is 16 bytes max. */ if (csio->cdb_len > sizeof(cp->cdb_buf)) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; } cmd_len = csio->cdb_len; if (ccb_h->flags & CAM_CDB_POINTER) { /* CDB is a pointer */ if (!(ccb_h->flags & CAM_CDB_PHYS)) { /* CDB pointer is virtual */ bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } else { /* CDB pointer is physical */ #if 0 cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff; #else sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; #endif } } else { /* CDB is in the CAM ccb (buffer) */ bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } cp->phys.cmd.addr = cpu_to_scr(cmd_ba); cp->phys.cmd.size = cpu_to_scr(cmd_len); return 0; } /* * Set up data pointers used by SCRIPTS. */ static void __inline sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir) { u32 lastp, goalp; SYM_LOCK_ASSERT(MA_OWNED); /* * No segments means no data. */ if (!cp->segments) dir = CAM_DIR_NONE; /* * Set the data pointer. */ switch(dir) { case CAM_DIR_OUT: goalp = SCRIPTA_BA (np, data_out2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_IN: cp->host_flags |= HF_DATA_IN; goalp = SCRIPTA_BA (np, data_in2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_NONE: default: lastp = goalp = SCRIPTB_BA (np, no_data); break; } cp->phys.head.lastp = cpu_to_scr(lastp); cp->phys.head.goalp = cpu_to_scr(goalp); cp->phys.head.savep = cpu_to_scr(lastp); cp->startp = cp->phys.head.savep; } /* * Call back routine for the DMA map service. * If bounce buffers are used (why ?), we may sleep and then * be called there in another context. 
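 */

/*
 * For readers unfamiliar with busdma: sym_execute_ccb() below is a
 * bus_dmamap_callback_t. A generic, hedged sketch of the contract
 * (tag/map/buf names are hypothetical; the API is the stock FreeBSD
 * one):
 */
#if 0
static void
load_cb_sketch(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/*
	 * error != 0 means the mapping failed; segs is only valid
	 * for the duration of this call, so consume the nseg
	 * segments now.
	 */
}

static int
load_sketch(bus_dma_tag_t tag, bus_dmamap_t map, void *buf, bus_size_t len)
{
	int err;

	err = bus_dmamap_load(tag, map, buf, len, load_cb_sketch, NULL, 0);
	if (err == EINPROGRESS) {
		/*
		 * Resources were short: the callback will run later,
		 * possibly from another context -- exactly the case
		 * the comment above warns about.
		 */
	}
	return err;
}
#endif

/*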
*/ static void sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error) { ccb_p cp; hcb_p np; union ccb *ccb; cp = (ccb_p) arg; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; SYM_LOCK_ASSERT(MA_OWNED); /* * Deal with weird races. */ if (sym_get_cam_status(ccb) != CAM_REQ_INPROG) goto out_abort; /* * Deal with weird errors. */ if (error) { cp->dmamapped = 0; sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); goto out_abort; } /* * Build the data descriptor for the chip. */ if (nsegs) { int retv; /* 896 rev 1 requires to be careful about boundaries */ if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1) retv = sym_scatter_sg_physical(np, cp, psegs, nsegs); else retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs); if (retv < 0) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG); goto out_abort; } } /* * Synchronize the DMA map only if we have * actually mapped the data. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } /* * Set host status to busy state. * May have been set back to HS_WAIT to avoid a race. */ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; /* * Set data pointers. */ sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK)); /* * Enqueue this IO in our pending queue. */ sym_enqueue_cam_ccb(cp); /* * When `#ifed 1', the code below makes the driver * panic on the first attempt to write to a SCSI device. * It is the first test we want to do after a driver * change that does not seem obviously safe. :) */ #if 0 switch (cp->cdb_buf[0]) { case 0x0A: case 0x2A: case 0xAA: panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); MDELAY(10000); break; default: break; } #endif /* * Activate this job. */ sym_put_start_queue(np, cp); return; out_abort: sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); } /* * How complex it gets to deal with the data in CAM. * The Bus Dma stuff makes things still more complex. */ static void sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; int dir, retv; SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &csio->ccb_h; /* * Now deal with the data. */ cp->data_len = csio->dxfer_len; cp->arg = np; /* * No direction means no data. */ dir = (ccb_h->flags & CAM_DIR_MASK); if (dir == CAM_DIR_NONE) { sym_execute_ccb(cp, NULL, 0, 0); return; } cp->dmamapped = (dir == CAM_DIR_IN) ? SYM_DMA_READ : SYM_DMA_WRITE; retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap, (union ccb *)csio, sym_execute_ccb, cp, 0); if (retv == EINPROGRESS) { cp->host_status = HS_WAIT; xpt_freeze_simq(np->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } /* * Move the scatter list to our data block. */ static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { struct sym_tblmove *data; bus_dma_segment_t *psegs2; SYM_LOCK_ASSERT(MA_OWNED); if (nsegs > SYM_CONF_MAX_SG) return -1; data = &cp->phys.data[SYM_CONF_MAX_SG-1]; psegs2 = &psegs[nsegs-1]; cp->segments = nsegs; while (1) { data->addr = cpu_to_scr(psegs2->ds_addr); data->size = cpu_to_scr(psegs2->ds_len); if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), (long) psegs2->ds_addr, (long) psegs2->ds_len); } if (psegs2 != psegs) { --data; --psegs2; continue; } break; } return 0; } /* * Scatter a SG list with physical addresses into bus addressable chunks. 
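 */

/*
 * A forward-walking illustration of the boundary rule enforced by
 * sym_scatter_sg_physical() below (the real code walks backwards so
 * it can fill the data table from its tail): split [ps, pe) so that
 * no chunk crosses a power-of-two boundary. Hypothetical helper;
 * rounddown2 as in <sys/param.h>:
 */
#if 0
static int
split_on_boundary_sketch(u_long ps, u_long pe, u_long boundary)
{
	int chunks = 0;

	while (ps < pe) {
		u_long pn = rounddown2(ps, boundary) + boundary;
		if (pn > pe)
			pn = pe;
		/* emit chunk [ps, pn) here */
		chunks++;
		ps = pn;
	}
	return chunks;
}
#endif

/*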
*/ static int sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { u_long ps, pe, pn; u_long k; int s, t; SYM_LOCK_ASSERT(MA_OWNED); s = SYM_CONF_MAX_SG - 1; t = nsegs - 1; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; while (s >= 0) { pn = rounddown2(pe - 1, SYM_CONF_DMA_BOUNDARY); if (pn <= ps) pn = ps; k = pe - pn; if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), pn, k); } cp->phys.data[s].addr = cpu_to_scr(pn); cp->phys.data[s].size = cpu_to_scr(k); --s; if (pn == ps) { if (--t < 0) break; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; } else pe = pn; } cp->segments = SYM_CONF_MAX_SG - 1 - s; return t >= 0 ? -1 : 0; } /* * SIM action for non performance critical stuff. */ static void sym_action2(struct cam_sim *sim, union ccb *ccb) { union ccb *abort_ccb; struct ccb_hdr *ccb_h; struct ccb_pathinq *cpi; struct ccb_trans_settings *cts; struct sym_trans *tip; hcb_p np; tcb_p tp; lcb_p lp; u_char dflags; /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &ccb->ccb_h; switch (ccb_h->func_code) { case XPT_SET_TRAN_SETTINGS: cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; /* * Update SPI transport settings in TARGET control block. * Update SCSI device settings in LUN control block. */ lp = sym_lp(tp, ccb_h->target_lun); if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { sym_update_trans(np, &tp->tinfo.goal, cts); if (lp) sym_update_dflags(np, &lp->current_flags, cts); } if (cts->type == CTS_TYPE_USER_SETTINGS) { sym_update_trans(np, &tp->tinfo.user, cts); if (lp) sym_update_dflags(np, &lp->user_flags, cts); } sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; lp = sym_lp(tp, ccb_h->target_lun); #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tip = &tp->tinfo.current; dflags = lp ? lp->current_flags : 0; } else { tip = &tp->tinfo.user; dflags = lp ? lp->user_flags : tp->usrflags; } cts->protocol = PROTO_SCSI; cts->transport = XPORT_SPI; cts->protocol_version = tip->scsi_version; cts->transport_version = tip->spi_version; cts__spi->sync_period = tip->period; cts__spi->sync_offset = tip->offset; cts__spi->bus_width = tip->width; cts__spi->ppr_options = tip->options; cts__spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_PPR_OPTIONS; cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (dflags & SYM_DISC_ENABLED) cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB; cts__spi->valid |= CTS_SPI_VALID_DISC; cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; if (dflags & SYM_TAGS_ENABLED) cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; cts__scsi->valid |= CTS_SCSI_VALID_TQ; #undef cts__spi #undef cts__scsi sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_PATH_INQ: cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_UNMAPPED; if (np->usrflags & SYM_SCAN_TARGETS_HILO) cpi->hba_misc |= PIM_SCANHILO; if (np->usrflags & SYM_AVOID_BUS_RESET) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 
15 : 7; /* Semantic problem:)LUN number max = max number of LUNs - 1 */ cpi->max_lun = SYM_CONF_MAX_LUN-1; if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN) cpi->max_lun = SYM_SETUP_MAX_LUN-1; cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = np->myaddr; cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; if (np->features & FE_ULTRA3) { cpi->transport_version = 3; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; } cpi->maxio = SYM_CONF_MAX_SG * PAGE_SIZE; sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_ABORT: abort_ccb = ccb->cab.abort_ccb; switch(abort_ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (sym_abort_scsiio(np, abort_ccb, 0) == 0) { sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } default: sym_xpt_done2(np, ccb, CAM_UA_ABORT); break; } break; case XPT_RESET_DEV: sym_reset_dev(np, ccb); break; case XPT_RESET_BUS: sym_reset_scsi_bus(np, 0); if (sym_verbose) { xpt_print_path(np->path); printf("SCSI BUS reset delivered.\n"); } sym_init (np, 1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_TERM_IO: default: sym_xpt_done2(np, ccb, CAM_REQ_INVALID); break; } } /* * Asynchronous notification handler. */ static void sym_async(void *cb_arg, u32 code, struct cam_path *path, void *args __unused) { hcb_p np; struct cam_sim *sim; u_int tn; tcb_p tp; sim = (struct cam_sim *) cb_arg; np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); switch (code) { case AC_LOST_DEVICE: tn = xpt_path_target_id(path); if (tn >= SYM_CONF_MAX_TARGET) break; tp = &np->target[tn]; tp->to_reset = 0; tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = tp->tinfo.goal.period = 0; tp->tinfo.current.offset = tp->tinfo.goal.offset = 0; tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT; tp->tinfo.current.options = tp->tinfo.goal.options = 0; break; default: break; } } /* * Update transfer settings of a target. */ static void sym_update_trans(hcb_p np, struct sym_trans *tip, struct ccb_trans_settings *cts) { SYM_LOCK_ASSERT(MA_OWNED); /* * Update the infos. */ #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tip->width = cts__spi->bus_width; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tip->offset = cts__spi->sync_offset; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tip->period = cts__spi->sync_period; if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0) tip->options = (cts__spi->ppr_options & PPR_OPT_DT); if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED && cts->protocol_version != PROTO_VERSION_UNKNOWN) tip->scsi_version = cts->protocol_version; if (cts->transport_version != XPORT_VERSION_UNSPECIFIED && cts->transport_version != XPORT_VERSION_UNKNOWN) tip->spi_version = cts->transport_version; #undef cts__spi /* * Scale against driver configuration limits. */ if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE; if (tip->period && tip->offset) { if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS; if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC; } else { tip->offset = 0; tip->period = 0; } /* * Scale against actual controller BUS width. 
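 */

/*
 * The scaling below is plain range clamping; a sketch of the shape,
 * with a hypothetical helper (the real code clamps the period to
 * [minsync, maxsync] and the offset to [0, maxoffs], or the _dt
 * variants when DT is in effect):
 */
#if 0
static u_char
clamp_sketch(u_char v, u_char lo, u_char hi)
{
	if (v < lo)
		v = lo;
	if (v > hi)
		v = hi;
	return v;
}
#endif

/*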
*/ if (tip->width > np->maxwide) tip->width = np->maxwide; /* * Only accept DT if controller supports and SYNC/WIDE asked. */ if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) || !(tip->width == BUS_16_BIT && tip->offset)) { tip->options &= ~PPR_OPT_DT; } /* * Scale period factor and offset against controller limits. */ if (tip->offset && tip->period) { if (tip->options & PPR_OPT_DT) { if (tip->period < np->minsync_dt) tip->period = np->minsync_dt; if (tip->period > np->maxsync_dt) tip->period = np->maxsync_dt; if (tip->offset > np->maxoffs_dt) tip->offset = np->maxoffs_dt; } else { if (tip->period < np->minsync) tip->period = np->minsync; if (tip->period > np->maxsync) tip->period = np->maxsync; if (tip->offset > np->maxoffs) tip->offset = np->maxoffs; } } } /* * Update flags for a device (logical unit). */ static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts) { SYM_LOCK_ASSERT(MA_OWNED); #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *flags |= SYM_DISC_ENABLED; else *flags &= ~SYM_DISC_ENABLED; } if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *flags |= SYM_TAGS_ENABLED; else *flags &= ~SYM_TAGS_ENABLED; } #undef cts__spi #undef cts__scsi } /*============= DRIVER INITIALISATION ==================*/ static device_method_t sym_pci_methods[] = { DEVMETHOD(device_probe, sym_pci_probe), DEVMETHOD(device_attach, sym_pci_attach), DEVMETHOD_END }; static driver_t sym_pci_driver = { "sym", sym_pci_methods, 1 /* no softc */ }; static devclass_t sym_devclass; DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, NULL, NULL); MODULE_DEPEND(sym, cam, 1, 1, 1); MODULE_DEPEND(sym, pci, 1, 1, 1); static const struct sym_pci_chip sym_pci_dev_table[] = { {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64, FE_ERL} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_BOF} , #else {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} , #endif {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64, FE_BOF|FE_ERL} , {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64, FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} , {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2, FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} , {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} , {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| FE_RAM|FE_LCKFRQ} , #else {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_LCKFRQ} , #endif {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , 
{PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10} , {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_IO256|FE_LEDC} }; /* * Look up the chip table. * * Return a pointer to the chip entry if found, * zero otherwise. */ static const struct sym_pci_chip * sym_find_pci_chip(device_t dev) { const struct sym_pci_chip *chip; int i; u_short device_id; u_char revision; if (pci_get_vendor(dev) != PCI_VENDOR_NCR) return NULL; device_id = pci_get_device(dev); revision = pci_get_revid(dev); for (i = 0; i < nitems(sym_pci_dev_table); i++) { chip = &sym_pci_dev_table[i]; if (device_id != chip->device_id) continue; if (revision > chip->revision_id) continue; return chip; } return NULL; } /* * Tell upper layer if the chip is supported. */ static int sym_pci_probe(device_t dev) { const struct sym_pci_chip *chip; chip = sym_find_pci_chip(dev); if (chip && sym_find_firmware(chip)) { device_set_desc(dev, chip->name); return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? BUS_PROBE_LOW_PRIORITY : BUS_PROBE_DEFAULT; } return ENXIO; } /* * Attach a sym53c8xx device. */ static int sym_pci_attach(device_t dev) { const struct sym_pci_chip *chip; u_short command; u_char cachelnsz; struct sym_hcb *np = NULL; struct sym_nvram nvram; const struct sym_fw *fw = NULL; int i; bus_dma_tag_t bus_dmat; bus_dmat = bus_get_dma_tag(dev); /* * Only probed devices should be attached. * We just enjoy being paranoid. :) */ chip = sym_find_pci_chip(dev); if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL) return (ENXIO); /* * Allocate immediately the host control block, * since we are only expecting to succeed. :) * We keep track in the HCB of all the resources that * are to be released on error. */ np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB"); if (np) np->bus_dmat = bus_dmat; else return (ENXIO); device_set_softc(dev, np); SYM_LOCK_INIT(); /* * Copy some useful infos to the HCB. */ np->hcb_ba = vtobus(np); np->verbose = bootverbose; np->device = dev; np->device_id = pci_get_device(dev); np->revision_id = pci_get_revid(dev); np->features = chip->features; np->clock_divn = chip->nr_divisor; np->maxoffs = chip->offset_max; np->maxburst = chip->burst_max; np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; #ifdef __amd64__ np->target = sym_calloc_dma(SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET"); if (!np->target) goto attach_failed; #endif /* * Initialize the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); sym_que_init(&np->cam_ccbq); /* * Allocate a tag for the DMA of user data. 
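 */

/*
 * Annotated restatement of the bus_dma_tag_create() call that
 * follows (values are the driver's own; annotations are editorial
 * and hedged -- see busdma(9) for the authoritative meaning of each
 * parameter):
 */
#if 0
	error = bus_dma_tag_create(
		np->bus_dmat,		/* inherit restrictions from parent */
		1,			/* alignment: none beyond a byte */
		SYM_CONF_DMA_BOUNDARY,	/* no segment may cross this boundary */
		BUS_SPACE_MAXADDR_32BIT,/* user data DMA stays below 4GB */
		BUS_SPACE_MAXADDR,	/* highaddr: no exclusion window */
		NULL, NULL,		/* no filter function */
		BUS_SPACE_MAXSIZE_32BIT,/* maximum mapping size */
		SYM_CONF_MAX_SG,	/* at most this many S/G segments */
		SYM_CONF_DMA_BOUNDARY,	/* maximum size of one segment */
		0,			/* flags */
		busdma_lock_mutex, &np->mtx, /* lock for deferred callbacks */
		&np->data_dmat);	/* resulting tag */
#endif

/*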
*/ if (bus_dma_tag_create(np->bus_dmat, 1, SYM_CONF_DMA_BOUNDARY, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, SYM_CONF_MAX_SG, SYM_CONF_DMA_BOUNDARY, 0, busdma_lock_mutex, &np->mtx, &np->data_dmat)) { device_printf(dev, "failed to create DMA tag.\n"); goto attach_failed; } /* * Read and apply some fix-ups to the PCI COMMAND * register. We want the chip to be enabled for: * - BUS mastering * - PCI parity checking (reporting would also be fine) * - Write And Invalidate. */ command = pci_read_config(dev, PCIR_COMMAND, 2); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, command, 2); /* * Let the device know about the cache line size, * if it doesn't yet. */ cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (!cachelnsz) { cachelnsz = 8; pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); } /* * Alloc/get/map/retrieve everything that deals with MMIO. */ i = SYM_PCI_MMIO; np->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (!np->mmio_res) { device_printf(dev, "failed to allocate MMIO resources\n"); goto attach_failed; } np->mmio_ba = rman_get_start(np->mmio_res); /* * Allocate the IRQ. */ i = 0; np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (!np->irq_res) { device_printf(dev, "failed to allocate IRQ resource\n"); goto attach_failed; } #ifdef SYM_CONF_IOMAPPED /* * User want us to use normal IO with PCI. * Alloc/get/map/retrieve everything that deals with IO. */ i = SYM_PCI_IO; np->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE); if (!np->io_res) { device_printf(dev, "failed to allocate IO resources\n"); goto attach_failed; } #endif /* SYM_CONF_IOMAPPED */ /* * If the chip has RAM. * Alloc/get/map/retrieve the corresponding resources. */ if (np->features & (FE_RAM|FE_RAM8K)) { int regs_id = SYM_PCI_RAM; if (np->features & FE_64BIT) regs_id = SYM_PCI_RAM64; np->ram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, ®s_id, RF_ACTIVE); if (!np->ram_res) { device_printf(dev,"failed to allocate RAM resources\n"); goto attach_failed; } np->ram_id = regs_id; np->ram_ba = rman_get_start(np->ram_res); } /* * Save setting of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset (np); /* * Try to read the user set-up. */ (void) sym_read_nvram(np, &nvram); /* * Prepare controller and devices settings, according * to chip features, user set-up and driver set-up. */ (void) sym_prepare_setting(np, &nvram); /* * Check the PCI clock frequency. * Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. */ i = sym_getpciclock(np); if (i > 37000) device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i); /* * Allocate the start queue. */ np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas. 
*/ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); if (!np->scripta0 || !np->scriptb0) goto attach_failed; /* * Allocate the CCBs. We need at least ONE. */ for (i = 0; sym_alloc_ccb(np) != NULL; i++) ; if (i < 1) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptb0_ba = np->scriptb_ba; if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->ram_ws = 8192; np->scriptb_ba = np->scripta_ba + 4096; #ifdef __LP64__ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } else np->ram_ws = 4096; } /* * Copy scripts to controller instance. */ bcopy(fw->a_base, np->scripta0, np->scripta_sz); bcopy(fw->b_base, np->scriptb0, np->scriptb_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. */ np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. */ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the pci chipset. */ if (sym_snooptest (np)) { device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n"); goto attach_failed; } /* * Now deal with CAM. * Hopefully, we will succeed with that one.:) */ if (!sym_cam_attach(np)) goto attach_failed; /* * Sigh! we are done. */ return 0; /* * We have failed. 
 * We will try to free all the resources we have
 * allocated, but if we are a boot device, this
 * will not help that much.;)
 */
attach_failed:
	if (np)
		sym_pci_free(np);
	return ENXIO;
}

/*
 * Free everything that has been allocated for this device.
 */
static void sym_pci_free(hcb_p np)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;
	tcb_p tp;
	lcb_p lp;
	int target, lun;

	/*
	 * First free CAM resources.
	 */
	sym_cam_free(np);

	/*
	 * Now everything should be quiet for us to
	 * free other resources.
	 */
	if (np->ram_res)
		bus_release_resource(np->device, SYS_RES_MEMORY,
				     np->ram_id, np->ram_res);
	if (np->mmio_res)
		bus_release_resource(np->device, SYS_RES_MEMORY,
				     SYM_PCI_MMIO, np->mmio_res);
	if (np->io_res)
		bus_release_resource(np->device, SYS_RES_IOPORT,
				     SYM_PCI_IO, np->io_res);
	if (np->irq_res)
		bus_release_resource(np->device, SYS_RES_IRQ,
				     0, np->irq_res);

	if (np->scriptb0)
		sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
	if (np->scripta0)
		sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
	if (np->squeue)
		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
	if (np->dqueue)
		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");

	while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		bus_dmamap_destroy(np->data_dmat, cp->dmamap);
		sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	}

	if (np->badluntbl)
		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");

	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
		tp = &np->target[target];
		for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
			lp = sym_lp(tp, lun);
			if (!lp)
				continue;
			if (lp->itlq_tbl)
				sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
					"ITLQ_TBL");
			if (lp->cb_tags)
				sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK,
					"CB_TAGS");
			sym_mfree_dma(lp, sizeof(*lp), "LCB");
		}
#if SYM_CONF_MAX_LUN > 1
		if (tp->lunmp)
			sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p),
				"LUNMP");
#endif
	}
#ifdef __amd64__
	if (np->target)
		sym_mfree_dma(np->target,
			SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET");
#endif
	if (np->targtbl)
		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
	if (np->data_dmat)
		bus_dma_tag_destroy(np->data_dmat);
	if (SYM_LOCK_INITIALIZED() != 0)
		SYM_LOCK_DESTROY();
	device_set_softc(np->device, NULL);
	sym_mfree_dma(np, sizeof(*np), "HCB");
}

/*
 * Allocate CAM resources and register a bus to CAM.
 */
static int sym_cam_attach(hcb_p np)
{
	struct cam_devq *devq = NULL;
	struct cam_sim *sim = NULL;
	struct cam_path *path = NULL;
	int err;

	/*
	 * Establish our interrupt handler.
	 */
	err = bus_setup_intr(np->device, np->irq_res,
			INTR_ENTROPY | INTR_MPSAFE | INTR_TYPE_CAM,
			NULL, sym_intr, np, &np->intr);
	if (err) {
		device_printf(np->device, "bus_setup_intr() failed: %d\n",
			      err);
		goto fail;
	}

	/*
	 * Create the device queue for our sym SIM.
	 */
	devq = cam_simq_alloc(SYM_CONF_MAX_START);
	if (!devq)
		goto fail;

	/*
	 * Construct our SIM entry.
	 */
	sim = cam_sim_alloc(sym_action, sym_poll, "sym", np,
			device_get_unit(np->device),
			&np->mtx, 1, SYM_SETUP_MAX_TAG, devq);
	if (!sim)
		goto fail;

	SYM_LOCK();

	if (xpt_bus_register(sim, np->device, 0) != CAM_SUCCESS)
		goto fail;
	np->sim = sim;
	sim = NULL;

	if (xpt_create_path(&path, NULL,
			    cam_sim_path(np->sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		goto fail;
	}
	np->path = path;

	/*
	 * Establish our async notification handler.
	 */
	if (xpt_register_async(AC_LOST_DEVICE, sym_async, np->sim, path) !=
	    CAM_REQ_CMP)
		goto fail;

	/*
	 * Start the chip now, without resetting the BUS, since
	 * it seems that this must stay under control of CAM.
 * With LVD/SE capable chips and BUS in SE mode, we may
 * get a spurious SMBC interrupt.
 */
	sym_init (np, 0);

	SYM_UNLOCK();

	return 1;
fail:
	if (sim)
		cam_sim_free(sim, FALSE);
	if (devq)
		cam_simq_free(devq);

	SYM_UNLOCK();

	sym_cam_free(np);

	return 0;
}

/*
 * Free everything that deals with CAM.
 */
static void sym_cam_free(hcb_p np)
{
	SYM_LOCK_ASSERT(MA_NOTOWNED);

	if (np->intr) {
		bus_teardown_intr(np->device, np->irq_res, np->intr);
		np->intr = NULL;
	}

	SYM_LOCK();

	if (np->sim) {
		xpt_bus_deregister(cam_sim_path(np->sim));
		cam_sim_free(np->sim, /*free_devq*/ TRUE);
		np->sim = NULL;
	}
	if (np->path) {
		xpt_free_path(np->path);
		np->path = NULL;
	}

	SYM_UNLOCK();
}

/*============ OPTIONAL NVRAM SUPPORT =================*/

/*
 * Get host setup from NVRAM.
 */
static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Get parity checking, host ID, verbose mode
	 * and miscellaneous host flags from NVRAM.
	 */
	switch(nvram->type) {
	case SYM_SYMBIOS_NVRAM:
		if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
			np->rv_scntl0 &= ~0x0a;
		np->myaddr = nvram->data.Symbios.host_id & 0x0f;
		if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
			np->verbose += 1;
		if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
			np->usrflags |= SYM_SCAN_TARGETS_HILO;
		if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
			np->usrflags |= SYM_AVOID_BUS_RESET;
		break;
	case SYM_TEKRAM_NVRAM:
		np->myaddr = nvram->data.Tekram.host_id & 0x0f;
		break;
	default:
		break;
	}
#endif
}

/*
 * Get target setup from NVRAM.
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
#endif

static void sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	switch(nvp->type) {
	case SYM_SYMBIOS_NVRAM:
		sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
		break;
	case SYM_TEKRAM_NVRAM:
		sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
		break;
	default:
		break;
	}
#endif
}

#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * Get target set-up from Symbios format NVRAM.
 */
static void
sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	Symbios_target *tn = &nvram->target[target];

	tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
	tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
	tp->usrtags =
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;

	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
		tp->usrflags &= ~SYM_DISC_ENABLED;
	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
		tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
	if (!(tn->flags & SYMBIOS_SCAN_LUNS))
		tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
}

/*
 * Get target set-up from Tekram format NVRAM.
 */
static void
sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	struct Tekram_target *tn = &nvram->target[target];
	int i;

	if (tn->flags & TEKRAM_SYNC_NEGO) {
		i = tn->sync_index & 0xf;
		tp->tinfo.user.period = Tekram_sync[i];
	}

	tp->tinfo.user.width = (tn->flags & TEKRAM_WIDE_NEGO) ?
BUS_16_BIT : BUS_8_BIT; if (tn->flags & TEKRAM_TAGGED_COMMANDS) { tp->usrtags = 2 << nvram->max_tags_index; } if (tn->flags & TEKRAM_DISCONNECT_ENABLE) tp->usrflags |= SYM_DISC_ENABLED; /* If any device does not support parity, we will not use this option */ if (!(tn->flags & TEKRAM_PARITY_CHECK)) np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */ } #ifdef SYM_CONF_DEBUG_NVRAM /* * Dump Symbios format NVRAM for debugging purpose. */ static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram) { int i; /* display Symbios nvram host data */ printf("%s: HOST ID=%d%s%s%s%s%s%s\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"", (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"", (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"", (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"", (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :""); /* display Symbios nvram drive data */ for (i = 0 ; i < 15 ; i++) { struct Symbios_target *tn = &nvram->target[i]; printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", sym_name(np), i, (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "", (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "", (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "", tn->bus_width, tn->sync_period / 4, tn->timeout); } } /* * Dump TEKRAM format NVRAM for debugging purpose. */ static const u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120}; static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram) { int i, tags, boot_delay; char *rem; /* display Tekram nvram host data */ tags = 2 << nvram->max_tags_index; boot_delay = 0; if (nvram->boot_delay_index < 6) boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) { default: case 0: rem = ""; break; case 1: rem = " REMOVABLE=boot device"; break; case 2: rem = " REMOVABLE=all"; break; } printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"", (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"", (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"", (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"", (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"", (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"", (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"", rem, boot_delay, tags); /* display Tekram nvram drive data */ for (i = 0; i <= 15; i++) { int sync, j; struct Tekram_target *tn = &nvram->target[i]; j = tn->sync_index & 0xf; sync = Tekram_sync[j]; printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n", sym_name(np), i, (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "", (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "", (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & TEKRAM_START_CMD) ? " START" : "", (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "", (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "", sync); } } #endif /* SYM_CONF_DEBUG_NVRAM */ #endif /* SYM_CONF_NVRAM_SUPPORT */ /* * Try reading Symbios or Tekram NVRAM */ #ifdef SYM_CONF_NVRAM_SUPPORT static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram); static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram); #endif static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Try to read SYMBIOS nvram. 
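 */

/*
 * Both NVRAM layouts are validated by simple additive checksums. A
 * hedged sketch of the Tekram-style check used further below, where
 * the 16-bit little-endian word sum of the image must equal a fixed
 * constant (the driver compares against 0x1234 in
 * sym_read_Tekram_nvram()); the helper name is hypothetical:
 */
#if 0
static int
csum16_ok_sketch(const u_char *data, int len, u_short want)
{
	u_short csum = 0;
	int x;

	for (x = 0; x < len - 1; x += 2)
		csum += data[x] + (data[x+1] << 8);
	return csum == want;
}
#endif

/*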
 * Try to read TEKRAM nvram if Symbios nvram not found.
 */
	if	(SYM_SETUP_SYMBIOS_NVRAM &&
		 !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) {
		nvp->type = SYM_SYMBIOS_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Symbios_nvram(np, &nvp->data.Symbios);
#endif
	}
	else if	(SYM_SETUP_TEKRAM_NVRAM &&
		 !sym_read_Tekram_nvram (np, &nvp->data.Tekram)) {
		nvp->type = SYM_TEKRAM_NVRAM;
#ifdef SYM_CONF_DEBUG_NVRAM
		sym_display_Tekram_nvram(np, &nvp->data.Tekram);
#endif
	}
	else
		nvp->type = 0;
#else
	nvp->type = 0;
#endif
	return nvp->type;
}

#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * 24C16 EEPROM reading.
 *
 * GPIO0 - data in/data out
 * GPIO1 - clock
 * Symbios NVRAM wiring now also used by Tekram.
 */

#define SET_BIT 0
#define CLR_BIT 1
#define SET_CLK 2
#define CLR_CLK 3

/*
 * Set/clear data/clock bit in GPIO0
 */
static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
			  int bit_mode)
{
	UDELAY (5);
	switch (bit_mode){
	case SET_BIT:
		*gpreg |= write_bit;
		break;
	case CLR_BIT:
		*gpreg &= 0xfe;
		break;
	case SET_CLK:
		*gpreg |= 0x02;
		break;
	case CLR_CLK:
		*gpreg &= 0xfd;
		break;
	}
	OUTB (nc_gpreg, *gpreg);
	UDELAY (5);
}

/*
 * Send START condition to NVRAM to wake it up.
 */
static void S24C16_start(hcb_p np, u_char *gpreg)
{
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
 */
static void S24C16_stop(hcb_p np, u_char *gpreg)
{
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
}

/*
 * Read or write a bit to the NVRAM,
 * read if GPIO0 input else write if GPIO0 output
 */
static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
			 u_char *gpreg)
{
	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	if (read_bit)
		*read_bit = INB (nc_gpreg);
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
}

/*
 * Output an ACK to the NVRAM after reading,
 * change GPIO0 to output and when done back to an input
 */
static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
			    u_char *gpcntl)
{
	OUTB (nc_gpcntl, *gpcntl & 0xfe);
	S24C16_do_bit(np, 0, write_bit, gpreg);
	OUTB (nc_gpcntl, *gpcntl);
}

/*
 * Input an ACK from NVRAM after writing,
 * change GPIO0 to input and when done back to an output
 */
static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
			   u_char *gpcntl)
{
	OUTB (nc_gpcntl, *gpcntl | 0x01);
	S24C16_do_bit(np, read_bit, 1, gpreg);
	OUTB (nc_gpcntl, *gpcntl);
}

/*
 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
 * GPIO0 must already be set as an output
 */
static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
			     u_char *gpreg, u_char *gpcntl)
{
	int x;

	for (x = 0; x < 8; x++)
		S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);

	S24C16_read_ack(np, ack_data, gpreg, gpcntl);
}

/*
 * READ a byte from the NVRAM and then send an ACK to say we have got it,
 * GPIO0 must already be set as an input
 */
static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
			    u_char *gpreg, u_char *gpcntl)
{
	int x;
	u_char read_bit;

	*read_data = 0;
	for (x = 0; x < 8; x++) {
		S24C16_do_bit(np, &read_bit, 1, gpreg);
		*read_data |= ((read_bit & 0x01) << (7 - x));
	}

	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
}

/*
 * Read 'len' bytes starting at 'offset'.
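 */

/*
 * Layout note for the device-select bytes used below: the 24C16
 * holds 2KB as 8 x 256-byte blocks, and the upper three bits of the
 * byte offset ride inside the I2C device-select byte (bits 3:1)
 * while the low eight bits go out as the word address. Sketch of
 * the encoding (hypothetical helper):
 */
#if 0
static u_char
s24c16_devsel_sketch(int offset, int read)
{
	/* 1 0 1 0 b2 b1 b0 R/W# : 0xa0 selects write, 0xa1 read */
	return (read ? 0xa1 : 0xa0) | ((offset >> 7) & 0x0e);
}
#endif

/*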
 */
static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
{
	u_char	gpcntl, gpreg;
	u_char	old_gpcntl, old_gpreg;
	u_char	ack_data;
	int	retv = 1;
	int	x;

	/* save current state of GPCNTL and GPREG */
	old_gpreg	= INB (nc_gpreg);
	old_gpcntl	= INB (nc_gpcntl);
	gpcntl		= old_gpcntl & 0x1c;

	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
	OUTB (nc_gpreg,  old_gpreg);
	OUTB (nc_gpcntl, gpcntl);

	/* this is to set NVRAM into a known state with GPIO0/1 both low */
	gpreg = old_gpreg;
	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);

	/* now set NVRAM inactive with GPIO0/1 both high */
	S24C16_stop(np, &gpreg);

	/* activate NVRAM */
	S24C16_start(np, &gpreg);

	/* write device code and random address MSB */
	S24C16_write_byte(np, &ack_data,
		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* write random address LSB */
	S24C16_write_byte(np, &ack_data,
		offset & 0xff, &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* regenerate START state to set up for reading */
	S24C16_start(np, &gpreg);

	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
	S24C16_write_byte(np, &ack_data,
		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* now set up GPIO0 for inputting data */
	gpcntl |= 0x01;
	OUTB (nc_gpcntl, gpcntl);

	/* input all requested data - only part of total NVRAM */
	for (x = 0; x < len; x++)
		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);

	/* finally put NVRAM back in inactive mode */
	gpcntl &= 0xfe;
	OUTB (nc_gpcntl, gpcntl);
	S24C16_stop(np, &gpreg);
	retv = 0;
out:
	/* return GPIO0/1 to original states after having accessed NVRAM */
	OUTB (nc_gpcntl, old_gpcntl);
	OUTB (nc_gpreg,  old_gpreg);

	return retv;
}

#undef SET_BIT /* 0 */
#undef CLR_BIT /* 1 */
#undef SET_CLK /* 2 */
#undef CLR_CLK /* 3 */

/*
 * Try reading Symbios NVRAM.
 * Return 0 if OK.
 */
static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
{
	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
	u_char *data = (u_char *) nvram;
	int len  = sizeof(*nvram);
	u_short	csum;
	int x;

	/* probe the 24c16 and read the SYMBIOS 24c16 area */
	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
		return 1;

	/* check valid NVRAM signature, verify byte count and checksum */
	if (nvram->type != 0 ||
	    bcmp(nvram->trailer, Symbios_trailer, 6) ||
	    nvram->byte_count != len - 12)
		return 1;

	/* verify checksum */
	for (x = 6, csum = 0; x < len - 6; x++)
		csum += data[x];
	if (csum != nvram->checksum)
		return 1;

	return 0;
}

/*
 * 93C46 EEPROM reading.
 *
 * GPIO0 - data in
 * GPIO1 - data out
 * GPIO2 - clock
 * GPIO4 - chip select
 *
 * Used by Tekram.
 */

/*
 * Pulse clock bit in GPIO0
 */
static void T93C46_Clk(hcb_p np, u_char *gpreg)
{
	OUTB (nc_gpreg, *gpreg | 0x04);
	UDELAY (2);
	OUTB (nc_gpreg, *gpreg);
}

/*
 * Read bit from NVRAM
 */
static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
{
	UDELAY (2);
	T93C46_Clk(np, gpreg);
	*read_bit = INB (nc_gpreg);
}

/*
 * Write bit to GPIO0
 */
static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
{
	if (write_bit & 0x01)
		*gpreg |= 0x02;
	else
		*gpreg &= 0xfd;

	*gpreg |= 0x10;

	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
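 */

/*
 * The READ command sent by T93C46_Send_Command() below is 9 bits,
 * shifted out MSB first: a start bit (1), the opcode (10 = read),
 * then a 6-bit word address -- hence the 0x180 | x further down.
 * Sketch of the encoding (hypothetical helper):
 */
#if 0
static u_short
t93c46_read_cmd_sketch(int addr)
{
	return 0x100		/* start bit */
	     | 0x080		/* opcode "10" = READ */
	     | (addr & 0x3f);	/* 6-bit word address */
}
#endif

/*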
*/ static void T93C46_Stop(hcb_p np, u_char *gpreg) { *gpreg &= 0xef; OUTB (nc_gpreg, *gpreg); UDELAY (2); T93C46_Clk(np, gpreg); } /* * Send read command and address to NVRAM */ static void T93C46_Send_Command(hcb_p np, u_short write_data, u_char *read_bit, u_char *gpreg) { int x; /* send 9 bits, start bit (1), command (2), address (6) */ for (x = 0; x < 9; x++) T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); *read_bit = INB (nc_gpreg); } /* * READ 2 bytes from the NVRAM */ static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg) { int x; u_char read_bit; *nvram_data = 0; for (x = 0; x < 16; x++) { T93C46_Read_Bit(np, &read_bit, gpreg); if (read_bit & 0x01) *nvram_data |= (0x01 << (15 - x)); else *nvram_data &= ~(0x01 << (15 - x)); } } /* * Read Tekram NvRAM data. */ static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg) { u_char read_bit; int x; for (x = 0; x < len; x++) { /* output read command and address */ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); if (read_bit & 0x01) return 1; /* Bad */ T93C46_Read_Word(np, &data[x], gpreg); T93C46_Stop(np, gpreg); } return 0; } /* * Try reading 93C46 Tekram NVRAM. */ static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; int retv = 1; /* save current state of GPCNTL and GPREG */ old_gpreg = INB (nc_gpreg); old_gpcntl = INB (nc_gpcntl); /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, 1/2/4 out */ gpreg = old_gpreg & 0xe9; OUTB (nc_gpreg, gpreg); gpcntl = (old_gpcntl & 0xe9) | 0x09; OUTB (nc_gpcntl, gpcntl); /* input all of NVRAM, 64 words */ retv = T93C46_Read_Data(np, (u_short *) nvram, sizeof(*nvram) / sizeof(short), &gpreg); /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ OUTB (nc_gpcntl, old_gpcntl); OUTB (nc_gpreg, old_gpreg); return retv; } /* * Try reading Tekram NVRAM. * Return 0 if OK. */ static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram) { u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; switch (np->device_id) { case PCI_ID_SYM53C885: case PCI_ID_SYM53C895: case PCI_ID_SYM53C896: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); break; case PCI_ID_SYM53C875: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); if (!x) break; default: x = sym_read_T93C46_nvram(np, nvram); break; } if (x) return 1; /* verify checksum */ for (x = 0, csum = 0; x < len - 1; x += 2) csum += data[x] + (data[x+1] << 8); if (csum != 0x1234) return 1; return 0; } #endif /* SYM_CONF_NVRAM_SUPPORT */ Index: head/sys/dev/vx/if_vx.c =================================================================== --- head/sys/dev/vx/if_vx.c (revision 313981) +++ head/sys/dev/vx/if_vx.c (revision 313982) @@ -1,1077 +1,1077 @@ /*- * Copyright (c) 1994 Herb Peyerl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Herb Peyerl. * 4. The name of Herb Peyerl may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * */ #include __FBSDID("$FreeBSD$"); /* * Created from if_ep.c driver by Fred Gray (fgray@rice.edu) to support * the 3c590 family. */ /* * Modified from the FreeBSD 1.1.5.1 version by: * Andres Vega Garcia * INRIA - Sophia Antipolis, France * avega@sophia.inria.fr */ /* * Promiscuous mode added and interrupt logic slightly changed * to reduce the number of adapter failures. Transceiver select * logic changed to use value from EEPROM. Autoconfiguration * features added. * Done by: * Serge Babkin * Chelindbank (Chelyabinsk, Russia) * babkin@hq.icb.chel.su */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ETHER_MAX_LEN 1518 #define ETHER_ADDR_LEN 6 #define ETHER_ALIGN 2 static struct connector_entry { int bit; char *name; } conn_tab[VX_CONNECTORS] = { #define CONNECTOR_UTP 0 { 0x08, "utp" }, #define CONNECTOR_AUI 1 { 0x20, "aui" }, /* dummy */ { 0, "???" }, #define CONNECTOR_BNC 3 { 0x10, "bnc" }, #define CONNECTOR_TX 4 { 0x02, "tx" }, #define CONNECTOR_FX 5 { 0x04, "fx" }, #define CONNECTOR_MII 6 { 0x40, "mii" }, { 0, "???" 
} }; static void vx_txstat(struct vx_softc *); static int vx_status(struct vx_softc *); static void vx_init(void *); static void vx_init_locked(struct vx_softc *); static int vx_ioctl(struct ifnet *, u_long, caddr_t); static void vx_start(struct ifnet *); static void vx_start_locked(struct ifnet *); static void vx_watchdog(void *); static void vx_reset(struct vx_softc *); static void vx_read(struct vx_softc *); static struct mbuf *vx_get(struct vx_softc *, u_int); static void vx_mbuf_fill(void *); static void vx_mbuf_empty(struct vx_softc *); static void vx_setfilter(struct vx_softc *); static void vx_getlink(struct vx_softc *); static void vx_setlink(struct vx_softc *); int vx_attach(device_t dev) { struct vx_softc *sc = device_get_softc(dev); struct ifnet *ifp; int i; u_char eaddr[6]; ifp = sc->vx_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return 0; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); mtx_init(&sc->vx_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->vx_callout, &sc->vx_mtx, 0); callout_init_mtx(&sc->vx_watchdog, &sc->vx_mtx, 0); GO_WINDOW(0); CSR_WRITE_2(sc, VX_COMMAND, GLOBAL_RESET); VX_BUSY_WAIT; vx_getlink(sc); /* * Read the station address from the eeprom */ GO_WINDOW(0); for (i = 0; i < 3; i++) { int x; if (vx_busy_eeprom(sc)) { mtx_destroy(&sc->vx_mtx); if_free(ifp); return 0; } CSR_WRITE_2(sc, VX_W0_EEPROM_COMMAND, EEPROM_CMD_RD | (EEPROM_OEM_ADDR0 + i)); if (vx_busy_eeprom(sc)) { mtx_destroy(&sc->vx_mtx); if_free(ifp); return 0; } x = CSR_READ_2(sc, VX_W0_EEPROM_DATA); eaddr[(i << 1)] = x >> 8; eaddr[(i << 1) + 1] = x; } ifp->if_snd.ifq_maxlen = ifqmaxlen; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = vx_start; ifp->if_ioctl = vx_ioctl; ifp->if_init = vx_init; ifp->if_softc = sc; ether_ifattach(ifp, eaddr); sc->vx_tx_start_thresh = 20; /* probably a good starting point. */ VX_LOCK(sc); vx_stop(sc); VX_UNLOCK(sc); return 1; } /* * The order in here seems important. Otherwise we may not receive * interrupts. ?! */ static void vx_init(void *xsc) { struct vx_softc *sc = (struct vx_softc *)xsc; VX_LOCK(sc); vx_init_locked(sc); VX_UNLOCK(sc); } static void vx_init_locked(struct vx_softc *sc) { struct ifnet *ifp = sc->vx_ifp; int i; VX_LOCK_ASSERT(sc); VX_BUSY_WAIT; GO_WINDOW(2); for (i = 0; i < 6; i++) /* Reload the ether_addr. */ CSR_WRITE_1(sc, VX_W2_ADDR_0 + i, IF_LLADDR(sc->vx_ifp)[i]); CSR_WRITE_2(sc, VX_COMMAND, RX_RESET); VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, TX_RESET); VX_BUSY_WAIT; GO_WINDOW(1); /* Window 1 is operating window */ for (i = 0; i < 31; i++) CSR_READ_1(sc, VX_W1_TX_STATUS); CSR_WRITE_2(sc, VX_COMMAND, SET_RD_0_MASK | S_CARD_FAILURE | S_RX_COMPLETE | S_TX_COMPLETE | S_TX_AVAIL); CSR_WRITE_2(sc, VX_COMMAND, SET_INTR_MASK | S_CARD_FAILURE | S_RX_COMPLETE | S_TX_COMPLETE | S_TX_AVAIL); /* * Attempt to get rid of any stray interrupts that occurred during * configuration. On the i386 this isn't possible because one may * already be queued. However, a single stray interrupt is * unimportant. */ CSR_WRITE_2(sc, VX_COMMAND, ACK_INTR | 0xff); vx_setfilter(sc); vx_setlink(sc); CSR_WRITE_2(sc, VX_COMMAND, RX_ENABLE); CSR_WRITE_2(sc, VX_COMMAND, TX_ENABLE); vx_mbuf_fill(sc); /* Interface is now `running', with no output active. */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->vx_watchdog, hz, vx_watchdog, sc); /* Attempt to start output, if any. 
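 * vx_init_locked() already holds the softc mutex (see the
 * VX_LOCK_ASSERT() above), so it calls vx_start_locked() directly
 * rather than the vx_start() wrapper, which would try to acquire
 * the lock a second time.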
 */
	vx_start_locked(ifp);
}

static void
vx_setfilter(struct vx_softc *sc)
{
	struct ifnet *ifp = sc->vx_ifp;

	VX_LOCK_ASSERT(sc);
	GO_WINDOW(1);		/* Window 1 is operating window */
	CSR_WRITE_2(sc, VX_COMMAND, SET_RX_FILTER | FIL_INDIVIDUAL |
	    FIL_BRDCST | FIL_MULTICAST |
	    ((ifp->if_flags & IFF_PROMISC) ? FIL_PROMISC : 0));
}

static void
vx_getlink(struct vx_softc *sc)
{
	int n, k;

	GO_WINDOW(3);
	sc->vx_connectors = CSR_READ_2(sc, VX_W3_RESET_OPT) & 0x7f;
	for (n = 0, k = 0; k < VX_CONNECTORS; k++) {
		if (sc->vx_connectors & conn_tab[k].bit) {
			if (n > 0)
				printf("/");
			printf("%s", conn_tab[k].name);
			n++;
		}
	}
	if (sc->vx_connectors == 0) {
		printf("no connectors!\n");
		return;
	}
	GO_WINDOW(3);
	sc->vx_connector =
	    (CSR_READ_4(sc, VX_W3_INTERNAL_CFG) & INTERNAL_CONNECTOR_MASK)
	    >> INTERNAL_CONNECTOR_BITS;
	if (sc->vx_connector & 0x10) {
		sc->vx_connector &= 0x0f;
		printf("[*%s*]", conn_tab[(int)sc->vx_connector].name);
		printf(": disable 'auto select' with DOS util!\n");
	} else {
		printf("[*%s*]\n", conn_tab[(int)sc->vx_connector].name);
	}
}

static void
vx_setlink(struct vx_softc *sc)
{
	struct ifnet *ifp = sc->vx_ifp;
	int i, j, k;
	char *reason, *warning;
	static int prev_flags;
	static signed char prev_conn = -1;

	VX_LOCK_ASSERT(sc);
	if (prev_conn == -1)
		prev_conn = sc->vx_connector;

	/*
	 * S.B.
	 *
	 * The behavior has now been changed slightly:
	 *
	 * If any of the link[0-2] flags is set and its connector is
	 * physically present, connectors are chosen in this order:
	 *
	 *   link0 - AUI * highest precedence
	 *   link1 - BNC
	 *   link2 - UTP * lowest precedence
	 *
	 * If none of these flags is set, the connector specified in
	 * the EEPROM is used (if it is present on the card, or UTP
	 * if it is not).
	 */
	i = sc->vx_connector;	/* default in EEPROM */
	reason = "default";
-	warning = 0;
+	warning = NULL;
	if (ifp->if_flags & IFF_LINK0) {
		if (sc->vx_connectors & conn_tab[CONNECTOR_AUI].bit) {
			i = CONNECTOR_AUI;
			reason = "link0";
		} else {
			warning = "aui not present! (link0)";
		}
	} else if (ifp->if_flags & IFF_LINK1) {
		if (sc->vx_connectors & conn_tab[CONNECTOR_BNC].bit) {
			i = CONNECTOR_BNC;
			reason = "link1";
		} else {
			warning = "bnc not present! (link1)";
		}
	} else if (ifp->if_flags & IFF_LINK2) {
		if (sc->vx_connectors & conn_tab[CONNECTOR_UTP].bit) {
			i = CONNECTOR_UTP;
			reason = "link2";
		} else {
			warning = "utp not present! (link2)";
		}
	} else if ((sc->vx_connectors & conn_tab[(int)sc->vx_connector].bit)
	    == 0) {
		warning = "strange connector type in EEPROM.";
		reason = "forced";
		i = CONNECTOR_UTP;
	}

	/* Avoid unnecessary messages. */
	k = (prev_flags ^ ifp->if_flags) & (IFF_LINK0 | IFF_LINK1 | IFF_LINK2);
	if ((k != 0) || (prev_conn != i)) {
		if (warning != NULL)
			if_printf(ifp, "warning: %s\n", warning);
		if_printf(ifp, "selected %s. (%s)\n", conn_tab[i].name, reason);
	}

	/* Set the selected connector. */
	GO_WINDOW(3);
	j = CSR_READ_4(sc, VX_W3_INTERNAL_CFG) & ~INTERNAL_CONNECTOR_MASK;
	CSR_WRITE_4(sc, VX_W3_INTERNAL_CFG, j | (i << INTERNAL_CONNECTOR_BITS));

	/* First, disable all. */
	CSR_WRITE_2(sc, VX_COMMAND, STOP_TRANSCEIVER);
	DELAY(800);
	GO_WINDOW(4);
	CSR_WRITE_2(sc, VX_W4_MEDIA_TYPE, 0);

	/* Second, enable the selected one.
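	 * Each media type has its own enable sequence: UTP sets
	 * ENABLE_UTP in the window 4 media type register, BNC must
	 * start the transceiver and wait for it to settle, TX/FX
	 * enable the link beat, and AUI/MII need no extra step.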
*/ switch (i) { case CONNECTOR_UTP: GO_WINDOW(4); CSR_WRITE_2(sc, VX_W4_MEDIA_TYPE, ENABLE_UTP); break; case CONNECTOR_BNC: CSR_WRITE_2(sc, VX_COMMAND, START_TRANSCEIVER); DELAY(800); break; case CONNECTOR_TX: case CONNECTOR_FX: GO_WINDOW(4); CSR_WRITE_2(sc, VX_W4_MEDIA_TYPE, LINKBEAT_ENABLE); break; default: /* AUI and MII fall here */ break; } GO_WINDOW(1); prev_flags = ifp->if_flags; prev_conn = i; } static void vx_start(struct ifnet *ifp) { struct vx_softc *sc = ifp->if_softc; VX_LOCK(sc); vx_start_locked(ifp); VX_UNLOCK(sc); } static void vx_start_locked(struct ifnet *ifp) { struct vx_softc *sc = ifp->if_softc; struct mbuf *m; int len, pad; VX_LOCK_ASSERT(sc); /* Don't transmit if interface is busy or not running */ if ((sc->vx_ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; startagain: /* Sneak a peek at the next packet */ m = ifp->if_snd.ifq_head; if (m == NULL) { return; } /* We need to use m->m_pkthdr.len, so require the header */ M_ASSERTPKTHDR(m); len = m->m_pkthdr.len; pad = (4 - len) & 3; /* * The 3c509 automatically pads short packets to minimum ethernet * length, but we drop packets that are too large. Perhaps we should * truncate them instead? */ if (len + pad > ETHER_MAX_LEN) { /* packet is obviously too large: toss it */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); IF_DEQUEUE(&ifp->if_snd, m); m_freem(m); goto readcheck; } VX_BUSY_WAIT; if (CSR_READ_2(sc, VX_W1_FREE_TX) < len + pad + 4) { CSR_WRITE_2(sc, VX_COMMAND, SET_TX_AVAIL_THRESH | ((len + pad + 4) >> 2)); /* not enough room in FIFO - make sure */ if (CSR_READ_2(sc, VX_W1_FREE_TX) < len + pad + 4) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->vx_timer = 1; return; } } CSR_WRITE_2(sc, VX_COMMAND, SET_TX_AVAIL_THRESH | (8188 >> 2)); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) /* not really needed */ return; VX_BUSY_WAIT; CSR_WRITE_2(sc, VX_COMMAND, SET_TX_START_THRESH | ((len / 4 + sc->vx_tx_start_thresh) >> 2)); BPF_MTAP(sc->vx_ifp, m); /* * Do the output at splhigh() so that an interrupt from another device * won't cause a FIFO underrun. * * XXX: Can't enforce that anymore. */ CSR_WRITE_4(sc, VX_W1_TX_PIO_WR_1, len | TX_INDICATE); while (m) { if (m->m_len > 3) bus_space_write_multi_4(sc->vx_bst, sc->vx_bsh, VX_W1_TX_PIO_WR_1, (u_int32_t *)mtod(m, caddr_t), m->m_len / 4); if (m->m_len & 3) bus_space_write_multi_1(sc->vx_bst, sc->vx_bsh, VX_W1_TX_PIO_WR_1, mtod(m, caddr_t) + (m->m_len & ~3), m->m_len & 3); m = m_free(m); } while (pad--) CSR_WRITE_1(sc, VX_W1_TX_PIO_WR_1, 0); /* Padding */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); sc->vx_timer = 1; readcheck: if ((CSR_READ_2(sc, VX_W1_RX_STATUS) & ERR_INCOMPLETE) == 0) { /* We received a complete packet. */ if ((CSR_READ_2(sc, VX_STATUS) & S_INTR_LATCH) == 0) { /* * No interrupt, read the packet and continue * Is this supposed to happen? Is my motherboard * completely busted? */ vx_read(sc); } else /* * Got an interrupt, return so that it gets * serviced. */ return; } else { /* Check if we are stuck and reset [see XXX comment] */ if (vx_status(sc)) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "adapter reset\n"); vx_reset(sc); } } goto startagain; } /* * XXX: The 3c509 card can get in a mode where both the fifo status bit * FIFOS_RX_OVERRUN and the status bit ERR_INCOMPLETE are set * We detect this situation and we reset the adapter. * It happens at times when there is a lot of broadcast traffic * on the cable (once in a blue moon). 
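 * Both vx_start_locked() and vx_read() check for this state through
 * vx_status() and call vx_reset() when the incomplete status persists.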
*/ static int vx_status(struct vx_softc *sc) { struct ifnet *ifp; int fifost; VX_LOCK_ASSERT(sc); /* * Check the FIFO status and act accordingly */ GO_WINDOW(4); fifost = CSR_READ_2(sc, VX_W4_FIFO_DIAG); GO_WINDOW(1); ifp = sc->vx_ifp; if (fifost & FIFOS_RX_UNDERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "RX underrun\n"); vx_reset(sc); return 0; } if (fifost & FIFOS_RX_STATUS_OVERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "RX Status overrun\n"); return 1; } if (fifost & FIFOS_RX_OVERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "RX overrun\n"); return 1; } if (fifost & FIFOS_TX_OVERRUN) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "TX overrun\n"); vx_reset(sc); return 0; } return 0; } static void vx_txstat(struct vx_softc *sc) { struct ifnet *ifp; int i; VX_LOCK_ASSERT(sc); /* * We need to read+write TX_STATUS until we get a 0 status * in order to turn off the interrupt flag. */ ifp = sc->vx_ifp; while ((i = CSR_READ_1(sc, VX_W1_TX_STATUS)) & TXS_COMPLETE) { CSR_WRITE_1(sc, VX_W1_TX_STATUS, 0x0); if (i & TXS_JABBER) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "jabber (%x)\n", i); vx_reset(sc); } else if (i & TXS_UNDERRUN) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "fifo underrun (%x) @%d\n", i, sc->vx_tx_start_thresh); if (sc->vx_tx_succ_ok < 100) sc->vx_tx_start_thresh = min(ETHER_MAX_LEN, sc->vx_tx_start_thresh + 20); sc->vx_tx_succ_ok = 0; vx_reset(sc); } else if (i & TXS_MAX_COLLISION) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); CSR_WRITE_2(sc, VX_COMMAND, TX_ENABLE); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } else sc->vx_tx_succ_ok = (sc->vx_tx_succ_ok + 1) & 127; } } void vx_intr(void *voidsc) { short status; struct vx_softc *sc = voidsc; struct ifnet *ifp = sc->vx_ifp; VX_LOCK(sc); for (;;) { CSR_WRITE_2(sc, VX_COMMAND, C_INTR_LATCH); status = CSR_READ_2(sc, VX_STATUS); if ((status & (S_TX_COMPLETE | S_TX_AVAIL | S_RX_COMPLETE | S_CARD_FAILURE)) == 0) break; /* * Acknowledge any interrupts. It's important that we do this * first, since there would otherwise be a race condition. * Due to the i386 interrupt queueing, we may get spurious * interrupts occasionally. */ CSR_WRITE_2(sc, VX_COMMAND, ACK_INTR | status); if (status & S_RX_COMPLETE) vx_read(sc); if (status & S_TX_AVAIL) { sc->vx_timer = 0; sc->vx_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; vx_start_locked(sc->vx_ifp); } if (status & S_CARD_FAILURE) { if_printf(ifp, "adapter failure (%x)\n", status); sc->vx_timer = 0; vx_reset(sc); break; } if (status & S_TX_COMPLETE) { sc->vx_timer = 0; vx_txstat(sc); vx_start_locked(ifp); } } VX_UNLOCK(sc); /* no more interrupts */ return; } static void vx_read(struct vx_softc *sc) { struct ifnet *ifp = sc->vx_ifp; struct mbuf *m; struct ether_header *eh; u_int len; VX_LOCK_ASSERT(sc); len = CSR_READ_2(sc, VX_W1_RX_STATUS); again: if (ifp->if_flags & IFF_DEBUG) { int err = len & ERR_MASK; char *s = NULL; if (len & ERR_INCOMPLETE) s = "incomplete packet"; else if (err == ERR_OVERRUN) s = "packet overrun"; else if (err == ERR_RUNT) s = "runt packet"; else if (err == ERR_ALIGNMENT) s = "bad alignment"; else if (err == ERR_CRC) s = "bad crc"; else if (err == ERR_OVERSIZE) s = "oversized packet"; else if (err == ERR_DRIBBLE) s = "dribble bits"; if (s) if_printf(ifp, "%s\n", s); } if (len & ERR_INCOMPLETE) return; if (len & ERR_RX) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto abort; } len &= RX_BYTES_MASK; /* Lower 11 bits = RX bytes. */ /* Pull packet off interface. 
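 * vx_get() below PIO-reads the frame out of the RX FIFO into an mbuf
 * chain, drawing buffers from the preallocated vx_mb[] ring where
 * possible.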
*/ m = vx_get(sc, len); - if (m == 0) { + if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto abort; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); { struct mbuf *m0; m0 = m_devget(mtod(m, char *), m->m_pkthdr.len, ETHER_ALIGN, ifp, NULL); if (m0 == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto abort; } m_freem(m); m = m0; } /* We assume the header fit entirely in one mbuf. */ eh = mtod(m, struct ether_header *); /* * XXX: Some cards seem to be in promiscuous mode all the time. * we need to make sure we only get our own stuff always. * bleah! */ if (!(ifp->if_flags & IFF_PROMISC) && (eh->ether_dhost[0] & 1) == 0 /* !mcast and !bcast */ && bcmp(eh->ether_dhost, IF_LLADDR(sc->vx_ifp), ETHER_ADDR_LEN) != 0) { m_freem(m); return; } VX_UNLOCK(sc); (*ifp->if_input)(ifp, m); VX_LOCK(sc); /* * In periods of high traffic we can actually receive enough * packets so that the fifo overrun bit will be set at this point, * even though we just read a packet. In this case we * are not going to receive any more interrupts. We check for * this condition and read again until the fifo is not full. * We could simplify this test by not using vx_status(), but * rechecking the RX_STATUS register directly. This test could * result in unnecessary looping in cases where there is a new * packet but the fifo is not full, but it will not fix the * stuck behavior. * * Even with this improvement, we still get packet overrun errors * which are hurting performance. Maybe when I get some more time * I'll modify vx_read() so that it can handle RX_EARLY interrupts. */ if (vx_status(sc)) { len = CSR_READ_2(sc, VX_W1_RX_STATUS); /* Check if we are stuck and reset [see XXX comment] */ if (len & ERR_INCOMPLETE) { if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "adapter reset\n"); vx_reset(sc); return; } goto again; } return; abort: CSR_WRITE_2(sc, VX_COMMAND, RX_DISCARD_TOP_PACK); } static struct mbuf * vx_get(struct vx_softc *sc, u_int totlen) { struct ifnet *ifp = sc->vx_ifp; struct mbuf *top, **mp, *m; int len; VX_LOCK_ASSERT(sc); m = sc->vx_mb[sc->vx_next_mb]; sc->vx_mb[sc->vx_next_mb] = NULL; if (m == NULL) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return NULL; } else { /* If the queue is no longer full, refill. */ if (sc->vx_last_mb == sc->vx_next_mb && sc->vx_buffill_pending == 0) { callout_reset(&sc->vx_callout, hz / 100, vx_mbuf_fill, sc); sc->vx_buffill_pending = 1; } /* Convert one of our saved mbuf's. */ sc->vx_next_mb = (sc->vx_next_mb + 1) % MAX_MBS; m->m_data = m->m_pktdat; m->m_flags = M_PKTHDR; bzero(&m->m_pkthdr, sizeof(m->m_pkthdr)); } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = totlen; len = MHLEN; top = NULL; mp = ⊤ /* * We read the packet at splhigh() so that an interrupt from another * device doesn't cause the card's buffer to overflow while we're * reading it. We may still lose packets at other times. * * XXX: Can't enforce this anymore. */ /* * Since we don't set allowLargePackets bit in MacControl register, * we can assume that totlen <= 1500bytes. * The while loop will be performed iff we have a packet with * MLEN < m_len < MINCLSIZE. 
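 * (When a segment is at least MINCLSIZE bytes, MCLGET() attaches a
 * cluster to the mbuf, so a single pass can then copy up to MCLBYTES
 * bytes.)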
 */
	while (totlen > 0) {
		if (top) {
			m = sc->vx_mb[sc->vx_next_mb];
			sc->vx_mb[sc->vx_next_mb] = NULL;
			if (m == NULL) {
				MGET(m, M_NOWAIT, MT_DATA);
				if (m == NULL) {
					m_freem(top);
					return NULL;
				}
			} else {
				sc->vx_next_mb = (sc->vx_next_mb + 1) % MAX_MBS;
			}
			len = MLEN;
		}
		if (totlen >= MINCLSIZE) {
			if (MCLGET(m, M_NOWAIT))
				len = MCLBYTES;
		}
		len = min(totlen, len);
		if (len > 3)
			bus_space_read_multi_4(sc->vx_bst, sc->vx_bsh,
			    VX_W1_RX_PIO_RD_1, mtod(m, u_int32_t *), len / 4);
		if (len & 3) {
			bus_space_read_multi_1(sc->vx_bst, sc->vx_bsh,
			    VX_W1_RX_PIO_RD_1,
			    mtod(m, u_int8_t *) + (len & ~3), len & 3);
		}
		m->m_len = len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}
	CSR_WRITE_2(sc, VX_COMMAND, RX_DISCARD_TOP_PACK);
	return top;
}

static int
vx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		VX_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * then stop it.
			 */
			vx_stop(sc);
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			vx_init_locked(sc);
		} else {
			/*
			 * Deal with flags changes:
			 * IFF_MULTICAST, IFF_PROMISC,
			 * IFF_LINK0, IFF_LINK1, IFF_LINK2.
			 */
			vx_setfilter(sc);
			vx_setlink(sc);
		}
		VX_UNLOCK(sc);
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VX_LOCK(sc);
		if (ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		VX_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		VX_LOCK(sc);
		vx_reset(sc);
		VX_UNLOCK(sc);
		error = 0;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
vx_reset(struct vx_softc *sc)
{
	VX_LOCK_ASSERT(sc);

	vx_stop(sc);
	vx_init_locked(sc);
}

static void
vx_watchdog(void *arg)
{
	struct vx_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VX_LOCK_ASSERT(sc);
	callout_reset(&sc->vx_watchdog, hz, vx_watchdog, sc);
	if (sc->vx_timer == 0 || --sc->vx_timer > 0)
		return;

	ifp = sc->vx_ifp;
	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "device timeout\n");
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	vx_start_locked(ifp);
	vx_intr(sc);
}

void
vx_stop(struct vx_softc *sc)
{
	VX_LOCK_ASSERT(sc);

	sc->vx_timer = 0;
	callout_stop(&sc->vx_watchdog);

	CSR_WRITE_2(sc, VX_COMMAND, RX_DISABLE);
	CSR_WRITE_2(sc, VX_COMMAND, RX_DISCARD_TOP_PACK);
	VX_BUSY_WAIT;
	CSR_WRITE_2(sc, VX_COMMAND, TX_DISABLE);
	CSR_WRITE_2(sc, VX_COMMAND, STOP_TRANSCEIVER);
	DELAY(800);
	CSR_WRITE_2(sc, VX_COMMAND, RX_RESET);
	VX_BUSY_WAIT;
	CSR_WRITE_2(sc, VX_COMMAND, TX_RESET);
	VX_BUSY_WAIT;
	CSR_WRITE_2(sc, VX_COMMAND, C_INTR_LATCH);
	CSR_WRITE_2(sc, VX_COMMAND, SET_RD_0_MASK);
	CSR_WRITE_2(sc, VX_COMMAND, SET_INTR_MASK);
	CSR_WRITE_2(sc, VX_COMMAND, SET_RX_FILTER);

	vx_mbuf_empty(sc);
}

int
vx_busy_eeprom(struct vx_softc *sc)
{
	int j, i = 100;

	while (i--) {
		j = CSR_READ_2(sc, VX_W0_EEPROM_COMMAND);
		if (j & EEPROM_BUSY)
			DELAY(100);
		else
			break;
	}
	if (!i) {
		if_printf(sc->vx_ifp, "eeprom failed to come ready\n");
		return (1);
	}
	return (0);
}

static void
vx_mbuf_fill(void *sp)
{
	struct vx_softc *sc = (struct vx_softc *)sp;
	int i;

	VX_LOCK_ASSERT(sc);
	i = sc->vx_last_mb;
	do {
		if (sc->vx_mb[i] == NULL)
			MGET(sc->vx_mb[i], M_NOWAIT, MT_DATA);
		if (sc->vx_mb[i] == NULL)
			break;
		i = (i + 1) % MAX_MBS;
	} while (i != sc->vx_next_mb);
	sc->vx_last_mb = i;
	/* If the queue was not
	   filled, try again. */
	if (sc->vx_last_mb != sc->vx_next_mb) {
		callout_reset(&sc->vx_callout, hz / 100, vx_mbuf_fill, sc);
		sc->vx_buffill_pending = 1;
	} else {
		sc->vx_buffill_pending = 0;
	}
}

static void
vx_mbuf_empty(struct vx_softc *sc)
{
	int i;

	VX_LOCK_ASSERT(sc);
	for (i = 0; i < MAX_MBS; i++) {
		if (sc->vx_mb[i]) {
			m_freem(sc->vx_mb[i]);
			sc->vx_mb[i] = NULL;
		}
	}
	sc->vx_last_mb = sc->vx_next_mb = 0;
	if (sc->vx_buffill_pending != 0)
		callout_stop(&sc->vx_callout);
}
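As a closing note on the transmit-side arithmetic in vx_start_locked() above: frames are padded to a 32-bit boundary before the PIO copy, and the FIFO must hold the frame, the pad, and the leading length/TX_INDICATE dword. A small standalone sketch of that computation (hypothetical helper, not driver code):

#include <stdio.h>

/*
 * Mirrors vx_start_locked(): (4 - len) & 3 is the number of pad bytes
 * to the next dword boundary, and len + pad + 4 is the FIFO space the
 * driver checks for (the extra dword is the length word written ahead
 * of the frame data).
 */
static unsigned int
vx_fifo_need(unsigned int len)
{
	unsigned int pad = (4 - len) & 3;

	return (len + pad + 4);
}

int
main(void)
{
	unsigned int len;

	for (len = 60; len <= 64; len++)
		printf("len %u -> FIFO bytes needed %u\n", len,
		    vx_fifo_need(len));
	return (0);
}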